diff --git "a/462.jsonl" "b/462.jsonl"
new file mode 100644
--- /dev/null
+++ "b/462.jsonl"
@@ -0,0 +1,763 @@
+{"seq_id":"573379885","text":"#### THE CODE IN THIS FILE IS SOURCED FROM THE LINK IN README.MD ###\n\n\n# This file converts the original \"tmdb_5000\" data from kaggle into data that can be used\n# for recommendation. Involves joining tables with movie metadata(crew, cast etc) and movie information\n# into one. New data is written to a .csv file and then read when using the program. This file is only run once\n# to create the \"movie_data.csv\" file.\n\nimport numpy as np\nimport pandas as pd\nfrom ast import literal_eval\n\ndf1 = pd.read_csv(r'C:\\Users\\rishi_rhvenli\\PycharmProjects\\MovieRec\\Data\\tmdb_5000_credits.csv')\ndf2 = pd.read_csv(r'C:\\Users\\rishi_rhvenli\\PycharmProjects\\MovieRec\\Data\\tmdb_5000_movies.csv')\ndf1.columns = ['id', 'title', 'cast', 'crew']\n\n# Merge the 2 dataframes by movie-id\ndf2= df2.merge(df1, on='id')\n\ndf2['overview'] = df2['overview'].fillna('')\n\nfeatures = ['cast', 'crew', 'genres']\nfor feature in features:\n df2[feature] = df2[feature].apply(literal_eval)\n\n\n# Extract the director from the crew-list\ndef get_director(x):\n for i in x:\n if i['job'] == 'Director':\n return i['name']\n return np.nan\n\n\n# Used to extract top-4 items in a list (eg: top 4 cast members in a movie)\ndef get_list(x):\n if isinstance(x, list):\n names = [i['name'] for i in x]\n if len(names) > 4:\n names = names[:4]\n return names\n return []\n\n\n# Extracts the director and other meta-information from the dataframe\ndf2['director'] = df2['crew'].apply(get_director)\nfeatures = ['cast', 'genres']\nfor feature in features:\n df2[feature] = df2[feature].apply(get_list)\n\n\n# Converts data in 'x' to lower case and removes all spaces.\n# Done to differentiate, for example, Chris Pratt from Chris Evans\ndef clean_data(x):\n if isinstance(x, list):\n return [str.lower(i.replace(\" \", \"\")) for i in x]\n else:\n if isinstance(x, str):\n return str.lower(x.replace(\" \", \"\"))\n else:\n return ''\n\n\n# Cleans the data in the newly made columns\nfeatures = ['cast', 'director', 'genres']\nfor feature in features:\n df2[feature] = df2[feature].apply(clean_data)\ndf2['new_title'] = df2['original_title'].apply(clean_data)\n\n\n# Creates a 'soup' of information that can be used to check for 'similar' movies\ndef create_soup(x):\n return ' ' + ' '.join(x['cast']) + ' ' + x['director'] + ' ' + ' '.join(x['genres'])\n\n\ndf2['soup'] = df2.apply(create_soup, axis=1)\ndf2.to_csv(r'C:\\Users\\rishi_rhvenli\\PycharmProjects\\MovieRec\\Data\\movie_data.csv', index=None, header=True)\n# DATA PROCESSING ENDS\n\n","sub_path":"PythonFiles/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"475296891","text":"# coding=utf-8\n# Distributed under the MIT software license, see the accompanying\n# file LICENSE or http://www.opensource.org/licenses/mit-license.php.\n\nimport time\nfrom io import StringIO\n\nimport simplejson as json\nfrom twisted.internet.protocol import Protocol, connectionDone\n\nfrom pyqrllib.pyqrllib import mnemonic2bin, hstr2bin, bin2hstr\nfrom qrl.core import helper, logger, config\nfrom qrl.crypto.words import wordlist\nfrom qrl.crypto.xmss import XMSS\n\n\ndef hexseed_to_seed(hex_seed):\n if len(hex_seed) != 96:\n return False\n return hstr2bin(hex_seed)\n\n\n# FIXME: Clean this up\n\nclass WalletProtocol(Protocol):\n def __init__(self):\n self.cmd_mapping = {\n \"create\": self._create,\n \"getnewaddress\": self._getnewaddress,\n \"hexseed\": self._hexseed,\n \"seed\": self._seed,\n \"search\": self._search,\n \"json_block\": self._json_block,\n \"savenewaddress\": self._savenewaddress,\n \"recoverfromhexseed\": self._recoverfromhexseed,\n \"recoverfromwords\": self._recoverfromwords,\n \"stake\": self._stake,\n \"stakenextepoch\": self._stakenextepoch,\n \"send\": self._send,\n \"mempool\": self._mempool,\n \"help\": self._help,\n \"quit\": self._quit,\n \"exit\": self._quit,\n \"wallet\": self._wallet,\n \"getinfo\": self._getinfo,\n \"blockheight\": self._blockheight,\n \"peers\": self._peers,\n \"reboot\": self._reboot,\n }\n self.cmd_list = list(self.cmd_mapping.keys())\n\n self.output = {'status': 1,\n 'keys': [],\n 'message': StringIO()}\n\n self.isJSON = False\n\n def parse_cmd(self, data):\n # Parse out passed in argument to get:\n # 1. Command ([0])\n # 1. 0-Many arguments ([1:])\n\n # Get entered line as an array of strings delimited by \"space.\"\n # Will chomp away any extra spaces\n data = data.split()\n # Arguments include anything beyond the first index\n\n if len(data) != 0: # if anything was entered\n\n command = data[0]\n args = None\n if len(data) > 0: # args optional\n args = data[1:]\n\n if command in self.cmd_mapping:\n self.cmd_mapping[command](args)\n\n else:\n return False\n\n return True\n\n # Called when a command is recieved through telnet\n # Might be a good idea to use a json encrypted wallet\n def dataReceived(self, data):\n try:\n data = data.strip().decode()\n\n self.factory.recn += 1\n self.isJSON = False\n if data.lower().startswith('json '):\n self.isJSON = True\n data = data[5:]\n\n if not self.parse_cmd(data):\n self.output['status'] = 1\n self.output['message'].write(\">>> Command not recognised. Use 'help' for details \\r\\n\")\n except KeyboardInterrupt as e:\n self.output['message'] = StringIO()\n self.output['message'].write('Unexpected Error\\r\\nReport to QRL Developers')\n logger.error('Unexpected Error WalletProtocol\\n')\n logger.exception(e)\n\n self.output['message'] = self.output['message'].getvalue()\n\n try:\n if self.isJSON:\n self.transport.write('%s' % (str(json.dumps(self.output)),))\n else:\n self.transport.write(bytes(str(self.output['message']), 'utf-8'))\n except Exception as e:\n logger.error('Walletprotocol unexpected exception while sending msg to client')\n logger.exception(e)\n\n del self.output\n self.output = {'status': 1,\n 'keys': [],\n 'message': StringIO()}\n\n # What does this do?\n # whenever you type telnet 127.0.0.1 2000\n # a connection is made and this function is called to initialize the things.\n def connectionMade(self):\n self.transport.write(b'QRL node connection established. 
Try starting with \"help\" ')\n self.factory.connections += 1\n if self.factory.connections > 1:\n logger.info('only one local connection allowed')\n self.transport.write(b'only one local connection allowed, sorry')\n self.transport.loseConnection()\n else:\n if self.transport.getPeer().host == '127.0.0.1':\n logger.info('>>> new local connection %s %s', str(self.factory.connections), self.transport.getPeer())\n else:\n self.transport.loseConnection()\n logger.info('Unauthorised remote login attempt..')\n\n def connectionLost(self, reason=connectionDone):\n self.factory.connections -= 1\n\n ###################################### LOCAL WALLET ACCESS ###############################################\n\n # Pseudocode:\n\n # is chain up to date? If not, fail/inform user\n # is address null/void? If it is, fail/print usage instructions\n # is the first letter of the address Q? If not, fail/print usage instructions\n # is the address in use? If not, fail/inform user\n\n # if all of these are met, return the balance\n def getbalance(self, addr):\n self.output['status'] = 1\n\n # is chain up to date? If not, fail/inform user\n if self.factory.state.state_uptodate(self.factory.chain.height()) is False:\n self.output['message'].write('>>> LevelDB not up to date..\\r\\n')\n # add \"force\" argument to bring it up to date and get balance?\n return\n\n # is address null/void? If it is, fail/print usage instructions\n if not addr:\n self.output['message'].write('>>> Usage: getbalance
(Addresses begin with Q)\\r\\n')\n return\n\n # is the first letter of the address Q? If not, fail/print usage instructions\n if addr[0][0] != 'Q':\n self.output['message'].write('>>> Usage: getbalance (Addresses begin with Q)\\r\\n')\n return\n\n # is the address in use? If not, fail/inform user\n if self.factory.state.state_address_used(addr[0]) is False:\n self.output['message'].write(bytes('>>> Unused address: ' + addr + '\\r\\n', 'utf-8'))\n return\n\n # if all of these are met, return the balance\n self.output['status'] = 0\n balance = self.factory.state.state_balance(addr[0])\n self.output['message'].write(bytes('>>> balance: ' + str(balance) + '\\r\\n', 'utf-8'))\n self.output['keys'] += ['balance']\n self.output['balance'] = balance\n return\n\n # Pseudocode:\n # If no arguments are used, or more than 3 are used, fail/inform user of usage\n # else:\n #\tget signature type to use, reject if the type is incorrect\n # prevent user from generating an extremely large number of XMSS signatures\n #\tgenerate address\n #\tinform user of address information\n #\ttell them how to save the address to wallet file\n def _getnewaddress(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Creating new address, please be patient as this can take some time ...\\r\\n')\n self.output['keys'] += ['keypair_type', 'possible_signatures', 'address']\n\n addr_bundle = self.factory.chain.wallet.get_new_address()\n\n self.output['message'].write('>>> Keypair type: ' + ''.join(addr_bundle[1].get_type() + '\\r\\n'))\n self.output['message'].write(\n '>>> Signatures possible with address: ' + str(addr_bundle[1].get_number_signatures()) + '\\r\\n')\n self.output['message'].write('>>> Address: ' + addr_bundle[1].get_address() + '\\r\\n')\n\n self.output['keypair_type'] = ''.join(addr_bundle[1].get_type() + '\\r\\n')\n self.output['possible_signatures'] = str(addr_bundle[1].get_number_signatures())\n self.output['address'] = addr_bundle[1].get_address()\n\n # TODO: Would you like to save this address to your wallet file (call savenewaddress)? Y/N\n self.output['message'].write(\">>> type 'savenewaddress' to append to wallet file\" + '\\r\\n')\n self.factory.newaddress = addr_bundle\n\n # Simply saves wallet information\n def _savenewaddress(self, args):\n self.output['status'] = 1\n if not self.factory.newaddress:\n self.output['message'].write(\">>> No new addresses created, yet. Try 'getnewaddress'\" + '\\r\\n')\n return\n self.output['status'] = 0\n self.factory.chain.wallet.append_wallet(self.factory.newaddress)\n self.output['message'].write('>>> new address saved in self.factory.chain.wallet.\\r\\n')\n return\n\n # This method is for sending between local wallets as well as network wallets\n def _send(self, args):\n self.output['status'] = 1\n if not args or len(args) < 3:\n self.output['message'].write('>>> Usage: send []\\r\\n')\n self.output['message'].write('>>> i.e. 
send 0 4 100 5\\r\\n')\n self.output['message'].write('>>> ^ will send 100 coins from address 0 to 4 from the wallet\\r\\n')\n self.output['message'].write('>>> can be a pasted address (starts with Q)\\r\\n')\n self.output['message'].write('>>> 5 is the txn fee\\r\\n')\n return\n\n wallet_from = args[0]\n wallet_to = args[1]\n amount_arg = args[2]\n fee_arg = 0\n if len(args) == 4:\n fee_arg = args[3]\n\n qrlnode = self.factory.qrlnode\n\n ########################\n ########################\n\n try:\n wallet_from = qrlnode.get_wallet_absolute(wallet_from)\n wallet_to = qrlnode.get_wallet_absolute(wallet_to)\n amount = qrlnode.get_dec_amount(amount_arg)\n fee = qrlnode.get_dec_amount(fee_arg)\n\n tx = qrlnode.transfer_coins(wallet_from, wallet_to, amount, fee)\n\n except Exception as e:\n self.output['message'].write(str(e))\n return\n\n ########################\n ########################\n # FIXME: Clean below\n\n self.output['status'] = 0\n self.output['message'].write('>>> ' + bin2hstr(tx.txhash))\n # FIXME: Review all quantities\n # FIXME: Magic number? Unify\n self.output['message'].write('>>> From: ' + str(tx.txfrom) + ' To: ' + str(tx.txto) + ' For: ' + str(\n tx.amount / 100000000.000000000) + ' Fee: ' + str(tx.fee / 100000000.000000000) + '\\r\\n')\n self.output['message'].write('>>>created and sent into p2p network\\r\\n')\n\n def _wallet(self, args):\n if not self.factory.state.state_uptodate(self.factory.chain.height()):\n self.factory.state.state_read_chain(self.factory.chain)\n\n self.output['status'] = 0\n self.output['message'].write('>>> Wallet contents:\\r\\n')\n self.output['keys'] += ['list_addresses']\n self.output['list_addresses'] = {}\n\n list_addr, list_addresses = self.factory.chain.wallet.list_addresses(self.factory.chain.state,\n self.factory.chain.transaction_pool, True)\n self.output['list_addresses'] = list_addresses\n\n y = 0\n for address in list_addr:\n self.output['message'].write(str(y) + str(address) + '\\r\\n')\n y += 1\n\n def _create(self, args):\n self.factory.p2pFactory.pos.create_next_block(int(args[0]))\n self.output['status'] = 0\n self.output['message'].write('Creating blocknumber #' + str(args[0]))\n\n def _hexseed(self, args):\n for addr_bundle in self.factory.chain.wallet.address_bundle:\n if isinstance(addr_bundle.xmss, XMSS):\n self.output['status'] = 0\n self.output['message'].write('Address: ' + addr_bundle.xmss.get_address() + '\\r\\n')\n self.output['message'].write('Recovery seed: ' + addr_bundle.xmss.get_hexseed() + '\\r\\n')\n self.output['keys'] += ['Address', 'Recovery seed']\n self.output['Address'] = addr_bundle.xmss.get_address()\n self.output['Recovery seed'] = addr_bundle.xmss.get_hexseed()\n\n def _seed(self, args):\n for addr_bundle in self.factory.chain.wallet.address_bundle:\n if isinstance(addr_bundle.xmss, XMSS):\n self.output['status'] = 0\n self.output['message'].write('Address: ' + addr_bundle.xmss.get_address() + '\\r\\n')\n self.output['message'].write('Recovery seed: ' + addr_bundle.xmss.get_mnemonic() + '\\r\\n')\n self.output['keys'] += ['Address', 'Recovery seed']\n\n def _search(self, args):\n if not args:\n self.output['status'] = 1\n self.output['message'].write('>>> Usage: search \\r\\n')\n return None\n\n tmp_output = None\n if args[0][0] == 'Q':\n # FIXME: Accessing private member\n # FIXME: Access to another\n tmp_output = json.loads(self.factory.apiFactory.search_address(args[0]))\n self.output['message'].write('Address: ' + str(args[0]))\n self.output['message'].write('\\r\\nBalance: ' + 
str(tmp_output['state']['balance']))\n self.output['message'].write('\\r\\nTransactions: ' + str(tmp_output['state']['transactions']))\n for tx in tmp_output['transactions']:\n self.output['message'].write(str(tx['txhash']))\n self.output['message'].write(' ')\n self.output['message'].write(str(tx['txfrom']))\n self.output['message'].write(' ')\n self.output['message'].write(str(tx['txto']))\n self.output['message'].write(' ')\n self.output['message'].write(str(tx['amount']))\n self.output['message'].write('\\r\\n')\n else:\n tmp_output = json.loads(self.factory.apiFactory.search_txhash(args[0]))\n self.output['message'].write('Txnhash: ')\n self.output['message'].write(args[0])\n if tmp_output['status'] == 'Error':\n self.output['message'].write('\\r\\n')\n self.output['message'].write(str(tmp_output['error']))\n self.output['message'].write('\\r\\n')\n return True\n self.output['message'].write('\\r\\nTimestamp: ')\n self.output['message'].write(tmp_output['timestamp'])\n self.output['message'].write('\\r\\nBlockNumber: ')\n self.output['message'].write(tmp_output['block'])\n self.output['message'].write('\\r\\nConfirmations: ')\n self.output['message'].write(tmp_output['confirmations'])\n self.output['message'].write('\\r\\nAmount: ')\n self.output['message'].write(tmp_output['amount'])\n self.output['message'].write('\\r\\n')\n\n if not tmp_output:\n self.output['status'] = 1\n self.output['message'].write('>>> No Information available')\n return True\n\n for key in list(tmp_output.keys()):\n self.output['keys'] += [str(key)]\n self.output[key] = tmp_output[key]\n\n self.output['status'] = 0\n self.output['message'].write('')\n\n def _json_block(self, args):\n if not args:\n self.output['message'].write(\n helper.json_print_telnet(self.factory.chain.m_get_last_block()) + '\\r\\n')\n return True\n try:\n int(args[0])\n except:\n self.output['message'].write('>>> Try \"json_block \" \\r\\n')\n return True\n\n if int(args[0]) > self.factory.chain.m_blockheight():\n self.output['message'].write('>>> Block > Blockheight\\r\\n')\n return True\n self.output['status'] = 0\n self.output['message'].write(\n helper.json_print_telnet(self.factory.chain.m_get_block(int(args[0]))) + '\\r\\n')\n\n def _recoverfromhexseed(self, args):\n if not args or not hexseed_to_seed(args[0]):\n self.output['message'].write('>>> Usage: recoverfromhexseed \\r\\n')\n self.output['message'].write('>>> Could take up to a minute..\\r\\n')\n self.output['message'].write('>>> savenewaddress if Qaddress matches expectations..\\r\\n')\n return True\n\n self.output['status'] = 0\n addr = self.factory.chain.wallet.get_new_address(address_type='XMSS', seed=hexseed_to_seed(args[0]))\n self.factory.newaddress = addr\n self.output['message'].write('>>> Recovery address: ' + addr[1].get_address() + '\\r\\n')\n self.output['message'].write('>>> Recovery seed phrase: ' + addr[1].get_mnemonic() + '\\r\\n')\n self.output['message'].write('>>> hexSEED confirm: ' + addr[1].get_hexseed() + '\\r\\n')\n self.output['message'].write('>>> savenewaddress if Qaddress matches expectations..\\r\\n')\n\n self.output['keys'] += ['recovery_address', 'recovery_seed_phrase', 'hexseed_confirm']\n self.output['recovery_address'] = addr[1].get_address()\n self.output['recovery_seed_phrase'] = addr[1].get_mnemonic()\n self.output['hexseed_confirm'] = addr[1].get_hexseed()\n\n def _recoverfromwords(self, args):\n if not args:\n self.output['message'].write(\n '>>> Usage: recoverfromwords \\r\\n')\n return True\n self.output['message'].write('>>> 
trying..this could take up to a minute..\\r\\n')\n if len(args) != 32:\n self.output['message'].write(\n '>>> Usage: recoverfromwords \\r\\n')\n return True\n\n args = ' '.join(args)\n addr = self.factory.chain.wallet.get_new_address(address_type='XMSS', seed=mnemonic2bin(args, wordlist))\n self.factory.newaddress = addr\n self.output['status'] = 0\n self.output['message'].write('>>> Recovery address: ' + addr[1].get_address() + '\\r\\n')\n self.output['message'].write('>>> Recovery hexSEED: ' + addr[1].get_hexseed() + '\\r\\n')\n self.output['message'].write('>>> Mnemonic confirm: ' + addr[1].get_mnemonic() + '\\r\\n')\n self.output['message'].write('>>> savenewaddress if Qaddress matches expectations..\\r\\n')\n\n self.output['keys'] += ['recovery_address', 'recovery_hexseed', 'mnemonic_confirm']\n self.output['recovery_address'] = addr[1].get_address()\n self.output['recovery_hexseed'] = addr[1].get_hexseed()\n self.output['mnemonic_confirm'] = addr[1].get_mnemonic()\n\n def _stake(self, args):\n self.output['status'] = 0\n self.output['message'].write(\n '>> Toggling stake from: ' + str(self.factory.p2pFactory.stake) + ' to: ' + str(\n not self.factory.p2pFactory.stake) + '\\r\\n')\n self.factory.p2pFactory.stake = not self.factory.p2pFactory.stake\n logger.info(('STAKING set to: ', self.factory.p2pFactory.stake))\n self.output['keys'] += ['stake']\n self.output['stake'] = self.factory.p2pFactory.stake\n\n def _stakenextepoch(self, args):\n self.output['status'] = 0\n self.output['message'].write(\n '>>> Sending a stake transaction for address: ' + self.factory.chain.mining_address + ' to activate next epoch(' + str(\n config.dev.blocks_per_epoch - (\n self.factory.chain.m_blockchain[-1].blockheader.blocknumber - (\n self.factory.chain.m_blockchain[\n -1].blockheader.epoch * config.dev.blocks_per_epoch))) + ' blocks time)\\r\\n')\n\n logger.info(('STAKE for address:', self.factory.chain.mining_address))\n\n blocknumber = self.factory.chain.block_chain_buffer.height() + 1\n self.factory.p2pFactory.pos.make_st_tx(blocknumber=blocknumber, first_hash=None)\n\n def _mempool(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Number of transactions in memory pool: ' + str(\n len(self.factory.chain.transaction_pool)) + '\\r\\n')\n self.output['keys'] += ['txn_nos']\n self.output['txn_nos'] = len(self.factory.chain.transaction_pool)\n\n def _help(self, args):\n self.output['status'] = 0\n self.output['message'].write(\n '>>> QRL ledger help: try {}'.format(', '.join(self.cmd_list)) + '\\r\\n')\n\n def _quit(self, args):\n self.transport.loseConnection()\n\n def _getinfo(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Version: ' + config.dev.version_number + '\\r\\n')\n self.output['message'].write('>>> Uptime: ' + str(time.time() - self.factory.start_time) + '\\r\\n')\n self.output['message'].write(\n '>>> Nodes connected: ' + str(len(self.factory.p2pFactory.peer_connections)) + '\\r\\n')\n self.output['message'].write('>>> Staking set to: ' + str(self.factory.p2pFactory.stake) + '\\r\\n')\n self.output['message'].write('>>> Sync status: ' + self.factory.p2pFactory.nodeState.state.name + '\\r\\n')\n\n self.output['keys'] += ['version', 'uptime', 'nodes_connected', 'staking_status', 'sync_status']\n self.output['version'] = config.dev.version_number\n self.output['uptime'] = str(time.time() - self.factory.start_time)\n self.output['nodes_connected'] = str(len(self.factory.p2pFactory.peer_connections))\n self.output['staking_status'] = 
str(self.factory.p2pFactory.stake)\n self.output['sync_status'] = self.factory.p2pFactory.nodeState.state.name\n\n def _blockheight(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Blockheight: ' + str(self.factory.chain.m_blockheight()) + '\\r\\n')\n self.output['message'].write(\n '>>> Headerhash: ' + bin2hstr(self.factory.chain.m_blockchain[-1].blockheader.headerhash) + '\\r\\n')\n\n self.output['keys'] += ['blockheight', 'headerhash']\n self.output['blockheight'] = self.factory.chain.m_blockheight()\n self.output['headerhash'] = bin2hstr(self.factory.chain.m_blockchain[-1].blockheader.headerhash)\n\n def _peers(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Connected Peers:\\r\\n')\n self.output['keys'] += ['peers']\n self.output['peers'] = {}\n for peer in self.factory.p2pFactory.peer_connections:\n self.output['message'].write(\n '>>> ' + peer.conn_identity + \" [\" + peer.version + \"] blockheight: \" + str(\n peer.blockheight) + '\\r\\n')\n self.output['peers'][peer.conn_identity] = {}\n self.output['peers'][peer.conn_identity]['version'] = peer.version\n self.output['peers'][peer.conn_identity]['blockheight'] = peer.blockheight\n\n def _reboot(self, args):\n if len(args) < 1:\n self.output['message'].write('>>> reboot \\r\\n')\n self.output['message'].write('>>> or\\r\\n')\n self.output['message'].write('>>> reboot \\r\\n')\n self.output['message'].write('>>> or\\r\\n')\n self.output['message'].write('>>> reboot \\r\\n')\n return True\n json_hash, err = None, None\n if len(args) == 3:\n json_hash, status = self.factory.chain.generate_reboot_hash(args[0], args[1], args[2])\n self.output['message'].write(str(args[0]) + str(args[1]) + str(args[2]))\n elif len(args) == 2:\n json_hash, status = self.factory.chain.generate_reboot_hash(args[0], args[1])\n else:\n json_hash, status = self.factory.chain.generate_reboot_hash(args[0])\n\n if json_hash:\n self.factory.p2pFactory.send_reboot(json_hash)\n # self.factory.state.update(NState.synced)\n self.output['message'].write(status)\n","sub_path":"qrl/core/walletprotocol.py","file_name":"walletprotocol.py","file_ext":"py","file_size_in_byte":23919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
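The record above routes every telnet command through the `cmd_mapping` dict of bound methods; a stripped-down sketch of that dispatch pattern, with hypothetical handlers rather than QRL's:

```python
# Minimal dict-based command dispatcher in the style of WalletProtocol.parse_cmd.
class MiniDispatcher:
    def __init__(self):
        self.cmd_mapping = {
            'help': self._help,
            'echo': self._echo,
        }

    def parse_cmd(self, line):
        parts = line.split()
        if not parts:                    # blank input: nothing to do
            return True
        command, args = parts[0], parts[1:]
        if command not in self.cmd_mapping:
            return False                 # caller prints "command not recognised"
        self.cmd_mapping[command](args)
        return True

    def _help(self, args):
        print('try:', ', '.join(sorted(self.cmd_mapping)))

    def _echo(self, args):
        print(' '.join(args))

d = MiniDispatcher()
d.parse_cmd('echo hello world')          # -> hello world
```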
+{"seq_id":"196747040","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 19 20:27:13 2020\n\n@author: GUPTA50\n\"\"\"\n\n#Importing the required libraries\nimport re\nimport nltk\nimport heapq\n\n\narticle_text = \"\"\n\n\n#Reading the data from the text file and creating article text\ndef text_read():\n with open(\"Economy.txt\", \"r\") as f:\n paragraphs = f.readlines()\n return paragraphs\n\n\ndef Text_processing():\n global article_text\n paragraphs = text_read()\n for p in paragraphs:\n article_text += p\n # Removing Square Brackets and Extra Spaces\n article_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text)\n article_text = re.sub(r'\\s+', ' ', article_text)\n # Removing special characters and digits\n formatted_article_text = re.sub('[^a-zA-Z]', ' ', article_text )\n #Creating a list of the stopwords using nltk library\n stopwords = nltk.corpus.stopwords.words('english')\n word_frequencies = {}\n #Creating a dictionary, which has the frequence for each word like if GST is encountered seven times it will be like \"GST\": 7\n for word in nltk.word_tokenize(formatted_article_text):\n if word not in stopwords:\n if word not in word_frequencies.keys():\n word_frequencies[word] = 1\n else:\n word_frequencies[word] += 1\n\n maximum_frequency = max(word_frequencies.values())\n #to find the weighted frequency\n for word in word_frequencies.keys():\n word_frequencies[word] = (word_frequencies[word]/maximum_frequency)\n sentence_list = nltk.sent_tokenize(article_text)\n #Calcultion of sentence scores\n sentence_scores = {}\n for sent in sentence_list:\n for word in nltk.word_tokenize(sent.lower()):\n if word in word_frequencies.keys():\n if len(sent.split(' ')) < 30:\n if sent not in sentence_scores.keys():\n sentence_scores[sent] = word_frequencies[word]\n else:\n sentence_scores[sent] += word_frequencies[word]\n \n return sentence_scores\n#Taking the top 10 sentence to create the brief summary of the article \n\ndef summary():\n summary_sentences = heapq.nlargest(10, Text_processing(), key=Text_processing().get)\n summary = ' '.join(summary_sentences)\n with open(\"Economic_Summarization.txt\", \"w\") as f:\n f.write(summary) \n \n\nif __name__ == '__main__': \n summary()","sub_path":"Text_Summarization.py","file_name":"Text_Summarization.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"423812375","text":"\"\"\"\nraycaster_cc6.py\n\n# Copyright (c) 2021, Minho Kim\n# Computer Graphics Lab, Dept. of Computer Science, University of Seoul\n# All rights reserved.\n\n\"\"\"\nfrom OpenGL.GL import *\nimport numpy as np\nimport os\nimport glfw\nimport glm\n\nclass VolumeInfo:\n def __init__(self, filename, dtype, dim, scale, level, inverted):\n self.filename = filename\n self.dtype = dtype\n self.dim = dim\n self.scale = scale\n self.level = level\n self.inverted = inverted\n\ntry:\n path_volume = os.environ['VOLUME_PATH']\nexcept:\n path_volume = './'\n\nvolumes = {\n 'ML_25' :VolumeInfo(path_volume + 'ML_25_f32.raw', np.float32, (49,49,49), (1,1,1), 0.5, False),\n 'ML_50' :VolumeInfo(path_volume + 'ML_50_f32.raw', np.float32, (99,99,99), (1,1,1), 0.5, False),\n }\n\n###################################################################################################################\n# bounding box of the volume\n#\n# - the bounding box may be composed of smaller min/max boxes for culling\n###################################################################################################################\nclass BBox:\n # dim: resolution of the volume dataset\n # scale: scaling of the volume dataset. Strictly speaking, if scale is not (1,1,1) \n # then we get a different lattice.\n#------------------------------------------------------------------------------------------------------------------------ \n def __init__(self, _dim, scale, size_fbo):\n self.dim = _dim\n\n self.fbo = FBO_bbox(size_fbo[0], size_fbo[1])\n\n # - Shaders to render the bounding box containing the whole volume.\n # - Used to set the starting/ending point of each ray.\n # - Less efficient than using `bbox_minmax.*' shaders.\n # - Used when `minmax' parameter of `render' function is FALSE.\n self.prog_bbox = Program('bbox.vert', 'bbox.frag', ['MVP', 'scale']) \n\n self.size = self.dim\n\n # - Used to fit the whole volume in viewport by re-scaling.\n self.size_max = max(self.size)\n\n # - The scaling of the bounding box.\n # - We obtain the properly scaled bounding box by applying this to a unit cube.\n self.scale_bbox = tuple(self.size[i]/self.size_max for i in range(3))\n\n # - Used to convert from [0,1]^3 space to the lattice space.\n # - Passed as `scale_axes' to raycasting shader.\n self.scale_axes = tuple(((self.dim[i])*self.size[i])/self.size_max for i in range(3))\n\n positions = np.array([ 0, 0, 1,\n 1, 0, 1,\n 1, 1, 1,\n 0, 1, 1,\n 0, 0, 0,\n 1, 0, 0,\n 1, 1, 0,\n 0, 1, 0],\n dtype=np.float32)\n indices = np.array([ 0, 1, 2, 2, 3, 0, # front\n 1, 5, 6, 6, 2, 1, # top\n 7, 6, 5, 5, 4, 7, # back\n 4, 0, 3, 3, 7, 4, # bottom\n 4, 5, 1, 1, 0, 4, # left\n 3, 2, 6, 6, 7, 3 # right\n ], dtype=np.int8)\n \n # Setting up the VAO for the bbox\n self.vao = glGenVertexArrays(1)\n glBindVertexArray(self.vao)\n\n self.vbo_position = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo_position)\n glBufferData(GL_ARRAY_BUFFER, len(positions)*ctypes.sizeof(ctypes.c_float), positions, GL_STATIC_DRAW)\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n self.vbo_idx = glGenBuffers(1)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vbo_idx)\n self.size_indices = len(indices)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(indices)*ctypes.sizeof(ctypes.c_ubyte), indices, GL_STATIC_DRAW)\n\n glBindVertexArray(0)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def render(self, MVP):\n 
glUseProgram(self.prog_bbox.id)\n glUniformMatrix4fv(self.prog_bbox.uniform_locs['MVP'], 1, GL_FALSE, MVP)\n glUniform3fv(self.prog_bbox.uniform_locs['scale'], 1, self.scale_bbox)\n glBindVertexArray(self.vao)\n glDrawElements(GL_TRIANGLES, self.size_indices, GL_UNSIGNED_BYTE, ctypes.c_void_p(0))\n glBindVertexArray(0)\n glUseProgram(0)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def render_backfaces(self, MVP):\n glDepthFunc(GL_GREATER)\n glClearDepth(0)\n glClearColor(0, 0, 0, 1)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n glEnable(GL_CULL_FACE)\n glCullFace(GL_FRONT)\n self.render(MVP)\n glDisable(GL_CULL_FACE)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def render_frontfaces(self, MVP):\n glDepthFunc(GL_LESS)\n glClearDepth(1)\n glClearColor(0, 0, 0, 1)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n glEnable(GL_CULL_FACE)\n glCullFace(GL_BACK)\n self.render(MVP)\n glDisable(GL_CULL_FACE)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def render_bbox(self, MVP):\n glViewport(0, 0, self.fbo.width, self.fbo.height)\n glBindFramebuffer(GL_FRAMEBUFFER, self.fbo.fbo)\n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.fbo.buf_back, 0)\n self.render_backfaces(MVP)\n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.fbo.buf_front, 0)\n self.render_frontfaces(MVP)\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n###################################################################################################################\nclass Volume:\n\n#------------------------------------------------------------------------------------------------------------------------ \n def __init__(self, info, size_fbo_bbox):\n\n self.load_data(info)\n\n self.bbox = BBox(self.info.dim, self.info.scale, size_fbo_bbox)\n\n self.dim_tex = [self.info.dim[0], self.info.dim[1], self.info.dim[2], 1]\n\n self.upload_data()\n \n#------------------------------------------------------------------------------------------------------------------------ \n def load_data(self, info):\n self.info = info\n scale = 1\n\n self.dim_max = max(max(self.info.dim[0], self.info.dim[1]), self.info.dim[2])\n \n # Always keep in float32 format...\n self.data = np.fromfile(info.filename, dtype=info.dtype).astype(np.float32)*scale\n\n#------------------------------------------------------------------------------------------------------------------------ \n def upload_data(self):\n if self.dim_tex[3] == 1:\n internal_format = GL_R32F\n format = GL_RED\n elif self.dim_tex[3] == 2:\n internal_format = GL_RG32F\n format = GL_RG\n\n self.texid = glGenTextures(1)\n glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n glBindTexture(GL_TEXTURE_3D, self.texid)\n glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)\n glTexImage3D(GL_TEXTURE_3D, 0, internal_format, self.dim_tex[0], self.dim_tex[1], self.dim_tex[2], 0, format, GL_FLOAT, self.data)\n glBindTexture(GL_TEXTURE_3D, 0)\n\n self.data = 
None\n###################################################################################################################\nclass FBO_bbox:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n\n self.fbo = glGenFramebuffers(1)\n glBindFramebuffer(GL_FRAMEBUFFER, self.fbo)\n\n self.buf_back = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, self.buf_back)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, None)\n glBindTexture(GL_TEXTURE_2D, 0)\n\n self.buf_front = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, self.buf_front)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n glTexImage2D(GL_TEXTURE_2D, 0,GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, None)\n glBindTexture(GL_TEXTURE_2D, 0)\n\n self.rbo = glGenRenderbuffers(1)\n glBindRenderbuffer(GL_RENDERBUFFER, self.rbo)\n glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, width, height)\n glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, self.rbo)\n glBindRenderbuffer(GL_RENDERBUFFER, 0)\n\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n\n###################################################################################################################\nclass QuadFull:\n RENDER_MODE_BLINN_PHONG = 0\n RENDER_MODE_CURVATURE = 1\n NUM_RENDER_MODE = 2\n def __init__(self, volume, size_fbo):\n self.tex_bbox_back = volume.bbox.fbo.buf_back\n self.tex_bbox_front = volume.bbox.fbo.buf_front\n self.tex_volume = volume.texid\n\n self.render_mode = self.RENDER_MODE_CURVATURE\n\n uniforms = ['tex_back', 'tex_front', 'tex_volume', 'scale_axes', 'dim', \n 'level', 'scale_step', 'MV', 'render_mode', 'tex_colormap_2d']\n\n self.prog = Program('raycast_simple.vert', 'cc6_raycast_open.frag', uniforms)\n\n self.init_colormap()\n\n self.init_vao()\n\n self.scale_step = 0.001\n\n#------------------------------------------------------------------------------------------------------------------------ \n def init_vao(self):\n verts = np.array(\n [-1, -1, 0, 0,\n 1, -1, 1, 0,\n 1, 1, 1, 1,\n -1, 1, 0, 1], dtype=np.float32)\n\n self.vao = glGenVertexArrays(1)\n glBindVertexArray(self.vao)\n self.vbo = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\n glBufferData(GL_ARRAY_BUFFER, len(verts)*ctypes.sizeof(ctypes.c_float), verts, GL_STATIC_DRAW)\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4*ctypes.sizeof(ctypes.c_float), None)\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4*ctypes.sizeof(ctypes.c_float), ctypes.c_void_p(2*ctypes.sizeof(ctypes.c_float)))\n glBindVertexArray(0)\n#------------------------------------------------------------------------------------------------------------------------ \n def render_raycast_shading(self, level, volume, MV):\n\n glClearColor(0, 0, 0, 0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glActiveTexture(GL_TEXTURE0)\n glBindTexture(GL_TEXTURE_2D, self.tex_bbox_back)\n glActiveTexture(GL_TEXTURE1)\n glBindTexture(GL_TEXTURE_2D, 
self.tex_bbox_front)\n glActiveTexture(GL_TEXTURE2)\n glBindTexture(GL_TEXTURE_3D, self.tex_volume)\n glActiveTexture(GL_TEXTURE3)\n glBindTexture(GL_TEXTURE_2D, self.tex_colormap_2d)\n\n glUseProgram(self.prog.id)\n\n glUniform1i(self.prog.uniform_locs['tex_back'], 0) \n glUniform1i(self.prog.uniform_locs['tex_front'], 1) \n glUniform1i(self.prog.uniform_locs['tex_volume'], 2)\n glUniform1i(self.prog.uniform_locs['tex_colormap_2d'], 3)\n glUniform1f(self.prog.uniform_locs['level'], level)\n glUniform3f(self.prog.uniform_locs['scale_axes'], volume.bbox.scale_axes[0], volume.bbox.scale_axes[1], volume.bbox.scale_axes[2])\n glUniform3f(self.prog.uniform_locs['dim'], volume.info.dim[0], volume.info.dim[1], volume.info.dim[2])\n glUniform1f(self.prog.uniform_locs['scale_step'], self.scale_step)\n glUniformMatrix4fv(self.prog.uniform_locs['MV'], 1, GL_FALSE, MV)\n glUniform1i(self.prog.uniform_locs['render_mode'], self.render_mode);\n\n glBindVertexArray(self.vao)\n glDrawArrays(GL_TRIANGLE_FAN, 0, 4)\n glBindVertexArray(0)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def init_colormap(self):\n# 3x3 colormap for min-max curvature\n colormap_2d = np.array([[ 1, 0, 0], [ 1, 1, 0], [0,1,0],\n [.5,.5,.5], [.5,.5,.5], [0,1,1],\n [.5,.5,.5], [.5,.5,.5], [0,0,1]], dtype=np.float32)\n self.tex_colormap_2d = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, self.tex_colormap_2d)\n glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 3, 3, 0, GL_RGB, GL_FLOAT, colormap_2d)\n\n###################################################################################################################\nclass Program:\n def __init__(self, filename_vert, filename_frag, uniforms):\n src_vert = open(filename_vert, 'r').read()\n src_frag = open(filename_frag, 'r').read()\n self.id = self.build(src_vert, src_frag, uniforms)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def compile(self, src, type):\n \n id = glCreateShader(type)\n glShaderSource(id, src)\n glCompileShader(id)\n result = glGetShaderiv(id, GL_COMPILE_STATUS)\n \n if not(result):\n print('shader compilation error.')\n print(glGetShaderInfoLog(id))\n input('press any key to continue.')\n raise RuntimeError(\n \"\"\"Shader compile failure (%s): %s\"\"\"%( result, glGetShaderInfoLog( id ),),\n src, type,)\n return id\n\n#------------------------------------------------------------------------------------------------------------------------ \n def build(self, src_vert, src_frag, uniforms):\n id_vert = self.compile(src_vert, GL_VERTEX_SHADER)\n id_frag = self.compile(src_frag, GL_FRAGMENT_SHADER)\n program = glCreateProgram()\n if not program:\n raise RunTimeError('glCreateProgram faled!')\n \n glAttachShader(program, id_vert)\n glAttachShader(program, id_frag)\n glLinkProgram(program)\n status = glGetProgramiv(program, GL_LINK_STATUS)\n if not status:\n infoLog = glGetProgramInfoLog(program)\n glDeleteProgram(program)\n glDeleteShader(id_vert)\n glDeleteShader(id_frag)\n print(infoLog)\n raise RuntimeError(\"Error linking program:\\n%s\\n\", infoLog)\n\n self.uniform_locs = {}\n for u in uniforms:\n 
self.uniform_locs[u] = glGetUniformLocation(program, u)\n return program\n\n\n###################################################################################################################\nclass Scene: \n def __init__(self, width, height):\n\n self.width = width\n self.height = height\n\n self.view_angle = 21\n self.angle_x = 320\n self.angle_y = 0\n self.position_x = 0\n self.position_y = 0\n\n\n volume_name = 'ML_25'\n# volume_name = 'ML_50'\n\n fbo_size = (width, height)\n\n self.volume = Volume(volumes[volume_name], fbo_size)\n\n self.quad_full = QuadFull(self.volume, fbo_size)\n\n self.refresh_MVP()\n\n self.texid = [self.volume.bbox.fbo.buf_front, self.volume.bbox.fbo.buf_back]\n \n self.level = volumes[volume_name].level\n\n#------------------------------------------------------------------------------------------------------------------------ \n def refresh_MVP(self):\n\n self.P = glm.perspective(np.radians(self.view_angle), self.width/self.height, 1, 3)\n\n self.MV = glm.translate(glm.mat4(), glm.vec3(self.position_x, self.position_y, -2))\n self.MV = glm.rotate(self.MV, np.radians(self.angle_x), glm.vec3(1,0,0))\n self.MV = glm.rotate(self.MV, np.radians(self.angle_y), glm.vec3(0,1,0))\n\n self.MVP = np.array(self.P * self.MV)\n\n self.MV = np.array(self.MV)\n#------------------------------------------------------------------------------------------------------------------------ \n def render_shading(self):\n self.volume.bbox.render_bbox(self.MVP)\n self.quad_full.render_raycast_shading(self.level, self.volume, self.MV) \n\n###################################################################################################################\nclass RenderWindow:\n def __init__(self):\n cwd = os.getcwd() # save current working directory\n glfw.init() # initialize glfw - this changes cwd\n os.chdir(cwd) # restore cwd\n\n glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)\n glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)\n glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)\n glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n \n self.width, self.height = 512, 512\n self.aspect = self.width/float(self.height)\n self.win = glfw.create_window(self.width, self.height, 'raycaster (cc6)', None, None)\n glfw.make_context_current(self.win)\n\n # for retina display...\n self.fb_width, self.fb_height = glfw.get_framebuffer_size(self.win)\n\n glEnable(GL_DEPTH_TEST)\n glClearColor(0.0, 0.0, 0.0,0.0)\n\n glfw.set_key_callback(self.win, self.onKeyboard)\n glfw.set_window_size_callback(self.win, self.onSize) \n\n self.scene = Scene(self.fb_width, self.fb_height)\n\n self.exitNow = False\n \n def onKeyboard(self, win, key, scancode, action, mods):\n if action == glfw.PRESS:\n # ESC to quit\n if key == glfw.KEY_ESCAPE: \n self.exitNow = True\n elif key == glfw.KEY_RIGHT:\n self.scene.angle_y = (self.scene.angle_y + 10) % 360\n self.scene.refresh_MVP()\n elif key == glfw.KEY_LEFT:\n self.scene.angle_y = (self.scene.angle_y - 10) % 360\n self.scene.refresh_MVP()\n elif key == glfw.KEY_UP:\n self.scene.angle_x = (self.scene.angle_x - 10) % 360\n self.scene.refresh_MVP()\n elif key == glfw.KEY_DOWN:\n self.scene.angle_x = (self.scene.angle_x + 10) % 360\n self.scene.refresh_MVP()\n elif key == glfw.KEY_EQUAL:\n self.scene.level = self.scene.level + set_step_level(mods)\n print(self.scene.level)\n elif key == glfw.KEY_MINUS:\n self.scene.level = self.scene.level - set_step_level(mods)\n print(self.scene.level)\n elif key == glfw.KEY_PAGE_UP:\n self.scene.view_angle = 
self.scene.view_angle - 1\n self.scene.refresh_MVP()\n print(self.scene.view_angle)\n elif key == glfw.KEY_PAGE_DOWN:\n self.scene.view_angle = self.scene.view_angle + 1\n self.scene.refresh_MVP()\n print(self.scene.view_angle)\n elif key == glfw.KEY_TAB:\n self.scene.quad_full.render_mode = (self.scene.quad_full.render_mode + 1) % self.scene.quad_full.NUM_RENDER_MODE\n print(self.scene.quad_full.render_mode)\n \n def onSize(self, win, width, height):\n self.aspect = width/float(height)\n self.scene.width = width\n self.scene.height = height\n\n def run(self):\n glfw.set_time(0)\n glClearColor(1,1,1,1)\n lastT = glfw.get_time()\n frames = 0\n while not glfw.window_should_close(self.win) and not self.exitNow:\n currT = glfw.get_time()\n if frames == 20:\n elapsed = currT - lastT\n print('fps = {}'.format(frames/elapsed))\n lastT = currT\n frames = 0\n self.scene.render_shading()\n frames += 1\n glfw.swap_buffers(self.win)\n glfw.poll_events()\n glfw.terminate()\n\n# main() function\ndef main():\n print(\"Starting raycaster. \"\n \"Press ESC to quit.\")\n rw = RenderWindow()\n rw.run()\n\n# call main\nif __name__ == '__main__':\n main()\n","sub_path":"raycaster_cc6.py","file_name":"raycaster_cc6.py","file_ext":"py","file_size_in_byte":21470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
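For orientation, a CPU-side restatement (an illustration only; the real computation happens in the fragment shader cc6_raycast_open.frag) of what the front/back FBO passes give the raycaster: per-pixel ray entry and exit points in the unit cube whose difference is the ray, sampled every `scale_step`:

```python
# Entry/exit points come from buf_front/buf_back; scale_step matches QuadFull.
import numpy as np

def ray_from_faces(front_rgb, back_rgb, scale_step=0.001):
    entry = np.asarray(front_rgb, dtype=np.float32)
    exit_ = np.asarray(back_rgb, dtype=np.float32)
    direction = exit_ - entry
    length = float(np.linalg.norm(direction))
    steps = int(length / scale_step)          # number of samples along the ray
    unit = direction / length if length > 0 else direction
    return entry, unit, steps

entry, unit, steps = ray_from_faces((0.1, 0.5, 0.0), (0.9, 0.5, 1.0))
```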
+{"seq_id":"442184719","text":"\nfrom collections import defaultdict as dd\nimport random\nfrom scipy import sparse\nimport numpy as np\nfrom numpy import linalg as lin\n\ndef top_sort(in_edges):\n g_order, new_update = [1], [1]\n in_d = {}\n mat = set()\n for k, edges in in_edges.iteritems():\n in_d[k] = len(edges)\n for j in edges:\n mat.add((k, j))\n while True:\n new_new_update = []\n for k in in_edges.keys():\n if in_d[k] == 0: continue\n for j in new_update:\n if (k, j) in mat is not None: in_d[k] -= 1\n if in_d[k] == 0: new_new_update.append(k)\n if len(new_new_update) == 0:\n break\n for k in new_new_update:\n g_order.append(k)\n new_update = new_new_update\n return g_order\n\ndef read_cites(filename):\n cites = []\n for line in open(filename):\n cites.append(tuple(line.strip().split()))\n return cites\n\ndef read_content(filename):\n features, labels, label_set, num_f = dd(list), {}, set(), 0\n for line in open(filename):\n inputs = line.strip().split()\n id, label = inputs[0], inputs[-1]\n labels[id] = label\n label_set.add(label)\n for i in range(1, len(inputs) - 1):\n if float(inputs[i]) > 0:\n features[id].append((i - 1, 1.0))\n num_f = max(num_f, i)\n return features, labels, sorted(list(label_set)), num_f\n\ndef split_data(labels, perc):\n train, test = [], []\n random.seed(0)\n for id in labels.keys():\n rand = random.random()\n if rand < 0.7 * perc:\n train.append(id)\n elif rand > 0.7:\n test.append(id)\n return train, test\n\ndef construct_x_y(features, labels, label_list, num_f, ids):\n row, col, data = [], [], []\n y = np.zeros((len(ids), len(label_list)), dtype = np.int32)\n for i, id in enumerate(ids):\n for ff in features[id]:\n row.append(i)\n col.append(ff[0])\n data.append(ff[1])\n y[i, label_list.index(labels[id])] = 1.0\n x = sparse.coo_matrix((data, (row, col)), shape = (len(ids), num_f), dtype = np.float32).tocsr()\n return x, y\n\ndef get_index(keys):\n index = {}\n for i, id in enumerate(keys):\n index[id] = i\n return index\n\ndef gen_random_walk(cites, index):\n graph = dd(list)\n for id1, id2 in cites:\n graph[id1].append(id2)\n graph[id2].append(id1)\n\n g = []\n for _ in range(5):\n for id1 in graph.keys():\n path = [id1]\n for _ in range(10):\n path.append(random.choice(graph[path[-1]]))\n for i in range(len(path)):\n for j in range(i - 3, i + 3 + 1):\n if j < 0 or j >= len(path): continue\n if path[i] not in index or path[j] not in index: continue\n g.append([index[path[i]], index[path[j]]])\n return np.array(g, dtype = np.int32)\n\ndef gen_second(cites, index):\n graph = dd(list)\n for id1, id2 in cites:\n graph[id1].append(id2)\n graph[id2].append(id1)\n\n g = []\n for id1 in graph.keys():\n if id1 not in index: continue\n for id2 in graph[id1]:\n if id2 in index:\n g.append([index[id1], index[id2]])\n return np.array(g, dtype = np.int32)\n\ndef gen_graph_features(cites, ids):\n new_index = {}\n cnt = 0\n for id1, id2 in cites:\n if id1 not in new_index:\n new_index[id1] = cnt\n cnt += 1\n if id2 not in new_index:\n new_index[id2] = cnt\n cnt += 1\n\n row, col, data = [], [], []\n index = get_index(ids)\n for id1, id2 in cites:\n if id1 not in index: continue\n row.append(index[id1])\n col.append(new_index[id2])\n data.append(1.0)\n return sparse.coo_matrix((data, (row, col)), shape = (len(ids), cnt), dtype = np.float32).tocsr()\n\ndef read_params(filename):\n max_id = 0\n rec, label_set = [], set()\n for line in open(filename):\n if not line.startswith('f'): continue\n inputs = line.strip().split()\n w = float(inputs[1])\n inputs = inputs[0][3 
:][: -1].split(',')\n id = int(inputs[0]) - 1\n label = inputs[1][1 :]\n\n max_id = max(id, max_id)\n label_set.add(label)\n rec.append((id, label, w))\n\n ret = {}\n for label in label_set:\n ret[label] = np.zeros((max_id + 1, 1), dtype = np.float32)\n for id, label, w in rec:\n ret[label][id, 0] = w\n return ret\n\ndef read_ids(filename):\n ids = []\n for line in open(filename):\n id = line.strip().split()[0].split('(')[1].split(',')[0].strip()\n ids.append(id)\n return ids\n\ndef gen_dataset(cite_file, corpus_file, perc, args):\n cites = read_cites(cite_file)\n features, labels, label_list, num_f = read_content(corpus_file)\n train_id, test_id = split_data(labels, perc)\n # train_id, test_id = read_ids('train.examples'), read_ids('test.examples')\n\n x, y = construct_x_y(features, labels, label_list, num_f, labels.keys())\n fea_vecs = {'f': sparse.hstack([x, gen_graph_features(cites, labels.keys())], format = 'csr')}\n # fea_vecs = {'f': x}\n ret_labels = {}\n for i, label in enumerate(label_list):\n ret_labels[label] = y[:, i]\n index = get_index(labels.keys())\n train_ind = [index[i] for i in train_id]\n test_ind = [index[i] for i in test_id]\n\n g, gy = {}, {}\n if args.entropy:\n g['entropy'] = np.arange(x.shape[0]).reshape((x.shape[0], 1))\n gy['entropy'] = np.ones(x.shape[0], dtype = np.float32) * 1e-1\n\n if args.manifold:\n g['manifold'] = gen_second(cites, index)\n gy['manifold'] = np.ones(g['manifold'].shape[0], dtype = np.float32) * 1e-4\n\n if args.cotrain:\n g['cotrain'] = np.arange(x.shape[0]).reshape((x.shape[0], 1))\n gy['cotrain'] = np.ones(x.shape[0], dtype = np.float32) * 1e-3\n\n # fea_vecs['g'] = sparse.hstack([x, gen_graph_features(cites, labels.keys())], format = 'csr')\n fea_vecs['g'] = gen_graph_features(cites, labels.keys())\n\n return fea_vecs, ret_labels, train_ind, test_ind, g, gy\n","sub_path":"compile/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"595305781","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 14 17:05:59 2014\n\n@author: Alejandro\n\"\"\"\nfrom GaussianExperiment import GaussianExperiment\n#[Experiment 1]\ndef experiment(n_samples, n_outliers, n_clusters, n_features, n_experiment):\n gaussianExperiment = GaussianExperiment(n_samples = n_samples, \\\n n_outliers = n_outliers, n_clusters = n_clusters, \\\n n_features = n_features, n_experiment = n_experiment)\n\n X,y = gaussianExperiment.generate_data()\n X_contaminated = gaussianExperiment.generate_contamination()\n gaussianExperiment.show_graph_3d(X,y,X_contaminated)\n #gaussianExperiment.save_data()\n \nexperiment(n_samples= 300 , n_outliers = 150 , n_clusters = 4 \\\n , n_features =3 , n_experiment = 1)\n \nexperiment(n_samples= 300 , n_outliers = 150 , n_clusters = 4 \\\n , n_features =3 , n_experiment = 2)","sub_path":"matlab/+gaussian_experiment/second_code/ParameterExperiment.py","file_name":"ParameterExperiment.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"650052236","text":"def fibo(n):\n if n <= 1:\n return n\n else:\n return (fibo(n - 1) + fibo(n - 2))\n\nntermos = eval(input('Quantos termos da sequencia deseja imprimir? '))\nwhile ntermos <= 0:\n ntermos = eval(input('Digite um número positivo: '))\nfor i in range(ntermos):\n print(fibo(i+1)) # usa-se +1 para que não seja imprimido o \"0\", que não faz parte da sequência fibonacci\n","sub_path":"fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"375898322","text":"# Exercise 2: Change your socket program so that it counts the number of\n# characters it has received and stops displaying any text after it has shown\n# 3000 characters. The program should retrieve the entire document and count\n# the total number of characters and display the count of the number of\n# characters at the end of the document.\n\n# This version of the program displays all received characters,\n# including the header data.\n\nimport socket\n\ngeturl = input(\"Please enter a URL: \")\n\ncount = 0\nsite = b\"\"\n\ntry:\n getsock = geturl.split('/')\n\n url = 'GET ' + geturl + ' HTTP/1.0\\r\\n\\r\\n'\n\n mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n mysock.connect((getsock[2], 80))\n cmd = url.encode()\n mysock.send(cmd)\n\n while True:\n data = mysock.recv(512)\n if len(data) < 1:\n break\n count += len(data)\n site += data\n\n mysock.close()\n\n print(site[:3001].decode())\n print(\"\\n\")\n print('Total character count is:', count)\n\nexcept:\n print('Error, improperly formatted or non-existent URL.')\n","sub_path":"Python/FreeCodeCamp/PY4E_Python_for_Everybody/ch_12_NetworkedPrograms/ex_12_02_01.py","file_name":"ex_12_02_01.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"325059714","text":"\"\"\" Convert JSON to relevant matrices. \"\"\"\n\nfrom pandas import DataFrame\n\ndef person_packages(data):\n \"\"\" Person by Package Matrix. \"\"\"\n ppm = []\n for datum in data:\n package_url = datum['package_url']\n for participant in datum['participants']['participants']:\n ppm.append((participant, package_url))\n\n df = DataFrame(ppm)\n df.columns = ['person', 'package']\n return df\n\ndef packages_weights(data):\n \"\"\" Person by Weights Matrix. \"\"\"\n pwm = []\n for datum in data:\n pwm.append((datum['package_url'],\n datum['repo_activity']['repository_forks'],\n datum['repo_activity']['repository_stars'],\n datum['usage']['# Using This']))\n pf = DataFrame(pwm)\n pf.columns = ['package', 'repository_forks', 'repository_stars',\n 'using_this']\n return pf\n\n","sub_path":"Crawl/json_conversion.py","file_name":"json_conversion.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"29119314","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup # Parser de código html\nimport requests # Descargar html\nimport re # Expresiones regulares\nfrom urlparse import urljoin # Obtener las url de las etiquetas a\nfrom collections import namedtuple\nimport logging\nimport sys\n\nCICLO = \"2015-2\"\nURL_HORARIOS = \"http://www.fciencias.unam.mx/docencia/horarios/indice/20152\"\n\n\nclass Materia(object):\n \"\"\"docstring for Materia\"\"\"\n def __init__(self, arg, nombre):\n super(Materia).__init__()\n self.id = id\n self.nombre = nombre\n self.carreras = list()\n\n\ndef crear_url_carreras(url_horarios, lista_tag_carreras):\n Carrera = namedtuple('Carrera', 'identificador nombre plan url')\n carreras = set()\n for tag_carrera in lista_tag_carreras:\n plan_re = re.search(\"\\d{4}\", tag_carrera.text)\n if plan_re is None: # Si no encuentra el plan entonces lo ignora\n continue\n identificador = tag_carrera[\"href\"].split(\"/\")[-1]\n nombre = tag_carrera.text\n plan = plan_re.group(0)\n url = urljoin(url_horarios, tag_carrera[\"href\"])\n nueva_carrera = Carrera(identificador, nombre, plan, url)\n carreras.add(nueva_carrera)\n return carreras\n\n\ndef crear_url_materias(url_horarios, conjunto_carreas):\n sys.stdout.write(\"Obteniendo las materias\") # python2 para imprimir sin salto de línea\n sys.stdout.flush()\n Materia = namedtuple('Materia', 'identificador nombre carrera_id url')\n materias = set()\n for carrera in conjunto_carreas:\n intentos = 10\n while intentos:\n try:\n respuesta = requests.get(carrera.url, timeout=5)\n respuesta.raise_for_status() # Verifica que el estado de la respuesta\n except Exception:\n intentos -= 1\n if intentos == 0:\n logging.warning(\"La url: \" + carrera.url + \" no se pudo acceder\")\n continue\n break\n sys.stdout.write(\".\") # python2 para imprimir sin salto de línea\n sys.stdout.flush()\n sopa_materias = BeautifulSoup(respuesta.text)\n # Encuntra el id donde están los links a las materias\n div_materias = sopa_materias.find(id=\"info-contenido\")\n # Encuentra las url de las carreras\n lista_tag_materias = div_materias.find_all(\"a\", href=re.compile(\"/docencia/\"))\n for tag_materia in lista_tag_materias:\n identificador = tag_materia[\"href\"].split(\"/\")[-1]\n nombre = tag_materia.text.split(\",\")[0]\n carrera_id = carrera.identificador\n url = urljoin(url_horarios, tag_materia[\"href\"])\n nueva_materia = Materia(identificador, nombre, carrera_id, url)\n materias.add(nueva_materia)\n print(\"\")\n return materias\n\n\ndef crear_url_cursos(url_horarios, conjunto_materias):\n sys.stdout.write(\"Obteniendo los cursos\") # python2 para imprimir sin salto de línea\n sys.stdout.flush()\n Curso = namedtuple('Curso', 'identificador semestre profesores materia_id')\n cursos = set()\n for materia in conjunto_materias:\n intentos = 10\n while intentos:\n try:\n respuesta = requests.get(materia.url, timeout=5)\n respuesta.raise_for_status() # Verifica que el estado de la respuesta\n except Exception:\n intentos -= 1\n if intentos == 0:\n logging.warning(\"La url: \" + materia.url + \" no se pudo acceder\")\n continue\n break\n sys.stdout.write(\".\") # python2 para imprimir sin salto de línea\n sys.stdout.flush()\n sopa_materias = BeautifulSoup(respuesta.text)\n # Encuntra el id donde están los cursos\n div_materias = sopa_materias.find(id=\"info-contenido\")\n # Encuentra las url de las carreras\n for contenido in div_materias.children:\n print(contenido)\n break\n # pendiente\n print(\"\")\n return cursos\n\n\ndef 
obtener_paginas_de_cursos(url_horarios, ciclo):\n print(\"Fetching the degree programs\")\n cursos_html = set()\n # process the school-term page\n intentos = 10\n while intentos:\n try:\n respuesta = requests.get(url_horarios, timeout=5)\n respuesta.raise_for_status() # check the response status\n except Exception:\n intentos -= 1\n if intentos == 0:\n logging.warning(\"The faculty page could not be accessed\")\n return\n continue\n break\n sopa_carreras = BeautifulSoup(respuesta.text)\n # find the id that holds the links to the degree programs\n div_carreras = sopa_carreras.find(id=\"info-contenido\")\n if div_carreras is None:\n raise Exception(\"The degree programs div was not found\")\n # find the degree program urls\n url_carreras = div_carreras.find_all(\"a\", href=re.compile(\"/indiceplan/\"))\n if len(url_carreras) == 0:\n raise Exception(\"The degree program urls were not found\")\n carreras = crear_url_carreras(url_horarios, url_carreras)\n materias = crear_url_materias(url_horarios, carreras)\n cursos = crear_url_cursos(url_horarios, materias)\n # builds the degree program urls and plan\n\n # pending\n return cursos_html\n\n\ndef main():\n cursos_crudos = obtener_paginas_de_cursos(URL_HORARIOS, CICLO)\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"back_end/python/obtiene_horarios.py","file_name":"obtiene_horarios.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
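The scraper above repeats the same ten-attempt retry loop in three places; below is a sketch of that pattern factored into one helper (Python 3 spelling; `fetch_with_retries` is an illustrative name, not from the source).

```python
import logging
import requests

def fetch_with_retries(url, attempts=10, timeout=5):
    """Return the Response for url, or None once all attempts have failed."""
    for _ in range(attempts):
        try:
            response = requests.get(url, timeout=timeout)
            response.raise_for_status()  # raise on 4xx/5xx status codes
            return response
        except requests.RequestException:
            continue
    logging.warning("The url: %s could not be accessed", url)
    return None
```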
+{"seq_id":"556314401","text":"import random\n\nclass Segment:\n\n def __init__ (self, X, Y):\n self.X = int(X)\n self.Y = int(Y)\n \n def setNext (self, next):\n self.next = next\n\nclass Snake:\n\n def __init__ (self, head, tail):\n self.head = head\n self.tail = tail\n self.length = 3\n self.direction = \"down\"\n \n def move(self):\n nextX = self.head.X\n nextY = self.head.Y\n nextSeg = self.head.next\n if self.direction == \"up\": \n self.head.Y -= 1\n elif self.direction == \"down\":\n self.head.Y += 1\n elif self.direction == \"right\":\n self.head.X += 1\n elif self.direction == \"left\":\n self.head.X -= 1\n\n for i in range(self.length-1):\n xBuff = nextSeg.X\n yBuff = nextSeg.Y\n nextSeg.X = nextX\n nextSeg.Y = nextY\n if (i < self.length-2):\n nextSeg = nextSeg.next\n nextX = xBuff\n nextY = yBuff\n\n def isDead(self, height, width):\n if self.length > 4:\n currentSeg = self.head.next.next.next\n for i in range (self.length):\n if currentSeg.X == self.head.X and currentSeg.Y == self.head.Y:\n return True\n if i < self.length-4:\n currentSeg = currentSeg.next\n if self.head.Y < 0 or self.head.Y >= height or self.head.X >= width or self.head.X < 0:\n return True\n\n def grow(self):\n self.length += 1\n newSeg = Segment(self.head.X, self.head.Y)\n newSeg.next = self.head\n self.head = newSeg\n \nclass Apple: \n\n def __init__ (self, width, height):\n self.xMax = width - 1\n self.yMax = height - 1\n self.generate()\n\n def generate(self):\n self.X = random.randint(0, self.xMax)\n self.Y = random.randint(0, self.yMax) \n ","sub_path":"Snake AI/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"461991871","text":"# -*- coding:utf8 -*-\nimport sys\n\n\nclass ListNode:\n\tvalue = \"\"\n\tnext = None\n\tdef __init__(self):\n\t\tself.value = \"\"\n\t\tself.next = None\n\ndef constructList(List):\n\theadNode = None\n\tfor item in List:\n\t\theadNode = addToTail(headNode , item)\n\treturn headNode\ndef addToTail(listNode , value):\n\tnewNode = ListNode()\n\tnewNode.value = value\n\tnewNode.next = None\n\tif listNode == None:\n\t\theadNode = newNode\n\telse:\n\t\theadNode = listNode\n\t\twhile listNode.next != None:\n\t\t\tlistNode = listNode.next\n\t\tlistNode.next = newNode\n\treturn headNode\ndef addToHead(listNode , value):\n\tnewNode = ListNode()\n\tnewNode.value = value\n\tnewNode.next = listNode\n\treturn newNode\ndef removeNode(listNode , value):\n\tif listNode == None:\n\t\treturn None\n\telif listNode.next == None:\n\t\tif listNode.value == value:\n\t\t\treturn None\n\t\telse:\n\t\t\treturn listNode\n\telse:\n\t\theadNode = listNode\n\t\twhile listNode.next != None:\n\t\t\tif listNode.next.value != value:\n\t\t\t\tlistNode = listNode.next\n\t\t\telse:\n\t\t\t\tlistNode.next = listNode.next.next\n\t\treturn headNode\n\n\n\n\n\n\n\n\n\n\n\n\t\t\t\t\n\t\t\n\t\t\n\t\t\n\n\n","sub_path":"ListNode.py","file_name":"ListNode.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"45263101","text":"#\n# @lc app=leetcode id=949 lang=python3\n#\n# [949] Largest Time for Given Digits\n#\nclass Solution:\n def largestTimeFromDigits(self, A):\n A.sort()\n for h in range(23, -1, -1):\n for m in range(59, -1, -1):\n t = [int(h / 10), h % 10, int(m / 10), m % 10]\n ts = sorted(t)\n if ts == A:\n return str(t[0]) + str(t[1]) + \":\" + str(t[2]) + str(t[3])\n return \"\"\n","sub_path":"leetcode/949.largest-time-for-given-digits.py","file_name":"949.largest-time-for-given-digits.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"42371184","text":"import pandas as pd\nimport os\nimport numpy as np\nimport xlsxwriter\n\n\n\nall_df_list = []\n\ncwd = os.path.abspath('')\nfiles = os.listdir(cwd)\nwriter = pd.ExcelWriter(cwd + '//Sorted_Stocks.xlsx', engine = 'xlsxwriter')\ncounter = 0\n\nfor file in files:\n if file.endswith ('.csv'):\n all_df_list.append(pd.read_csv(file))\n if(len(all_df_list) > 50):\n counter = counter + 1\n appended_df = pd.concat(all_df_list)\n appended_df.to_excel(writer, sheet_name = 'Set' + str(counter))\n all_df_list = []\n\nwriter.save()\nwriter.close()\n\n#appended_df.to_excel(\"Sorted_Stocks.xlsx\", index=False)\n","sub_path":"Sorter2.py","file_name":"Sorter2.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"491321470","text":"# 输入 八进制 值\nx = str(input(\"请输入需要转换的数据:\"))\n# 八进制 与 二进制 转换关系\ndict1 = {\"0\":\"000\",\n \"1\":\"001\",\n \"2\":\"010\",\n \"3\":\"011\",\n \"4\":\"100\",\n \"5\":\"101\",\n \"6\":\"110\",\n \"7\":\"111\",\n}\nlist1 = []\nlist2 = []\n# 将 x 值拆分并传入 数组list1 中\nlist1.extend(x)\n# 循环遍历\nfor i in list1:\n # 将 字典dict1 中对应值传入 数组list2 中\n list2.append(dict1.get(i))\n# 数组 转换成 字符串\nstr1 = ''.join(list2)\n# 打印 二进制 输出值\nprint(int(str1))\n\n","sub_path":"SmallProgram/demo07.py","file_name":"demo07.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"445377564","text":"import numpy as np\r\n\r\n\r\ndef run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n \"\"\"Run the Viterbi algorithm.\r\n\r\n N - number of tokens (length of sentence)\r\n L - number of labels\r\n\r\n As an input, you are given:\r\n - Emission scores, as an NxL array - score[token][label]\r\n - Transition scores (Yp -> Yc), as an LxL array\r\n - Start transition scores (S -> Y), as an Lx1 array\r\n - End transition scores (Y -> E), as an Lx1 array\r\n\r\n You have to return a tuple (s,y), where:\r\n - s is the score of the best sequence\r\n - y is the size N array/seq of integers representing the best sequence.\r\n \"\"\"\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n y = [] # score table\r\n sequence = [0]*N # final sequence\r\n bp = [] # backpointers\r\n # init table, N rows and L columns\r\n for i in range(N):\r\n y.append([0]*L)\r\n\r\n # init backpointer table, N-1 rows, L columns\r\n for i in range(N-1):\r\n bp.append([0]*L)\r\n\r\n for i in range(N):\r\n for y_i in range(L):\r\n max_score = float(\"-inf\")\r\n for y_prev in range(L):\r\n score = 0\r\n if i == 0: # if first token, use start transition\r\n # note, previous score is zero\r\n score = emission_scores[i][y_i] + start_scores[y_i]\r\n else: # else lookup transition score in table\r\n score = emission_scores[i][y_i] + trans_scores[y_prev][y_i] + y[i-1][y_prev]\r\n \r\n if score > max_score:\r\n max_score = score\r\n if i > 0: # update backpointer table\r\n bp[i-1][y_i] = y_prev\r\n y[i][y_i] = max_score \r\n\r\n final_score = float(\"-inf\")\r\n for y_end in range(L): # consider end transition scores, assume zero emission for eos\r\n # y_end is the label of the last token\r\n score = end_scores[y_end] + y[N-1][y_end]\r\n if score > final_score: \r\n final_score = score\r\n sequence[-1] = y_end\r\n\r\n # build sequence:\r\n for i in range(-1, -N, -1):\r\n sequence[i-1] = bp[i][sequence[i]]\r\n\r\n # score set to 0\r\n return (final_score, sequence)","sub_path":"hw3/viterbi.py","file_name":"viterbi.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"356722416","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom Users import views\nfrom . import views as betting_app_views\n\nurlpatterns = [\n\n ##############################\n # Admin\n ##############################\n url(r'^admin/', include(admin.site.urls)),\n\n ##############################\n # Accounts\n ##############################\n url(r'^$', betting_app_views.index, name='index'),\n url(r'^login/$', views.login, name='login'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^register/$', views.register, name='register'),\n\n ##############################\n # Search\n ##############################\n url(r'^search/$', views.search_redirect, name='search_redirect'),\n url(r'^search-home/$', views.search_home, name='search_home'),\n url(r'^search/(?P[a-zA-Z0-9]+)$', views.search, name='search'),\n\n ##############################\n # User\n ##############################\n url(r'^me/$', views.me, name='me'),\n url(r'^notifications/$', views.notifications, name='notifications'),\n url(r'^requests/$', views.friend_requests, name='requests'),\n url(r'^friends/$', views.friends, name='friends'),\n url(r'^new-bet-user/$', views.new_bet_user, name='new-bet-user'),\n url(r'^new-bet/$', views.new_bet, name='new-bet'),\n url(r'^bets/$', views.bets, name='bets'),\n url(r'^bets/(?P[\\w\\W]+)/$', views.bets_title, name='bets_title'),\n url(r'^(?P[\\w]+)/', include('Users.urls')),\n\n\n]\n","sub_path":"betting_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"352006743","text":"#!/usr/bin/env python3\n\nimport logging\nimport os\nimport re\nimport sys\nimport tempfile\nfrom shutil import move\n\nfrom api2sshallowedusers.helpers import Command\n\nlogger = logging.getLogger(__name__)\n\n\ndef find_daemon():\n if Command('systemd', 'sshd').check():\n return Command('systemd', 'sshd')\n elif Command('init', 'ssh').check():\n return Command('init', 'ssh')\n elif Command('init', 'sshd').check():\n return Command('init', 'sshd')\n else:\n logger.error('is sshd running ? (i can only reload)')\n logger.error('exiting..')\n sys.exit(1)\n\n\nclass SSHConfig(object):\n def __init__(self, file):\n\n self.sshd = find_daemon()\n self.users = []\n self.restricted_users = False\n self.regex_users = re.compile('^AllowUsers (\\w+ ?)*$')\n\n try:\n self.filename = os.path.abspath(file)\n logger.info('using ssh config file : %s' % self.filename)\n with open(self.filename, 'r+'):\n pass\n except PermissionError:\n logger.error('cant open file for modifications, exiting..')\n sys.exit(1)\n except FileNotFoundError:\n logger.error('cant find file, exiting..')\n sys.exit(1)\n\n with open(self.filename, 'r') as f:\n for line in f:\n if self.regex_users.match(line):\n self.users = line.split()\n self.users.remove('AllowUsers')\n self.restricted_users = True\n\n logger.info('users: %s' % self.users)\n\n def add_user(self, user):\n if user not in self.users:\n logger.info('adding user %s' % user)\n self.users.append(user)\n return self.commit()\n return False\n\n def del_user(self, user):\n if user in self.users:\n logger.info('removing user %s' % user)\n self.users.remove(user)\n return self.commit()\n return False\n\n def commit(self):\n new_config, new_config_name = tempfile.mkstemp(dir=os.getcwd())\n with open(new_config, 'w') as new:\n with open(self.filename, 'r') as old:\n for line in old:\n if self.regex_users.match(line):\n new.write('AllowUsers {users}\\n'.format(\n users=' '.join(self.users)))\n else:\n new.write(line)\n if not self.restricted_users:\n new.write('AllowUsers {users}\\n'.format(\n users=' '.join(self.users)))\n self.restricted_users = True\n try:\n move(new_config_name, self.filename)\n except Exception:\n logger.error('cant write new sshd_config file')\n return False\n return True\n","sub_path":"api2sshallowedusers/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"297444507","text":"import pandas as pd\nimport seaborn as sns\nimport numpy as np\n\n\n######################################################################\n### Plotting with relative data #\n######################################################################\ntips = pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/tips.csv\")\nsns.set()\n\nsns.load_dataset(\"tips\")\n\n# print(tips.get(\"total_bill\"))\n\nsns.relplot(x=\"total_bill\",\n y=\"tip\",\n col=\"time\",\n hue=\"smoker\",\n size=\"size\",\n style=\"smoker\",\n data=tips)\nsns.relplot(x=\"total_bill\", y=\"tip\", size=\"size\", sizes=(15, 200), data=tips);\n\n\n#prepare a dataframe and plot using lineplot\ndf = pd.DataFrame(time=np.arange(500),\n value=np.random.randn().cumsum())\ng=sns.relplot(x=\"time\",y=\"value\",kind=\"line\",data=df)\ng.fig.autofmt_xdate()\n\n#disable the default sort\ndf = pd.DataFrame(np.random.randn(500, 2).cumsum(axis=0),\n columns=[\"x\", \"y\"])\nsns.relplot(x=\"x\", y=\"y\", sort=False, kind=\"line\", data=df);\n\nfmri = sns.load_dataset(\"fmri\")\nsns.relplot(x=\"timepoint\", y=\"signal\", kind=\"line\", data=fmri);\n\n#Without confidence interval\nfmri = sns.load_dataset(\"fmri\")\nsns.relplot(x=\"timepoint\", y=\"signal\", kind=\"line\", data=fmri,ci=None);\n\nsns.relplot(x=\"timepoint\", y=\"signal\", estimator=None, kind=\"line\", data=fmri);\n\n#to turn off the estimator\nsns.relplot(x=\"timepoint\", y=\"signal\", estimator=None, kind=\"line\", data=fmri);\n\nsns.relplot(x=\"timepoint\", y=\"signal\", hue=\"event\", kind=\"line\", data=fmri);\n\n#to plot markers for identification of subset\nsns.relplot(x=\"timepoint\",y=\"signal\",hue=\"region\",style=\"event\",dashes=False,markers=True,kind=\"line\",data=fmri)\n\nemp_df = pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/employee.csv\")\nprint(emp_df)\n\nsns.relplot(x=\"empdeptname\",y=\"empsalary\",hue=\"empdeptname\",style=\"empdeptname\",kind=\"scatter\",data=emp_df)\n\nsns.relplot(x=\"timepoint\", y=\"signal\", hue=\"region\",\n units=\"subject\", estimator=None,\n kind=\"line\", data=fmri.query(\"event == 'stim'\"));\n\ndots = pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/dots.csv\")\n\nprint(dots)\n\nsns.relplot(x=\"time\",y=\"firing_rate\",hue=\"coherence\",style=\"choice\",kind=\"line\",data=dots)\n\n\n##changing the color of the data presentation\npalette = sns.cubehelix_palette(light=.8, n_colors=10)\nsns.relplot(x=\"time\",\n y=\"firing_rate\",\n hue=\"coherence\",\n style=\"choice\",\n kind=\"line\",\n palette=palette,\n data=dots)\n\n##use size to increase the visibility of the lines plotted\nsns.relplot(x=\"timepoint\", y=\"signal\", hue=\"subject\",\n col=\"region\", row=\"event\", height=3,\n kind=\"line\", estimator=None, data=fmri);\n\n#plot multiple facets/graph based on column and number of individual graphs\nsns.relplot(x=\"timepoint\", y=\"signal\", hue=\"event\", style=\"event\",\n col=\"subject\", col_wrap=5,\n height=3, aspect=.50, linewidth=2.5,\n kind=\"line\", data=fmri.query(\"region == 'frontal'\"));\n\n\n######################################################################\n### Plotting with categorical data #\n######################################################################\n# Categorical scatterplots:\n# stripplot() (with kind=\"strip\"; the default)\n# swarmplot() (with kind=\"swarm\")\n# Categorical distribution plots:\n# boxplot() (with 
kind=\"box\")\n# violinplot() (with kind=\"violin\")\n# boxenplot() (with kind=\"boxen\")\n# Categorical estimate plots:\n# pointplot() (with kind=\"point\")\n# barplot() (with kind=\"bar\")\n# countplot() (with kind=\"count\")\nimport matplotlib.pyplot as plt\nsns.set(style=\"ticks\",color_codes=True)\ntips=pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/tips.csv\")\n\nprint(tips)\nsns.catplot(x=\"day\",y=\"tip\",data=tips)\nsns.catplot(x=\"day\",y=\"tip\",jitter=False,data=tips)\n\n##beesswarn by setting kind=\"swarm\"\nsns.catplot(x=\"day\",y=\"tip\",kind=\"swarm\",data=tips)\nsns.catplot(x=\"day\",y=\"tip\",kind=\"swarm\",hue=\"smoker\",data=tips.query(\"day=='Sun'\"))\n\n#order the x-axis elements usig list\nsns.catplot(x=\"day\", y=\"tip\", order=[\"Sun\",\"Mon\",\"Tue\",\"Wed\",\"Thur\",\"Fri\",\"Sat\"], data=tips);\n\n#When the data is clumpsy and we need to represent on the graph use box scatter option\nsns.catplot(x=\"day\", y=\"tip\", kind=\"box\", data=tips);\n\n##Use different kind\nsns.catplot(x=\"day\", y=\"tip\", kind=\"strip\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\nsns.catplot(x=\"day\", y=\"tip\", kind=\"violin\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\nsns.catplot(x=\"day\", y=\"tip\", kind=\"boxen\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\nsns.catplot(x=\"day\", y=\"tip\", kind=\"point\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\nsns.catplot(x=\"day\", y=\"tip\", kind=\"bar\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\n\n# for count we need to set one of the axis as None\nsns.catplot(x=\"day\", y=None, kind=\"count\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\n\n##Boxen plot\n##load the diamond dataset\ndiamonds=pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/diamonds.csv\")\nsns.load_dataset(diamonds)\nsns.catplot(x=\"color\",y=\"price\",kind=\"boxen\",data=diamonds.sort_values(\"color\"));\nsns.catplot(x=\"cut\",y=\"price\",kind=\"boxen\",data=diamonds.sort_values(\"color\"));\n\n##palette = pastel is to reduce the color intensity and inner stick is to mark the widht length\n## split=True splits based on the hue part\nsns.catplot(x=\"day\", y=\"total_bill\", hue=\"sex\",\n kind=\"violin\", inner=\"stick\", split=True,\n palette=\"pastel\", data=tips);\n\n## Using barplot to plot the bars instead of scatterplot and swarmplot\ntitanic=sns.load_dataset(\"titanic\")\nsns.catplot(x=\"sex\",y=\"survived\",hue=\"class\",kind=\"bar\",data=titanic)\n\n##Violinplot\nsns.violinplot(x=\"day\",y=\"total_bill\",data=tips)\n\n######################################################################\n### Plotting distribution of a dataset #\n######################################################################\n##For univariate we can use kdeplot and for bivariate we can use kdeplot,joinplot and pairplot\n## to plot the relationship between variables\nx=np.random.normal(4,2,size=40)\nsns.kdeplot(x,shade=True,kernel=\"gau\",bw=\"scott\",gridsize=100,cut=3,clip=None,legend=True)\nsns.kdeplot(x,bw=0.2,label=\"bw:0.2\")\n\n##bandwidth shows the estimation with respect to the smallest and largest values in the dataset\n\nx=np.random.gamma(6,2,200)\nsns.distplot(x,kde=False)\n\nmean, cov = [0, 1], [(1, .5), (.5, 1)]\ndata = np.random.multivariate_normal(mean, cov, 200)\ndf = pd.DataFrame(data, columns=[\"x\", 
\"y\"])\n\nsns.scatterplot(x=\"x\",y=\"y\",data=df)\nsns.jointplot(x=\"x\",y=\"y\",data=df)\n\nx, y = np.random.multivariate_normal(mean, cov, 1000).T\n#style must be one of white, dark, whitegrid, darkgrid, ticks\nwith sns.axes_style(\"white\"):\n sns.jointplot(x=x, y=y, kind=\"hex\", color=\"k\");\n\nsns.jointplot(x=\"x\",y=\"y\",data=df,kind=\"kde\")\n\nf, ax = plt.subplots(figsize=(6, 6))\ncmap = sns.cubehelix_palette(as_cmap=True, dark=0, light=1, reverse=True)\nsns.kdeplot(df.x, df.y, cmap=cmap, n_levels=60, shade=True);\n\niris=sns.load_dataset(\"iris\")\nsns.pairplot(iris)\n\nsns.pairplot(iris,hue=\"species\")\n\n##Analysing the data using linear model and regression model techniques\n## methods used here are lmplot() and regplot()\nsns.lmplot(x=\"total_bill\",y=\"tip\",hue=\"smoker\",data=tips)\n\nsns.lmplot(x=\"size\",y=\"tip\",data=tips,x_estimator=np.mean)\n\nanscombe=sns.load_dataset(\"sns\")\nsns.lmplot(x=\"x\", y=\"y\", data=anscombe.query(\"dataset == 'I'\"),\n ci=None, scatter_kws={\"s\": 80});\n\n##User defined dataframe\nemp_df=pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/employee.csv\")\nsns.lmplot(x=\"empid\",y=\"empsalary\",hue=\"empdeptname\",data=emp_df)\n\n#residplot\nsns.residplot(x=\"x\", y=\"y\", data=anscombe.query(\"dataset == 'I'\"),\n scatter_kws={\"s\": 80});\n\n##Creating multi plot grids\ntips=sns.load_dataset(\"tips\")\n#Below line creates the empty graph\ng=sns.FacetGrid(tips,col=\"time\")\n\n#Below line plots the Facetgrid on to the graph\ng.map(plt.hist,\"tip\")\n\ng=sns.FacetGrid(tips,col=\"time\",hue=\"smoker\")\ng.map(plt.hist,\"tip\")\n\n\ng=sns.FacetGrid(tips,col=\"time\",hue=\"smoker\")\ng.map(plt.scatter,\"total_bill\",\"tip\",alpha=.7)\n\ng=sns.FacetGrid(data=tips,row=\"smoker\",col=\"time\",margin_titles=True)\ng.map(sns.regplot,\"total_bill\",\"tip\",color=\".3\",fit_reg=False,x_jitter=.1)\n\npal = dict(Lunch=\"seagreen\", Dinner=\"gray\")\ng = sns.FacetGrid(tips, hue=\"time\", palette=pal, height=5)\ng.map(plt.scatter, \"total_bill\", \"tip\", s=50, alpha=.7, linewidth=.5, edgecolor=\"white\")\n\n\n##Changing the content of the x and y axis\nwith sns.axes_style(\"white\"):\n g = sns.FacetGrid(tips, row=\"sex\", col=\"smoker\", margin_titles=True, height=2.5)\ng.map(plt.scatter, \"total_bill\", \"tip\", color=\"#334488\", edgecolor=\"white\", lw=.5);\ng.set_axis_labels(\"Total bill (US Dollars)\", \"Tip\");\ng.set(xticks=[10, 30, 50], yticks=[2, 6, 10]);\ng.fig.subplots_adjust(wspace=.02, hspace=.02);\n\n","sub_path":"Libraries/seaborn_library/seaborn_lib.py","file_name":"seaborn_lib.py","file_ext":"py","file_size_in_byte":9262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"173394537","text":"from enum import unique\nfrom utilities import AppContext\nfrom db import get_db\nfrom anuvaad_auditor.loghandler import log_info, log_exception\nimport pymongo\nfrom config import MONGO_s3_LINK_STORE\n\nDB_SCHEMA_NAME = 'file_content'\n\nclass BlockModel(object):\n def __init__(self):\n collections = get_db()[DB_SCHEMA_NAME]\n try:\n collections.create_index('record_id')\n except pymongo.errors.DuplicateKeyError as e:\n log_info(\"duplicate key, ignoring\", AppContext.getContext())\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n \n try:\n ref_repo = get_db()[MONGO_s3_LINK_STORE]\n ref_repo.create_index('job_id',unique=True)\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n \n def update_block(self, record_id, user_id, block_identifier, block):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.update({'$and': [{'record_id': record_id}, {'created_by': user_id}, { 'block_identifier': block_identifier }]},\n { '$set': block }, upsert=True)\n\n if 'writeError' in list(results.keys()):\n return False\n return True\n\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return False\n\n def store_bulk_blocks(self, blocks):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.insert_many(blocks)\n if len(blocks) == len(results.inserted_ids):\n return True\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return False\n\n def get_all_blocks(self, user_id, record_id):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n docs = collections.find({\n 'record_id': record_id,\n 'created_by': user_id\n })\n return docs\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return False\n \n\n def get_blocks_by_page(self, record_id, page_number):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.aggregate([\n { '$match' : {'page_no': page_number,'record_id': record_id} },\n { '$group': { '_id': '$data_type', 'data': { '$push': \"$data\" } } }\n ])\n return results\n except Exception as e:\n AppContext.addRecordID(record_id)\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return False\n\n def get_block_by_block_identifier(self, record_id, user_id, block_identifier):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.aggregate([\n { '$match' : {'record_id': record_id, 'block_identifier': block_identifier, 'created_by': user_id } },\n { '$group': { '_id': '$data_type', 'data': { '$push': \"$data\" } } }\n ])\n return results\n except Exception as e:\n log_exception('db connection exception ', AppContext.getContext(), e)\n return None\n\n def get_document_total_page_count(self, record_id):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.aggregate([\n { '$match' : { 'record_id': record_id } },\n {\n '$group':\n {\n '_id': '$record_id',\n 'page_count': { '$max': \"$page_no\" }\n }\n }\n ])\n\n count = 0\n for result in results:\n count = result['page_count']\n break\n\n return count\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return 0\n\n def store_s3_link(self, data):\n try:\n collections = get_db()[MONGO_s3_LINK_STORE]\n record = collections.find({\"job_id\":data[\"job_id\"]})\n # if record.count() == 0:\n if record.count() != 0:\n print(True)\n 
collections.update_one({'job_id': data['job_id']},\n { '$set': { \"file_link.parallel_doc\" : data['file_link']['parallel_doc']} })\n else:\n print(False)\n collections.insert(data)\n \n except Exception as e:\n log_exception(\"db connection exception |{}\".format(str(e)), AppContext.getContext(), e)\n return False\n\n def get_s3_link(self, job_id):\n try:\n collections = get_db()[MONGO_s3_LINK_STORE]\n result = collections.find({\"job_id\":job_id},{\"_id\":0})\n return result[0]\n except Exception as e:\n log_exception(\"db connection exception |{}\".format(str(e)), AppContext.getContext(), e)\n return False","sub_path":"anuvaad-etl/anuvaad-extractor/content-handler/src/models/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"533660014","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 15 18:14:48 2016\n\n@author: escriva\n\"\"\"\n\n\"\"\"\nSELECTED CDEC SENSOR NUMBERS (these are not be available for all sites):\n 1 river stage [ft]\n 2 precipitation, accumulated [in]\n 3 SWE [in]\n 4 air temperature [F]\n 5 EC [ms/cm]\n 6 reservoir elevation [ft]\n 7 reservoir scheduled release [cfs]\n 8 full natural flow [cfs]\n 15 reservoir storage [af]\n 20 flow -- river discharge [cfs]\n 22 reservoir storage change [af]\n 23 reservoir outflow [cfs]\n 24 Evapotranspiration [in]\n 25 water temperature [F]\n 27 water turbidity [ntu]\n 28 chlorophyll [ug/l]\n 41 flow -- mean daily [cfs]\n 45 precipitation, incremental [in]\n 46 runoff volume [af]\n 61 water dissolved oxygen [mg/l]\n 62 water pH value [pH]\n 64 pan evaporation (incremental) [in]\n 65 full natural flow [af]\n 66 flow -- monthly volume [af]\n 67 accretions (estimated) [af]\n 71 spillway discharge [cfs]\n 74 lake evaporation (computed) [cfs]\n 76 reservoir inflow [cfs]\n 85 control regulating discharge [cfs]\n 94 top conservation storage (reservoir) [af]\n 100 water EC [us/cm]\n CDEC DURATION CODES:\n E event\n H hourly\n D daily\n M monthly\n\"\"\"\nimport ScrappingWater as sw\nimport pandas as pd\n\nlist = ['MIL', 'BUC', 'MAR', 'BAR', 'BUR', 'EXC', 'DNP', 'NML', 'CMN']\n\nresults=[]\n\nfor reservoir in list:\n df = sw.get_CDEC_data(station_id=reservoir)\n dfsum = df[['01', '02', '03' , '04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']]\n dfsum = dfsum.convert_objects(convert_numeric=True)\n df['Total'] = dfsum.mean(axis=1)\n df['NANs'] = dfsum.isnull().values.sum(axis=1)\n df['Year']=df.iloc[:,2]\n df['Month']=df.iloc[:,3]\n df['Date']=pd.to_datetime(df.Year*10000+df.Month*100+1,format='%Y%m%d') \n df['Reservoir']=df.iloc[:,0]\n del dfsum\n dfshort = df[['Date','Total', 'NANs']]\n results.append(dfshort)\n del df\n \nfor i in range(9):\n results[i].to_csv(\"ReservoirResults\"+list[i]+\".csv\")\n results[i] = results[i].rename(columns={'Total':'Total'+list[i], 'Reservoir' : 'Reservoir'+list[i]})\n\"\"\" \na = results[0].merge(results[1],on='Date').merge(results[2],on='Date').merge(results[3],on='Date').merge(results[4],on='Date').merge(results[5],on='Date').merge(results[6],on='Date').merge(results[7],on='Date').merge(results[8],on='Date')\na.to_csv(\"ReservoirResultsMerged.csv\")\n\"\"\"","sub_path":"scrappingWithScript.py","file_name":"scrappingWithScript.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"54417430","text":"from unittest.mock import patch\nfrom intake.tests.base_testcases import IntakeDataTestCase\nfrom django.db.models import Count\nfrom user_accounts import models, exceptions\nfrom intake import models as intake_models\nfrom user_accounts.tests import mock\nfrom intake import constants\n\n\nclass TestOrganization(IntakeDataTestCase):\n\n fixtures = [\n 'counties',\n 'organizations', 'mock_profiles',\n 'mock_2_submissions_to_a_pubdef',\n 'mock_2_submissions_to_ebclc',\n 'mock_2_submissions_to_cc_pubdef',\n 'mock_2_submissions_to_sf_pubdef',\n 'mock_2_submissions_to_monterey_pubdef',\n 'mock_1_submission_to_multiple_orgs',\n 'mock_application_events',\n ]\n\n def test_has_a_pdf(self):\n self.assertTrue(self.sf_pubdef.has_a_pdf())\n self.assertFalse(self.cc_pubdef.has_a_pdf())\n\n def test_get_referral_emails_even_if_no_users(self):\n expected_email = \"foo@bar.net\"\n # we need an org\n org = models.Organization(name=\"Acme Nonprofit Services Inc.\")\n org.save()\n user = mock.fake_superuser()\n models.Invitation.create(\n expected_email,\n organization=org,\n inviter=user)\n emails = org.get_referral_emails()\n self.assertListEqual(emails, [expected_email])\n\n def test_get_referral_emails_raises_error_with_no_emails(self):\n org = models.Organization(name=\"Acme Nonprofit Services Inc.\")\n org.save()\n with self.assertRaises(exceptions.NoEmailsForOrgError):\n org.get_referral_emails()\n\n def test_get_transfer_org_returns_correct_org(self):\n ebclc = self.ebclc\n a_pubdef = self.a_pubdef\n self.assertEqual(ebclc.get_transfer_org(), a_pubdef)\n self.assertEqual(a_pubdef.get_transfer_org(), ebclc)\n\n def test_get_transfer_org_returns_none(self):\n sf_pubdef = self.sf_pubdef\n cc_pubdef = self.cc_pubdef\n self.assertIsNone(sf_pubdef.get_transfer_org())\n self.assertIsNone(cc_pubdef.get_transfer_org())\n\n def test_get_unopened_apps_returns_all_apps_if_no_open_events(self):\n ebclc = models.Organization.objects.get(\n slug=constants.Organizations.EBCLC)\n for org in models.Organization.objects.filter(\n is_receiving_agency=True):\n if org == ebclc:\n self.assertEqual(org.get_unopened_apps().count(), 2)\n else:\n self.assertEqual(org.get_unopened_apps().count(), 3)\n\n def test_get_unopened_apps_returns_apps_opened_by_other_org(self):\n # assume we have a multi-org app opened by a user from one org\n cc_pubdef = models.Organization.objects.get(\n slug=constants.Organizations.COCO_PUBDEF)\n a_pubdef = models.Organization.objects.get(\n slug=constants.Organizations.ALAMEDA_PUBDEF)\n cc_pubdef_user = models.UserProfile.objects.filter(\n organization=cc_pubdef).first().user\n sub = intake_models.FormSubmission.objects.annotate(\n org_count=Count('organizations')).filter(org_count__gte=3).first()\n intake_models.ApplicationLogEntry.log_opened([sub.id], cc_pubdef_user)\n # assert that it shows up in unopened apps\n self.assertIn(sub, a_pubdef.get_unopened_apps())\n self.assertNotIn(sub, cc_pubdef.get_unopened_apps())\n\n @patch('intake.models.ApplicationEvent.from_logs')\n def test_get_unopened_apps_with_deleted_opened_app_returns_expected_result(\n self, from_logs):\n # https://code.djangoproject.com/ticket/25467?cversion=0&cnum_hist=2\n logs = intake_models.ApplicationLogEntry.log_opened(\n [None], user=self.sf_pubdef_user)\n self.assertTrue(logs[0].id)\n self.assertIsNone(logs[0].submission_id)\n self.assertEqual(self.sf_pubdef.get_unopened_apps().count(), 
3)\n","sub_path":"user_accounts/tests/models/test_organization.py","file_name":"test_organization.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"603935451","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom future import standard_library\n\nimport numpy as np\n\nstandard_library.install_aliases()\n\n\ndef kernel_delta_norm(X_in_1, X_in_2):\n n_1 = X_in_1.shape[1]\n n_2 = X_in_2.shape[1]\n K = np.zeros((n_1, n_2))\n\n u_list = np.unique(X_in_1)\n for ind in u_list:\n ind_1 = (X_in_1 == ind)\n ind_2 = (X_in_2 == ind)\n idx = (ind_1 & ind_2.T)\n c_1 = np.sqrt(np.count_nonzero(ind_1))\n c_2 = np.sqrt(np.count_nonzero(ind_2))\n K[idx] = 1 / c_1 / c_2\n return K\n\n\ndef kernel_delta(X_in_1, X_in_2):\n n_1 = X_in_1.shape[1]\n n_2 = X_in_2.shape[1]\n K = np.zeros((n_1, n_2))\n u_list = np.unique(X_in_1)\n for ind in u_list:\n ind_1 = (X_in_1 == ind)\n ind_2 = (X_in_2 == ind)\n idx = (ind_1 & ind_2.T)\n K[idx] = 1\n return K\n\n\ndef kernel_gaussian(X_in_1, X_in_2, sigma):\n X_in_12 = np.sum(np.power(X_in_1, 2), 0)\n X_in_12 = np.expand_dims(X_in_12, 0)\n X_in_22 = np.sum(np.power(X_in_2, 2), 0)\n X_in_22 = np.expand_dims(X_in_22, 0)\n dist_2 = (X_in_12 + X_in_22.T) - 2 * np.dot(X_in_1.T, X_in_2)\n K = np.exp(-dist_2 / (2 * np.power(sigma, 2)))\n return K\n","sub_path":"pyHSICLasso-master/pyHSICLasso-master/pyHSICLasso/kernel_tools.py","file_name":"kernel_tools.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"382686883","text":"from collections import deque\ndx = [1,0,-1,0]\ndy = [0,1,0,-1]\n\nheight = 12\nwidth = 6\nboard = [ list(input()) for _ in range(height)]\n\n#뿌요 떨어짐\ndef down(board):\n for i in range(height-1,-1,-1):\n for j in range(width):\n x = i\n while True:\n x += 1\n if x >= height or board[x][j] != \".\":\n break\n board[x][j] = board[x-1][j]\n board[x-1][j] = \".\"\n\ndef bfs(i,j,visited,board):\n\n que = deque()\n que.append((i,j))\n points = [[i,j]]\n color = board[i][j]\n \n while que:\n cx,cy = que.popleft()\n\n for i in range(4):\n nx = cx + dx[i]\n ny = cy + dy[i]\n\n if 0 <= nx < height and 0<= ny < width and not visited[nx][ny]:\n if color == board[nx][ny]:\n visited[nx][ny] = True\n que.append((nx,ny))\n points.append((nx,ny))\n\n if len(points) >= 4:\n for x, y in points:\n board[x][y] = \".\"\n return True\n else:\n return False\n\n\n\n#뿌요 연쇄\ndef bomb(board):\n visited = [ [False for _ in range(width)] for _ in range(height) ]\n find = False\n for i in range(height):\n for j in range(width):\n if board[i][j] != \".\" and not visited[i][j]:\n visited[i][j] = True\n if bfs(i,j,visited,board):\n find = True\n return find\n\ncnt = 0\nwhile True:\n down(board)\n if bomb(board):\n cnt+=1\n else:\n print(cnt)\n break\n\n","sub_path":"3.beakjoon/구현/BOJ_11559_뿌요뿌요.py","file_name":"BOJ_11559_뿌요뿌요.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"421531445","text":"from src.Managers.HardwareManager.HardwareDevice import HardwareDevice\nimport niscope, time, numpy as np\nfrom src.Managers.HardwareManager.PacketCommands import *\nfrom src.Managers.HardwareManager.PacketMeasurements import *\n\nms = lambda: int(round(time.time() * 1000))\n\nclass PXIe_5105(HardwareDevice):\n hardwareType = 'NI PXIe-5105'\n hardwareIdentifier = 'MRB_PXIe5105'\n hardwareVersion = '1.0'\n hardwareCreator = 'Matthew R. Brantley & Ian G. M. Anthony'\n hardwareVersionDate = '5/28/2019'\n\n############################################################################################\n##################################### MANDATORY FUNCS ######################################\n def scan(self):\n for device in self.systemDeviceInfo['NI-SCOPE']:\n if(device['Device Model'] == 'NI PXIe-5105'):\n self.Add_Device(device['Device Name'])\n\n self.Add_Trigger_Mode('Software')\n self.Add_Trigger_Mode('Front Digital Trigger')\n self.scanned.emit()\n\n def initialize(self, deviceName, triggerMode):\n self.booting = True\n try:\n if(deviceName != ''):\n self.session = None\n self.wfm_handles = list()\n self.reportTime = ms()\n with niscope.Session(deviceName) as session:\n # Would get more session data here\n self.source0 = self.Add_AISource('0', -10, 10, 0.1)\n self.source1 = self.Add_AISource('1', -10, 10, 0.1)\n self.source2 = self.Add_AISource('2', -10, 10, 0.1)\n self.source3 = self.Add_AISource('3', -10, 10, 0.1)\n self.source4 = self.Add_AISource('4', -10, 10, 0.1)\n self.source5 = self.Add_AISource('5', -10, 10, 0.1)\n self.source6 = self.Add_AISource('6', -10, 10, 0.1)\n self.source7 = self.Add_AISource('7', -10, 10, 0.1)\n\n self.session = niscope.Session(deviceName)\n\n if(triggerMode == 'Front Digital Trigger'):\n self.Add_Digital_Trigger('PFI1')\n except:\n pass\n\n self.initialized.emit()\n\n def configure(self):\n \n self.configured.emit()\n\n def program(self, programmingPackets):\n self.Set_Ready_Status(False)\n if(programmingPackets):\n packet = programmingPackets[0]['programmingPacket'].Get_Commands(commandType=AnalogAcquisitionCommand)\n if packet:\n packet = packet[0]\n if(packet is not None):\n self.session.abort()\n self.session.vertical_range = packet.acqMax-packet.acqMin\n self.session.vertical_coupling = niscope.VerticalCoupling.DC\n self.session.vertical_offset = (packet.acqMin + packet.acqMax) / 2\n self.session.probe_attenuation = 1\n self.session.channels[0].channel_enabled = True\n self.session.channels[1].channel_enabled = False\n self.session.channels[2].channel_enabled = True\n self.session.channels[3].channel_enabled = False\n self.session.channels[4].channel_enabled = False\n self.session.channels[5].channel_enabled = False\n self.session.channels[6].channel_enabled = False\n self.session.channels[7].channel_enabled = False\n\n self.session.input_clock_source = 'PXI_CLK10'\n self.session.min_sample_rate = packet.rate\n self.session.horz_min_num_pts = packet.noSamples\n self.session.horz_record_ref_position = 0\n self.session.horz_num_records = 1\n self.session.horz_enforce_realtime = True\n\n self.session.trigger_type = niscope.TriggerType.EDGE\n self.session.trigger_level = 2.0\n self.session.trigger_source = '0' \n\n self.readArray = np.ndarray(packet.noSamples, dtype=np.float64)\n\n self.Send_Status_Message('Progam Rate (Hz): ' + str(packet.rate))\n self.Send_Status_Message('Real Rate (Hz): ' + str(self.session.horz_sample_rate))\n\n self.booting = False\n self.Set_Ready_Status(True)\n self.programmed.emit()\n\n def 
softTrigger(self):\n self.Set_Ready_Status(False)\n self.session.initiate()\n \n self.softTriggered.emit()\n\n def shutdown(self):\n if(self.session is not None):\n self.session.close()\n\n def idle(self):\n if(hasattr(self, 'session')):\n if(self.session is not None):\n try:\n if(not self.booting):\n if(self.session.acquisition_status() == niscope.AcquisitionStatus.COMPLETE):\n if(self.Ready_Status() is False):\n self.Send_Status_Message('Triggered!')\n wfmInfo = self.session.channels[2].fetch_into(self.readArray)\n self.writeToPacket(self.readArray, wfmInfo[0])\n self.session.abort()\n self.Set_Ready_Status(True)\n\n else:\n if(ms() - self.reportTime >= 500):\n self.Send_Status_Message('Armed! Waiting for trigger...')\n self.reportTime = ms()\n except:\n pass\n \n def stop(self):\n self.Send_Status_Message('Sending Stop Command...')\n if(hasattr(self, 'session')):\n if(self.session is not None):\n try:\n self.session.abort()\n except:\n pass\n\n############################################################################################\n###################################### INTERNAL FUNCS ######################################\n\n def writeToPacket(self, nparray, wfmInfo):\n mPack = measurementPacket()\n measurement = AnalogWaveformMeasurement(wfmInfo.absolute_initial_x, 1/wfmInfo.x_increment, nparray)\n mPack.Add_Measurement(measurement)\n self.Push_Measurements_Packet(self.source2, mPack)\n","sub_path":"Hardware Drivers/PXIe_5105.py","file_name":"PXIe_5105.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"546968485","text":"import pyautogui\nfrom PIL import Image, ImageGrab\n\ndef press(key):\n pyautogui.keyDown(key)\n return\n\ndef detection(picinfo):\n \n # Draw a square to detect night\n for i in range(600,610):\n for j in range(600,610):\n if picinfo[i, j] > 150: time = \"day\"\n if picinfo[i, j] < 150: time = \"night\"\n \n # Draw the rectangle for cactus\n for x in range(150, 390):\n for y in range(375, 450):\n if time == \"day\":\n if picinfo[x, y] < 86:\n press(\"space\")\n return\n if time == \"night\":\n if picinfo[x, y] > 86:\n press(\"space\")\n return\n\nimport time\ntime.sleep(4)\n\nwhile True:\n image = ImageGrab.grab().convert('L') \n picinfo = image.load()\n detection(picinfo)\n \n '''\n # Draw the rectangle for cactus\n for x in range(150, 390):\n for y in range(375, 450):\n picinfo[x, y] = 86\n \n # Draw blank space to detect night\n for i in range(600,610):\n for j in range(600,610):\n picinfo[i, j] = 86\n \n image.show()\n break'''\n","sub_path":"Others/My Projects/Google Dino Game/full.py","file_name":"full.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"433583091","text":"import urllib.request\nimport time\n\nclass mobio:\n def CheckCode(self,servId,code):\n\n self.servId = servId\n self.code = code\n\n request = urllib.request.urlopen(\"http://www.mobio.bg/code/checkcode.php?servID={0}&code={1}\".format(self.servId,self.code))\n answer = request.read()\n reader = answer.decode(\"utf8\")\n request.close()\n \n if request:\n if reader == \"PAYBG=OK\":\n return 1\n else:\n return 0\n else:\n return 0\n","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"137232333","text":"import math\n\n# What is the smallest value of n such that an algorithm whose running time is 100n^2\n# runs faster than an algorithm whose running time is 2^n on the same machine?\n\nn = 1\nrt1 = 100 * (n ** 2)\nrt2 = 2 ** n\nwhile (rt1 >= rt2):\n print(\"n: {}, 100n^2 rt: {}, 2^2n rt: {}\".format(n, rt1, rt2))\n n += 1\n rt1 = 100 * (n ** 2)\n rt2 = 2 ** n\n\n# Answer: The answer is 14 after that it's the first algorithm always performs better than the second. \n","sub_path":"Foundations/running_times(1.2-3).py","file_name":"running_times(1.2-3).py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"375634898","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom pygit2 import Repository\nfrom pygit2 import GIT_OBJ_TAG\nfrom pygit2 import GIT_OBJ_BLOB\nfrom pygit2 import GIT_OBJ_TREE\nfrom pygit2 import GIT_OBJ_COMMIT\nfrom pygit2 import GIT_DIFF_IGNORE_WHITESPACE\n\nfrom utils import JagareError\nfrom utils.git import format_blob\nfrom utils.git import format_tree\nfrom utils.git import format_commit\nfrom utils.git import format_tag\nfrom utils.git import format_blame\nfrom utils.git import format_diff\nfrom utils.git import _resolve_version\nfrom utils.git import _resolve_type\nfrom utils.process import call, call2, _shlex_split\nfrom tree import ls_tree\nfrom rev_list import rev_list\nfrom rename import detect_renamed\nfrom tag import list_tags\nfrom commit import create_commit\nfrom diff import diff\nfrom ref import update_ref\nfrom clone import clone_repository\nfrom clone import update_server_info\nfrom init import init_repository\nfrom archive import archive_repository\n\n\nclass Jagare(object):\n ''' pygit2 and git commands wrapper '''\n\n def __init__(self, path):\n self.repository = repository(path)\n self.repository_name = None\n\n @property\n def empty(self):\n return self.repository.is_empty\n\n @property\n def bare(self):\n return self.repository.is_bare\n\n @property\n def branches(self):\n return self.list_branches()\n\n @property\n def tags(self):\n return self.list_tags(name_only=True)\n\n def list_tags(self, *w, **kw):\n return list_tags(self.repository, *w, **kw)\n\n def list_branches(self):\n branches = self.repository.listall_branches()\n return branches\n\n def show(self, ref):\n try:\n obj = self.repository.revparse_single(ref)\n except KeyError:\n return {}\n obj_type = obj.type\n\n if obj_type == GIT_OBJ_COMMIT:\n return format_commit(ref, obj, self.repository)\n elif obj_type == GIT_OBJ_TAG:\n return format_tag(ref, obj, self.repository)\n elif obj_type == GIT_OBJ_TREE:\n return format_tree(ref, obj, self.repository)\n elif obj_type == GIT_OBJ_BLOB:\n return format_blob(ref, obj, self.repository)\n\n def ls_tree(self, ref, path=None, recursive=False, size=None, with_commit=False):\n return ls_tree(self.repository, ref, req_path=path,\n recursive=recursive, size=size, with_commit=with_commit)\n\n def rev_list(self, *w, **kw):\n commits = []\n try:\n commits = rev_list(self.repository, *w, **kw)\n except KeyError:\n # FIXME: use JagareError\n pass\n return commits\n\n def blame(self, ref, path, lineno=None):\n if lineno:\n result = call(self.repository,\n 'blame -L %s,%s --porcelain %s -- %s' % (\n lineno, lineno, ref, path))\n else:\n result = call(self.repository,\n 'blame -p -CM %s -- %s' % (ref, path))\n result = format_blame(result['stdout'], self.repository)\n return self.show(ref), result\n\n def format_patch(self, ref, from_ref=None):\n if from_ref:\n result = call(self.repository, 'format-patch --stdout %s...%s' % (from_ref, ref))\n else:\n result = call(self.repository, 'format-patch -1 --stdout %s' % ref)\n return result['stdout']\n\n def detect_renamed(self, ref, path=None):\n return detect_renamed(self.repository, ref)\n\n def commit_file(self, *w, **kw):\n return create_commit(self.repository, *w, **kw)\n\n def diff(self, *w, **kw):\n ''' Jagare's diff wrapper '''\n try:\n kws = {}\n ignore_space = kw.get('ignore_space', None)\n if ignore_space:\n flags = kw.get('flags', 0)\n flags |= GIT_DIFF_IGNORE_WHITESPACE\n kws.update({'flags': flags})\n from_ref = kw.get('from_ref', None)\n if from_ref:\n 
kws.update({'from_ref': from_ref})\n context_lines = kw.get('context_lines', None)\n if context_lines:\n kws.update({'context_lines': context_lines})\n path = kw.get('path', None)\n paths = kw.get('paths', None)\n if path:\n kws.update({'paths': [path]})\n if paths:\n kws.update({'paths': paths})\n # call diff\n d = diff(self.repository, *w, **kws)\n rename_detection = kw.get('rename_detection', None)\n if rename_detection:\n d['diff'].find_similar()\n #d.find_similar()\n # return formated diff dict\n return format_diff(d)\n except JagareError:\n return []\n\n def resolve_commit(self, version):\n version = version.strip()\n return _resolve_version(self.repository, version)\n\n def resolve_type(self, version):\n version = version.strip()\n return _resolve_type(self.repository, version)\n\n def clone(self, path, bare=None, branch=None, mirror=None, env=None):\n # TODO: check clone result\n clone_repository(self.repository.path, path,\n bare=bare, checkout_branch=branch,\n mirror=mirror, env=env)\n jagare = Jagare(path)\n if bare:\n update_server_info(jagare.repository)\n return jagare\n\n @classmethod\n def mirror(cls, url, path, bare=None, branch=None, env=None):\n # TODO: check clone result\n clone_repository(url, path,\n bare=bare, checkout_branch=branch,\n mirror=True, env=env)\n jagare = Jagare(path)\n if bare:\n update_server_info(jagare.repository)\n return jagare\n\n @classmethod\n def init(cls, path, work_path=None, bare=None):\n # TODO: move to libs\n # if parent dir not exist, create it.\n # else git init will fail\n if not os.path.exists(path):\n os.makedirs(path)\n init_repository(path, work_path=work_path, bare=bare)\n return cls(path)\n\n def revparse_single(self, *w, **kw):\n try:\n return super(GitRepository, self).revparse_single(*w, **kw)\n except (KeyError, ValueError):\n raise JagareError(\"rev not found.\")\n\n def listall_references(self):\n return self.repository.listall_references()\n\n def lookup_reference(self, *w, **kw):\n return self.repository.lookup_reference(*w, **kw)\n\n def read(self, *w, **kw):\n try:\n return super(GitRepository, self).read(*w, **kw)\n except ValueError:\n raise JagareError(\"sha not found\")\n\n def add_remote(self, name, url):\n self.repository.create_remote(name, url)\n\n def update_ref(self, ref, newvalue):\n return update_ref(self.repository, ref, newvalue)\n\n def sha(self, rev='HEAD'):\n return _resolve_version(self.repository, rev)\n\n def merge_base(self, to_sha, from_sha):\n return self.repository.merge_base(to_sha, from_sha)\n\n def remotes(self):\n return self.repository.remotes\n\n def fetch_all(self):\n for remote in self.remotes():\n remote.fetch()\n\n def fetch(self, name):\n target = ''\n for remote in self.remotes():\n if remote.name == name:\n target = remote\n if target:\n target.fetch()\n\n def merge(self, ref, msg='automerge', commit_msg='', no_ff=False, _raise=True, _env=None):\n cmd = ['merge', ref]\n if msg:\n cmd.append('-m')\n cmd.append(msg)\n if commit_msg:\n cmd.append('-m')\n cmd.append(commit_msg)\n if no_ff:\n cmd.append('--no-ff')\n errcode = call(self.repository, cmd, env=_env)\n return errcode\n\n def push(self, remote, ref):\n cmd = ['push', remote, ref]\n errcode = call(self.repository, cmd)\n return errcode\n\n def archive(self, prefix):\n result = archive_repository(self.repository.path, prefix)\n return result['stdout']\n\n def delete_branch(self, name):\n branch = self.repository.lookup_branch(name)\n if branch:\n branch.delete()\n\n\ndef repository(path):\n try:\n repo = Repository(path)\n except 
KeyError:\n raise JagareError('repo %s not exists' % path)\n return repo\n","sub_path":"ellen/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"143879026","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# Version: 1.0\n# Author: jtahstu\n# Contact: root@jtahstu.com\n# Site: blog.jtahstu.com\n# Software: PyCharm\n# Time: 2018/8/24 10:16\n\ndef singleNumber(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n res = {}\n for k, v in enumerate(nums):\n if v in res:\n res[v] += 1\n else:\n res[v] = 1\n for (k, v) in res.items():\n if v == 1:\n return k\n\n # return sum(list(set(nums))) * 2 - sum(nums)\n\n # n = 0\n # for num in nums:\n # n ^= num\n # return n\n\ndef init():\n l = [4, 1, 2, 1, 2]\n print(singleNumber(l))\n\n\nif __name__ == '__main__':\n init()\n","sub_path":"2018/leetcode/single-number.py","file_name":"single-number.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"567198163","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Make a 9x9 grid...\nnrows, ncols = 21,21\nimage = np.zeros((nrows,ncols))\n\n\nfor i in range(7):\t\n\timage[2:nrows-2,3*i+1] =np.ones((ncols-4,))\n\n\nrow_labels = range(nrows)\ncol_labels = range(ncols)\nplt.matshow(image)\nplt.xticks(range(ncols), col_labels)\nplt.yticks(range(nrows), row_labels)\nplt.show()","sub_path":"mapa.py","file_name":"mapa.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"124629785","text":"import cv2 as cv\n\n# Function to apply the captured frame to the classifiers and draw ROIs where detections are made.\ndef detect_faces_and_eyes(frame):\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n frame_gray = cv.equalizeHist(frame_gray)\n\n face_line_color = (0, 255, 0)\n face_line_type = cv.LINE_4\n\n eye_line_color = (0, 0, 255)\n eye_line_type = cv.LINE_4\n\n # Detect faces\n faces = face_cascade.detectMultiScale(frame_gray)\n\n for (x, y, w, h) in faces:\n top_left = (x, y)\n bottom_right = (x + w, y + h)\n\n # Draw face ROI\n frame = cv.rectangle(frame, top_left, bottom_right, face_line_color, lineType=face_line_type)\n faceROI = frame_gray[y:y+h, x:x+w]\n\n # Detect eyes within detected face\n eyes = eyes_cascade.detectMultiScale(faceROI)\n for (x2, y2, w2, h2) in eyes:\n eye_top_left = (x+x2, y+y2)\n eye_bottom_right = (x+x2 + w2, y+y2 + h2)\n\n # Draw eye ROI\n frame = cv.rectangle(frame, eye_top_left, eye_bottom_right, eye_line_color, lineType=eye_line_type)\n\n cv.imshow('Capture - Face detection', frame)\n\n\n# Initialise Face Cascade\nface_cascade = cv.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n\n# Initialise Eye Cascade\neyes_cascade = cv.CascadeClassifier('haarcascades/haarcascade_eye_tree_eyeglasses.xml')\n\n# Capture from camera\ncap = cv.VideoCapture(0)\n\nif not cap.isOpened:\n print('ERROR: Cannot open video capture.')\n exit(0)\n\nwhile True:\n ret, frame = cap.read()\n\n if frame is None:\n print(\"WARNING: No frame captured.\")\n break\n\n detect_faces_and_eyes(frame)\n\n if cv.waitKey(10) == 27:\n break","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"462065213","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport random\nfrom STRATEGY.BaseTradeEngine import BaseTradeEngine\nfrom statsmodels.distributions.empirical_distribution import ECDF\nfrom statsmodels.regression.rolling import RollingOLS\nfrom scipy import stats\nfrom scipy.optimize import minimize\nfrom scipy import integrate as ig\n\ndef get_src_cls(source_name):\n return getattr(sys.modules[__name__], source_name)\n\nclass Copula(BaseTradeEngine):\n \n def __init__(self, *args, **kwargs):\n super(Copula, self).__init__(*args, **kwargs)\n \n # Run a backtest with given parameter inputs\n def process(self, windowOLS = 150, copula_lookback = 150, recalibrate_n = 50, \n cap_CL = 0.95, floor_CL = 0.05, lag = 0, rounding = 3, resample = 1, train_rng = [0,1], **kwargs):\n \n minWindowOLS = int(min(max(windowOLS/np.sqrt(resample),10),len(self.original_x)/2))\n \n minCopula = int(min(max(copula_lookback/np.sqrt(resample),recalibrate_n),len(self.original_x)/2))\n minRecalib = int(max(recalibrate_n/np.sqrt(resample),5))\n \n # Calibrate OLS if necessary\n if minCopula != self.copula_lookback or minRecalib != self.recalibrate_n \\\n or minWindowOLS != self.windowOLS or resample != self.resample:\n self.resampling(resample)\n self.calibrate(minWindowOLS,minCopula,minRecalib)\n self.resample = resample\n \n # Get start and end time for data\n start_hist, end_hist = self.get_indices(train_rng)\n \n #In any case, start data after enough data gathered for the OLS window\n min_start = (max(minWindowOLS,minCopula)-1) / len(self.x) \n train_rng[0] = max(train_rng[0],min_start)\n \n subsamples = [self.buy_x , self.buy_y, self.sell_x, self.sell_y, self.beta, \\\n self.timestamp, self.MI_u_v, self.MI_v_u]\n \n [buy_x, buy_y, sell_x, sell_y, beta, time, MI_u_v, MI_v_u] = \\\n self.get_sample(start_hist, end_hist, *subsamples)\n \n parameters = {'beta': beta, 'floor_CL':floor_CL , 'cap_CL': cap_CL,\n 'lag': lag, 'rounding': rounding, 'MI_u_v': MI_u_v, 'MI_v_u': MI_v_u}\n \n parameters.update(kwargs)\n \n # Create the record\n self.record = self.backtest(buy_x, buy_y, sell_x, sell_y, time, **parameters)\n \n # Legacy\n self.reward = sum(self.record.port_rets)\n \n \n def calibrate(self, windowOLS, copula_lookback, recalibrate_n, **kwargs):\n self.windowOLS = int(windowOLS)\n self.copula_lookback = int(copula_lookback)\n self.recalibrate_n = int(recalibrate_n)\n \n df = pd.DataFrame({'y':self.y,'x':self.x,'c':1})\n \n model = RollingOLS(endog =df['y'], exog=df['x'],window=self.windowOLS)\n rres = model.fit()\n \n self.beta = rres.params['x'].values.reshape(-1, )\n \n # Copula decision:\n df['x_log_ret']= np.log(df.x) - np.log(df.x.shift(1))\n df['y_log_ret']= np.log(df.y) - np.log(df.y.shift(1))\n \n # Convert the two returns series to two uniform values u and v using the empirical distribution functions\n ecdf_x, ecdf_y = ECDF(df.x_log_ret), ECDF(df.y_log_ret)\n u, v = [ecdf_x(a) for a in df.x_log_ret], [ecdf_y(a) for a in df.y_log_ret]\n \n # Compute the Akaike Information Criterion (AIC) for different copulas and choose copula with minimum AIC\n tau = stats.kendalltau(df.x_log_ret, df.y_log_ret)[0] # estimate Kendall'rank correlation\n AIC ={} # generate a dict with key being the copula family, value = [theta, AIC]\n\n for i in ['clayton', 'frank', 'gumbel']:\n param = self._parameter(i, tau)\n lpdf = [self._lpdf_copula(i, param, x, y) for (x, y) in zip(u, v)]\n # Replace nan with zero and inf with finite numbers in lpdf list\n lpdf = np.nan_to_num(lpdf) \n loglikelihood = 
sum(lpdf)\n AIC[i] = [param, -2 * loglikelihood + 2]\n # Choose the copula with the minimum AIC\n copula = min(AIC.items(), key = lambda x: x[1][1])[0]\n \n self.startIdx = copula_lookback + 1 # Because first is NAN\n \n df['MI_u_v'] = 0.5\n df['MI_v_u'] = 0.5\n \n for i in np.arange(self.startIdx , len(df)-recalibrate_n, recalibrate_n):\n \n window = range(i - copula_lookback, i) \n predWindow = range(i, i + recalibrate_n)\n \n x_hist = df.x_log_ret.iloc[window]\n y_hist = df.y_log_ret.iloc[window]\n x_forw = df.x_log_ret.iloc[predWindow]\n y_forw = df.y_log_ret.iloc[predWindow]\n \n # Estimate Kendall'rank correlation\n tau = stats.kendalltau(x_hist, y_hist)[0] \n\n # Estimate the copula parameter: theta\n theta = self._parameter(copula, tau)\n\n # Simulate the empirical distribution function for returns of selected trading pair\n ecdf_x, ecdf_y = ECDF(x_hist), ECDF(y_hist) \n\n # Now get future values\n a, b = self._misprice_index(copula, theta, ecdf_x(x_forw), ecdf_y(y_forw))\n \n df.MI_u_v.iloc[predWindow] = a\n df.MI_v_u.iloc[predWindow] = b\n \n self.MI_u_v = df.MI_u_v\n self.MI_v_u = df.MI_v_u\n \n \n def calculate_hr(self, record, beta, rounding, **kwargs):\n record['hr'] = beta.round(int(rounding))\n return record\n \n def calculate_signals(self, record, MI_u_v, MI_v_u, lag, **kwargs):\n \n record['MI_u_v'] = MI_u_v.values\n record['MI_v_u'] = MI_v_u.values \n \n return record\n \n \n def calculate_thresholds(self, record, cap_CL, floor_CL, **kwargs):\n record['cap'] = cap_CL\n record['floor'] = floor_CL\n return record\n \n def calculate_entry_exit(self, record):\n \n record['long_entry'] = (record.MI_u_v > record.cap) & (record.MI_v_u < record.floor)\n record['long_exit'] = (record.MI_v_u > (record.cap -0.1)) & (record.MI_u_v < (record.floor + 0.1))\n record['long_exit'][-1] = True\n \n # Set up num units short\n record['short_entry'] = (record.MI_v_u > record.cap) & (record.MI_u_v < record.floor)\n record['short_exit'] = (record.MI_u_v > (record.cap -0.1)) & (record.MI_v_u < (record.floor+0.1))\n record['short_exit'][-1] = True\n \n #shift n down\n #for i in range(5):\n # record['long_entry'] = record['long_entry'] | record['long_entry'].shift(i)\n # record['short_entry'] = record['short_entry'] | record['short_entry'].shift(i)\n \n return record\n \n def _parameter(self, family, tau):\n ''' Estimate the parameters for three kinds of Archimedean copulas\n according to association between Archimedean copulas and the Kendall rank correlation measure\n '''\n\n if family == 'clayton':\n return 2 * tau / (1 - tau)\n\n elif family == 'frank':\n\n '''\n debye = quad(integrand, sys.float_info.epsilon, theta)[0]/theta is first order Debye function\n frank_fun is the squared difference\n Minimize the frank_fun would give the parameter theta for the frank copula \n ''' \n integrand = lambda t: t / (np.exp(t) - 1) # generate the integrand\n frank_fun = lambda theta: ((tau - 1) / 4.0 -(ig.quad(integrand, sys.float_info.epsilon, theta)[0] \\\n / theta - 1) / theta) ** 2\n\n return minimize(frank_fun, 4, method='BFGS', tol=1e-5).x \n\n elif family == 'gumbel':\n return 1 / (1 - tau)\n\n def _lpdf_copula(self, family, theta, u, v):\n\n if family == 'clayton':\n pdf = (theta + 1) * ((u ** (-theta) + v ** (-theta) - 1) ** (-2 - 1 / theta)) *\\\n (u ** (-theta - 1) * v ** (-theta - 1))\n\n elif family == 'frank':\n num = -theta * (np.exp(-theta) - 1) * (np.exp(-theta * (u + v)))\n denom = ((np.exp(-theta * u) - 1) * (np.exp(-theta * v) - 1) + (np.exp(-theta) - 1)) ** 2\n pdf = num / denom\n\n 
elif family == 'gumbel':\n A = (-np.log(u)) ** theta + (-np.log(v)) ** theta\n c = np.exp(-A ** (1 / theta))\n pdf = c * (u * v) ** (-1) * (A ** (-2 + 2 / theta)) * ((np.log(u) * np.log(v)) \\\n ** (theta - 1)) * (1 + (theta - 1) * A ** (-1 / theta))\n return np.log(pdf)\n\n def _misprice_index(self, family, theta, u, v):\n\n if family == 'clayton':\n MI_u_v = v**(-theta-1) * (u**(-theta)+v**(-theta)-1)**(-1/theta-1) # P(U RS_MAX:\n # self.Rs = RS_MIN\n # elif self.Rs < RS_MIN:\n # self.Rs = RS_MAX\n\n # action is to revise the pid control with the DDPG method\n #self.Rs += learning_rate * action[0]\n #self.Ws -= learning_rate * action[1]\n # if self.Rs > 8 or self.Rs < 3 or self.Ws > 15 or self.Ws < 5:\n # self.Rs = np.random.uniform(3, 8, size=1)\n # self.Ws = np.random.uniform(5, 15, size=1)\n self.Rs = np.clip(self.Rs, 3, 8)\n self.Ws = np.clip(self.Ws, 5, 15)\n # update the history of Rs\n # index = int(self.counter % mylstm.TIMESTEPS)\n # print(index)\n self.Rs_list.append(self.Rs[0]) ## wrong ???\n self.Ws_list.append(self.Ws[0])\n # model\n # self.H_prediction = - self.Rs * 1.62 + self.Ws * 0.9 + 11.6\n # if self.counter < mylstm.TIMESTEPS:\n # self.H_prediction = 0\n # else:\n #\n self.H_prediction = mylstm.welding_pred(self.Rs_list[-mylstm.TIMESTEPS:], self.Ws_list[-mylstm.TIMESTEPS:])\n print(self.Rs_list[-1 : ], self.Ws_list[-1 : ], self.H_prediction, self.target)\n\n # reward\n if abs(self.target - self.H_prediction) < 0.05:\n self.on_goal += 1\n r = 1\n if self.on_goal > 80:\n done = True\n else:\n r = 1 / (1 + np.exp(abs(self.target - self.H_prediction))) - 0.5\n self.on_goal = 0\n # state\n s = np.hstack((self.error , self.del_error, self.Rs, self.Ws))\n #print(s,r)\n return s, r, done\n\n # set the initilize values\n def reset(self):\n self.error = 0\n self.del_error = 0\n self.error_last = 0\n self.Rs = 3\n self.Ws = 15\n #self.Wf = 10\n self.H_prediction = np.random.uniform(low=H_MIN, high=H_MAX, size=1)\n self.target = np.random.uniform(low=H_MIN, high=H_MAX, size=1)\n # self.target = 7.5\n # self.H_prediction = 5.5\n #s = np.concatenate((self.error, self.del_error, self.Rs))\n s = np.hstack((self.error, self.del_error, self.Rs, self.Ws))\n return s\n\n\n def render(self):\n if self.viewer is None:\n self.viewer = Viewer(self.H_prediction, self.target)\n\n self.viewer.render(self.H_prediction, self.target)\n\n def sample_action(self):\n return np.random.rand(2) - 0.5\n\n\n\nclass Viewer(pyglet.window.Window):\n\n def __init__(self, Y_t, goal):\n # vsync=False to not use the monitor FPS, we can speed up training\n super(Viewer, self).__init__(width=400, height=400, resizable=False, caption='Arm', vsync=False)\n pyglet.gl.glClearColor(1, 1, 1, 1)\n self.Y_t = Y_t\n # print(\"init\")\n self.goal_info = goal\n\n self.batch = pyglet.graphics.Batch() # display whole batch at once\n self.goal = self.batch.add(\n 4, pyglet.gl.GL_QUADS, None, # 4 corners\n ('v2f', [100, 100 + self.goal_info, # location\n 100, 105 + self.goal_info,\n 300, 105 + self.goal_info,\n 300, 100 + self.goal_info]),\n ('c3B', (86, 109, 249) * 4)) # color\n self.arm1 = self.batch.add(\n 4, pyglet.gl.GL_QUADS, None,\n ('v2f', [250, 250, # location\n 250, 255,\n 255, 255,\n 255, 250]),\n ('c3B', (249, 86, 86) * 4,)) # color\n\n def render(self, H_prediction, target):\n self.Y_t = H_prediction\n self._update_arm(H_prediction, target)\n self.switch_to()\n self.dispatch_events()\n self.dispatch_event('on_draw')\n self.flip()\n # print(self.goal_info['h'])\n\n def on_draw(self):\n self.clear()\n 
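# pyglet renders every primitive registered in self.batch (the goal bar and the arm quad) in this single call\n        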
self.batch.draw()\n\n    def _update_arm(self, H_prediction, target):\n        # update goal\n\n        self.goal.vertices = (\n            100, 100 + target * 10,\n            100, 105 + target * 10,\n            300, 105 + target * 10,\n            300, 100 + target * 10)\n\n        # update arm\n        #height = self.Y_t\n        #print(H_prediction)\n        self.arm1.vertices = (\n            195, 100 + H_prediction * 10,\n            195, 105 + H_prediction * 10,\n            205, 105 + H_prediction * 10,\n            205, 100 + H_prediction * 10)\n\nif __name__ == '__main__':\n    env = Welding_Env()\n    while True:\n        print(\"new epoch\")\n        s = env.reset()\n        for i in range(100):\n            env.render()\n            env.step(env.sample_action())\n            time.sleep(0.01)\n\n\n\n\n\n","sub_path":"New_env.py","file_name":"New_env.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"213187909","text":"import psycopg2\r\nimport psycopg2.extras\r\n\r\nfrom flask import Flask, redirect, url_for, request, render_template\r\napp = Flask(__name__)\r\n\r\n@app.route('/success/')\r\ndef success(name):\r\n conn = None\r\n try:\r\n print('Connecting to the PostgreSQL database...')\r\n conn = psycopg2.connect(host=\"localhost\",database=\"postgres\", user=\"postgres\", password=\"poojasvi\")\r\n # create a cursor\r\n cur = conn.cursor()\r\n print(\"Connection established\")\r\n\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error) \r\n\r\n try:\r\n # execute a statement\r\n # a=\"RHAFUVJTG\"\r\n a=name\r\n sql = \"\"\"SELECT \"Quantity\" FROM medicines WHERE \"Medicine\" = %s ; \"\"\"\r\n # cur.execute(sql)\r\n cur.execute(sql,(a,))\r\n print(\"select executed\")\r\n print(cur.rowcount)\r\n res=cur.fetchone()[0]\r\n print(res)\r\n \r\n conn.commit()\r\n \r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n\r\n finally:\r\n cur.close()\r\n if conn is not None:\r\n conn.close()\r\n print('Database connection closed.') \r\n # return str(res)\r\n return render_template('get_med_qty.html', qty = str(res), e=1)\r\n\r\n@app.route('/med',methods = ['POST', 'GET'])\r\ndef med():\r\n if request.method == 'POST':\r\n med = request.form['med']\r\n return redirect(url_for('success',name = med))\r\n else:\r\n med = request.form['med']\r\n return redirect(url_for('success',name = med))\r\n\r\nif __name__ == '__main__':\r\n app.run(debug = True)","sub_path":"get_med_qty.py","file_name":"get_med_qty.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"366530624","text":"\"\"\"Handle activity that is not EAP on the same EAP interface\"\"\"\n\nimport struct\nfrom fcntl import ioctl\nfrom eventlet.green import socket\n\nfrom chewie.mac_address import MacAddress\n\nclass ActivitySocket:\n \"\"\"Handle the RADIUS socket\"\"\"\n SIOCGIFINDEX = 0x8933\n PACKET_MR_PROMISC = 1\n IP_ETHERTYPE = 0x0800\n SOL_PACKET = 263\n PACKET_ADD_MEMBERSHIP = 1\n\n DHCP_UDP_SRC = 68\n DHCP_UDP_DST = 67\n UDP_IPTYPE = b'\\x11'\n EAP_ADDRESS = MacAddress.from_string(\"01:80:c2:00:00:03\")\n\n def __init__(self, interface_name):\n self.socket = None\n self.interface_name = interface_name\n self.interface_index = None\n\n def setup(self):\n \"\"\"Set up the socket\"\"\"\n self.open()\n self.get_interface_index()\n self.set_interface_promiscuous()\n\n def send(self, data):\n \"\"\"Not Implemented -- This socket is purely for Listening\"\"\"\n raise NotImplementedError('Attempted to send data down the activity socket')\n\n def receive(self):\n \"\"\"Receive activity from supplicant-facing socket\"\"\"\n # Skip all packets that are not DHCP requests\n while True:\n ret_val = self.socket.recv(4096)\n\n if ret_val[23:24] == self.UDP_IPTYPE:\n src_port = struct.unpack('>H', ret_val[34:36])[0]\n dst_port = struct.unpack('>H', ret_val[36:38])[0]\n\n if src_port == self.DHCP_UDP_SRC and dst_port == self.DHCP_UDP_DST:\n return ret_val\n\n def open(self):\n \"\"\"Listen on the Socket for any form of Eth() / IP() frames \"\"\"\n self.socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW, # pylint: disable=no-member\n socket.htons(self.IP_ETHERTYPE)) # pylint: disable=no-member\n self.socket.bind((self.interface_name, 0))\n\n def get_interface_index(self):\n \"\"\"Get the interface index of the Socket\"\"\"\n # http://man7.org/linux/man-pages/man7/netdevice.7.html\n request = struct.pack('16sI', self.interface_name.encode(\"utf-8\"), 0)\n response = ioctl(self.socket, self.SIOCGIFINDEX, request)\n _ifname, self.interface_index = struct.unpack('16sI', response)\n\n def set_interface_promiscuous(self):\n \"\"\"Sets the activity interface to be able to receive messages with port_id in mac_dst\"\"\"\n request = struct.pack(\"IHH8s\", self.interface_index, self.PACKET_MR_PROMISC,\n len(self.EAP_ADDRESS.address), self.EAP_ADDRESS.address)\n\n self.socket.setsockopt(self.SOL_PACKET, self.PACKET_ADD_MEMBERSHIP, request)\n","sub_path":"chewie/activity_socket.py","file_name":"activity_socket.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"551071272","text":"# Copyright 2014 by Ethan Fritz. All Rights Reserved.\r\n\r\n\r\nclass Sound:\r\n def __init__(self, sound, text, image):\r\n self.sound = sound\r\n self.text = text\r\n self.image = image\r\n\r\n#\r\n# Constants\r\n#\r\nSOUNDS = [Sound('sfx/vroom.wav', 'Vroom', 'images/vroom.png'),\r\n Sound('sfx/screech.wav', 'Screech', 'images/screech.png')]\r\n\r\n\r\ndef get_sound_by_id(id):\r\n for sound in SOUNDS:\r\n if sound.id == id:\r\n return sound\r\n\r\n return None\r\n","sub_path":"core/sound.py","file_name":"sound.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"361484119","text":"# -*- coding : utf-8 -*-\n\n#------------------------------------------------------------------------\n# LIB IMPORT\n#------------------------------------------------------------------------\n\nimport eventHandler\nimport pygame\nimport loading\nfrom pygame.locals import *\nfrom classes.box import Box\nfrom classes.cell import Cell\nfrom classes.scrolls import Scrolls\nfrom classes.level import Level\nfrom classes.player import Player\n\n#------------------------------------------------------------------------\n# GLOBAL VARIABLE\n#------------------------------------------------------------------------\n\n\"\"\"\n\n:firstPixel: the first pixel of the width where the game (and not the screen) has to be displayed\n:playerAction: action that the player chooses in the titleScreen : string\n\n\"\"\"\n\nfirstPixel = None\nplayerAction = None\n\n#------------------------------------------------------------------------\n# METHODS\n#------------------------------------------------------------------------\n\ndef displayTitleScreen(titleScreenImgPath, window):\n \"\"\"Method where I set how the title screen is displayed \"\"\"\n\n playerAction = \"\"\n\n #Setting the image for the title screen\n #window = pygame.display.set_mode((640,480))\n titleScreen = pygame.image.load(titleScreenImgPath).convert()\n window.blit(titleScreen, (0,0))\n\n #Setting the new game button for the title screen\n \"\"\"newGame = pygame.image.load(\"resources/img/new_game_resized.png\").convert()\n window.blit(newGame, (320, 240))\"\"\"\n pygame.display.flip()\n\n continuer = 1\n\n #Display the title screen as long as the player presses wrong key\n while continuer:\n for event in pygame.event.get():\n \n if event.type == KEYDOWN: \n playerAction = eventHandler.eventTitleScreen(event)\n continuer = loading.exitTitleScreen(playerAction)\n \n elif event.type == QUIT:\n playerAction = \"Quit_the_game\"\n continuer = 0\n\n return playerAction\n \ndef displayGrille(level, firstPixel, window):\n \"\"\"We place all elements on the table\n\n Attributes:\n :firstPixel: first pixel to display on the width of the screen to get the game centered in the window\n :window: pygame.Surface object where the game is displayed\n \"\"\"\n\n for row in level._get_grille():\n for cell in row:\n if cell.element is not None:\n pos_x = firstPixel + (cell.pos_x * 30)\n pos_y = cell.pos_y * 30\n elementPNG = pygame.image.load(cell.element.skin).convert_alpha()\n window.blit(elementPNG, (pos_x, pos_y))\n\n else:\n pass\n\n \ndef displayLevel(numLevel, window):\n \"\"\"Display the level the player has selected \"\"\"\n print(\"Chargement du niveau \"+str(numLevel))\n loadLevel = True\n\n #While we need to reload the same level (for restart or a death for example)\n #We reload it\n while loadLevel == True:\n #String that will go back to main and let the program knows what to do next\n #Go to next level? 
Quit the game?\n        action = \"\"\n        \n        #We create the object Level and load its elements\n        level = Level(numLevel)\n\n        #If it couldn't find a level in levelFile, it means the game is finished and\n        #we display the title screen\n        if level.csvPath is None:\n            return \"Title_Screen\"\n        else:\n            level.loadingLevelForDisplay()\n\n        #We calculate where the center of the game should be on the screen in order\n        #to display all elements correctly\n        gameWidth = len(level._get_grille()[0]) * 30\n        firstPixel = centerTheGameOnTheScreen(window.get_width(), gameWidth)\n\n        #We set a new background image\n        window.fill(pygame.Color(\"black\"))\n        background = pygame.image.load(\"resources/img/fond.jpg\").convert()\n        window.blit(background, (firstPixel,0))\n        pygame.display.flip()\n\n        #We place each element with their pixel positions on the screen\n        displayGrille(level, firstPixel, window)\n\n        #We place the player on the table\n        player = Player()\n        playerPNG = pygame.image.load(player.character.skin).convert_alpha()\n        player.positionRect = playerPNG.get_rect(x = level.start[0], y = level.start[1])\n        \n        window.blit(playerPNG, (firstPixel+player.positionRect.x*30, player.positionRect.y*30))\n        pygame.display.flip()\n        \n        continuer = 1\n\n        #We display the level while the player hasn't finished it\n        while continuer:\n\n            #We display background and elements of the level again\n            window.fill(pygame.Color(\"Black\"))\n\n            #We load the background image\n            window.blit(background, (firstPixel,0))\n\n            #We load the table of elements with their graphics\n            displayGrille(level, firstPixel, window)\n\n            #We load the player character (donkey kong)\n            playerPNG = pygame.image.load(player.character.skin).convert_alpha()\n            window.blit(playerPNG, (firstPixel + player.positionRect.x * 30, player.positionRect.y*30))\n\n            #If the player walked on a scroll, we display its message\n            level.checkPlayerOnScroll(player, window)\n\n            pygame.display.flip()\n            \n            for event in pygame.event.get():\n                if event.type == QUIT:\n                    action = \"Quit_the_game\"\n                    loadLevel = False\n                    continuer = 0\n\n                #If the player presses a key, we check if he can move\n                elif event.type == KEYDOWN:\n                    if event.key == K_r:\n                        continuer = 0\n                    elif event.key == K_LEFT or event.key == K_RIGHT or event.key == K_UP or event.key == K_DOWN:\n                        #If the player will move on a cell where there is a box\n                        potentialBox = level.checkPlayerBoxes(player, event)\n                        if potentialBox is not None:\n                            box = potentialBox\n                            if box.canMove(player, level, event):\n                                player.move(level, event)\n\n                        else:\n                            player.move(level, event)\n\n                    #If the player dies, he goes back to the starting point of the current level\n                    if level.checkPlayerDies(player):\n                        continuer = 0\n                    \n                    #If player walks on the finish line, he goes to next level\n                    if level.checkEndLevel(player):\n                        continuer = 0\n                        loadLevel = False\n                        action = \"Next_level\"\n\n    return action\n    \n\n\ndef displayLevelSelection(window):\n    \"\"\"Screen where all the levels unlocked are listed \"\"\"\n    numberOfLevels = loading.howManyLevels()\n\n    for x in range(0, numberOfLevels):\n        messageFont = pygame.font.SysFont(\"comicsansms\", 18)\n        messageRender = messageFont.render(\"Level \"+str(x+1), True, (255,255,255))\n        window.blit(messageRender, (0,450))\n        pygame.display.flip()\n    \n\ndef centerTheGameOnTheScreen(windowWidth, gameWidth):\n    \"\"\"Method that calculates where the first pixel of the width of the game\n    has to be in order to have the game centered on the screen\n\n    Attributes:\n    :windowWidth: width of the window : int\n    :gameWidth: width of the game : int \n    \"\"\"\n\n    
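# e.g. a 640 px wide window and a 600 px wide game leave 40 px blank, so the game starts 20 px in\r\n    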
blankSpace = windowWidth - gameWidth\r\n    # integer division keeps the offset an int for blitting\r\n    firstPixel = blankSpace // 2\r\n    \r\n    return firstPixel\r\n\r\n    \r\n","sub_path":"displayDK.py","file_name":"displayDK.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"307991978","text":"import re\nimport datetime\nfrom models.db_tables import PmrReport\n\nRE_FINGERPRINT = re.compile(r'Licensing=.+fingerprint +(\\w+)')\nRE_NODE_NAME = re.compile(r'\\S+ = (.+)')\nRE_PRODUCT_NAME = re.compile(r'productName +(.+)')\nRE_RNC_CAPACITY = re.compile(r'(MO .+|Object .+|Licensing=.+)')\nRE_PM_CAPACITY = re.compile(r'Licensing=1\\D+([\\d,]+)\\s+(\\d+)')\nRE_SUBRACKS = re.compile(r'Subrack=(.+)')\nRE_SUBRACK_SLOT = re.compile(r'Subrack=(.+),Slot=(\\d{1,2})')\nRE_SLOT_STATE = re.compile(r'(MO .+|Subrack=.+)')\nRE_DEVICE_STATE = re.compile(r'((?:Type|PDR|CC|DC).+)')\nRE_ETHERNET_PORT = re.compile(r'Subrack=(.+),Slot=(\\d{1,2}),.+Port=(\\d).+\\((\\w+)')\nRE_LOAD_CONTROL = re.compile(r'((?:Object |Subrack=).+)')\nRE_LOAD_CONTROL_ENTRY = re.compile(r'Subrack=(.+),Slot=(\\d{1,2}),.+LoadControl=1\\s+(.+)')\nRE_PMR_REPORT_PART = re.compile(r'(?:Object\\s+Counter|Date\\s+Time\\s+Counter).+', re.M)\nRE_PMR_REPORT_ENTRY = re.compile(r'^(?P.+)\\s+(?P(?:Mp|Cc|Dc|Pdr)Load|UsedCapacity)\\s+(?P.+)', re.M)\nRE_PMR_REPORT_START = re.compile(r'\\s?\\d{1,3}\\)\\sRNC\\s')\nRE_LKRA = re.compile(r'(?:Sr|[ME]S).+')\nRE_LKRA_ENTRY = re.compile(\n r'(?P^[ME]S\\d?)\\s+(?P\\d+)\\s+(?P\\S+)\\s+(?P[\\w\\d]+)?\\s+(?P\\d+)\\s+(?P\\d+)\\s+(?P\\d+)\\s+(?P\\d+)')\n\nCURRENT_CAPACITY = 'currentCapacityLimit'\n\n\ndef get_fingerprint(cmd_printout):\n return RE_FINGERPRINT.findall(cmd_printout)[0]\n\n\ndef get_nodeName(cmd_printout):\n try:\n return RE_NODE_NAME.findall(cmd_printout)[0]\n except IndexError:\n return None\n\n\ndef get_productName(cmd_printout):\n try:\n return RE_PRODUCT_NAME.findall(cmd_printout)[0]\n except IndexError:\n return None\n\n\ndef get_rncCapacity(cmd, cmd_printout):\n rncCapacity = {}\n attributes = []\n re_cmd_printout = RE_RNC_CAPACITY.findall(cmd_printout)\n for i in range(len(re_cmd_printout)):\n if i == 0:\n attributes = re_cmd_printout[i].split()[1:]\n else:\n record = re.sub(r'\\(|\\)', '', re_cmd_printout[i].strip('Licensing=1,RncCapacity=')).split()\n k = record[0]\n if CURRENT_CAPACITY in cmd:\n v = dict(zip(attributes, record[2:]))\n else:\n v = dict(zip(attributes, record[1:]))\n rncCapacity.update({k: v})\n return rncCapacity\n\n\ndef get_slotState(slots, cmd_printout):\n re_cmd_printout = RE_SLOT_STATE.findall(cmd_printout)\n for i in range(len(re_cmd_printout)):\n if i == 0:\n # attributes = re_cmd_printout[i].split()[1:]\n pass\n else:\n slotState = re.sub(r'\\(|\\)', '', re_cmd_printout[i].split()[-1])\n board = re_cmd_printout[i].split()[0]\n attribute = re_cmd_printout[i].split()[1]\n subrack, slot = RE_SUBRACK_SLOT.findall(board)[0]\n if subrack not in slots.keys():\n slots.update({subrack: {}})\n if slot not in slots.get(subrack).keys():\n slots.get(subrack).update({slot: {}})\n slots.get(subrack).get(slot).update({attribute: slotState})\n\n\ndef get_deviceState(cmd_printout):\n re_cmd_printout = RE_DEVICE_STATE.findall(cmd_printout)[-4:]\n deviceStates = {}\n attributes = []\n for i in range(len(re_cmd_printout)):\n if i == 0:\n s = re.sub('%|\\(.\\)', '', re_cmd_printout[i])\n attributes = s.split()[1:]\n else:\n entry = re_cmd_printout[i].split()\n k = entry[0]\n v = dict(zip(attributes, entry[1:]))\n if k not in deviceStates.keys():\n deviceStates.update({k: {}})\n deviceStates.get(k).update(v)\n return deviceStates\n\n\ndef get_ethernet_speed(cmd_printout):\n re_cmd_printout = RE_ETHERNET_PORT.findall(cmd_printout)\n ports = {}\n for i in range(len(re_cmd_printout)):\n port = '-'.join(re_cmd_printout[i][:-1])\n speed = 
re_cmd_printout[i][-1]\n        ports.update({port: speed})\n    return ports\n\n\ndef get_loadcontrol(cmd_printout):\n    re_cmd_printout = RE_LOAD_CONTROL.findall(cmd_printout)\n    result = {}\n    attributes = []\n    for i in range(len(re_cmd_printout)):\n        if i == 0:\n            attributes = re_cmd_printout[i].split()[1:]\n        else:\n            entry = re_cmd_printout[i]\n            subrack, slot, counters = RE_LOAD_CONTROL_ENTRY.findall(entry)[0]\n            slot = int(slot)\n            counters = counters.split()\n            if subrack not in result.keys():\n                result.update({subrack: {}})\n            if slot not in result.get(subrack).keys():\n                result.get(subrack).update({slot: {}})\n            result.get(subrack).get(slot).update(dict(zip(attributes, counters)))\n    return result\n\n\ndef get_lkra(cmd_printout):\n    cell_repartition = 'Cell repartition by '\n    repartion_by = ''\n    lkra = {}\n    result = {}\n\n    for line in cmd_printout.splitlines():\n        if cell_repartition in line:\n            repartion_by = re.sub('{0}|{1}'.format(cell_repartition, ':'), '', line)\n            if repartion_by not in lkra.keys():\n                lkra.update({repartion_by: []})\n            continue\n        if repartion_by and RE_LKRA.findall(line):\n            lkra.get(repartion_by).append(line)\n\n    byRncModule = lkra.get('rncModule')\n    column_names = byRncModule[0].split()[2:]\n\n    for line in byRncModule[1:]:\n        subrack, module = RE_LKRA_ENTRY.search(line).groups()[0:2]\n        values = RE_LKRA_ENTRY.search(line).groups()[2:]\n        module = int(module)\n        values = [int(i) if i and i.isdigit() else i for i in values]\n        if subrack not in result.keys():\n            result.update({subrack: {}})\n        if module not in result.get(subrack).keys():\n            result.get(subrack).update({module: {}})\n        result.get(subrack).get(module).update(dict(zip(column_names, values)))\n\n    return result\n\n\ndef get_pmr(cmd_printout):\n\n    def get_max_sum(*args):\n        max_sum = max(sum(filter(None, j)) for j in map(None, *args[0]))\n        if result[report_name]['max'] < max_sum:\n            result[report_name]['max'] = max_sum\n        for j in map(None, *args[0]):\n            if sum(filter(None, j)) == max_sum:\n                result[report_name]['v'] = list(j)\n        if not result.get(report_name).get('devices'):\n            result[report_name]['devices'] = devices\n\n    def get_max_value(*args):\n        try:\n            result.update(dict(zip(args[1], [{'max': 0.0, 'v': []}, {'max': 0.0, 'v': []}])))\n            m = {}\n            vals = {}\n            m.update(dict(zip(args[1], [max(filter(None, j)) for j in map(None, *args[0])])))\n            vals.update(dict(zip(args[1], map(None, *args[0]))))\n            time = [datetime.datetime.strptime(dt, '%Y-%m-%d %H:%M').time() for dt in args[2]][:24]\n            for k in args[1]:\n                time_sorted, vals_sorted = zip(*sorted(zip(time, vals.get(k))))\n                time_hm = [hm.strftime(format='%H:%M') for hm in time_sorted]\n                result.get(k).update({'max': m.get(k), 'v': list(vals_sorted), 'time': time_hm})\n        except ValueError:\n            result.update(dict(zip(args[1], [{'max': 0, 'v': '', 'time': ''}, {'max': 0, 'v': '', 'time': ''}])))\n\n        result.pop(report_name)\n\n    def get_device(d):\n        if re_mp.findall(d):\n            sr, sl = re_mp.search(d).groups()\n            d = '-'.join([sr.replace('-', ''), sl])\n        elif re_dev.findall(d):\n            dev, srsl = re_dev.search(d).groups()\n            d = '-'.join([srsl, dev])\n        return d\n\n    result = {}\n    report_name = ''\n    headers = []\n    entry = []\n    devices = []\n\n    # plain groups assumed here as well (original group names unknown); matches are read positionally\n    re_mp = re.compile(r'([ME].+),Slot=(\\d+)')\n    re_dev = re.compile(r'(?:Cc|Dc|Pdr)Device=(\\d+)\\s\\((.+)\\)')\n\n    report_processors = {\n        'MpLoad': ['devices', get_max_sum, re_mp], 'CcLoad': ['devices', get_max_sum], 'DcLoad': ['devices', get_max_sum],\n        'PdrLoad': ['devices', get_max_sum], 'UsedCapacity': ['time', get_max_value]}\n\n    for line in cmd_printout.splitlines():\n        
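# header lines carry the report's column names; every other matching line is one device/report/values entry\n        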
if line and RE_PMR_REPORT_PART.findall(line):\n headers = RE_PMR_REPORT_PART.findall(line)[0]\n continue\n\n if line and RE_PMR_REPORT_ENTRY.search(line):\n (device, report_name, values) = RE_PMR_REPORT_ENTRY.search(line).groups()\n device = get_device(device)\n if report_name not in result.keys():\n result.update({report_name: {'max': 0.0, 'v': [], report_processors.get(report_name)[0]: ''}})\n v = []\n v.extend(0.0 if i == 'N/A' else float(i) for i in values.split())\n entry.append(v)\n devices.append(device)\n continue\n\n if entry:\n headers = headers.split()[3:]\n report_processors.get(report_name)[1](entry, headers, devices)\n entry = []\n devices = []\n headers = ''\n\n pmr_todb = {\n 'IubThroughput': [PmrReport.IubThroughput.db_column, PmrReport.vIub.db_column, PmrReport.tIub.db_column],\n 'FachDchHsUsers': [PmrReport.FachDchHsUsers.db_column, PmrReport.vUsers.db_column, PmrReport.tUsers.db_column],\n 'MpLoad': [PmrReport.MpLoad.db_column, PmrReport.vMpLoad.db_column, PmrReport.dMpLoad.db_column],\n 'CcLoad': [PmrReport.CcLoad.db_column, PmrReport.vCcLoad.db_column, PmrReport.dCcLoad.db_column],\n 'DcLoad': [PmrReport.DcLoad.db_column, PmrReport.vDcLoad.db_column, PmrReport.dDcLoad.db_column],\n 'PdrLoad': [PmrReport.PdrLoad.db_column, PmrReport.vPdrLoad.db_column, PmrReport.dPdrLoad.db_column]}\n\n for k, v in pmr_todb.items():\n try:\n if result.get(k).get('devices') and result.get(k).get('v'):\n d_sorted, v_sorted = zip(*sorted(zip(result.get(k).get('devices'), result.get(k).get('v'))))\n result.update(\n dict(zip(v, [result.get(k).get('max'), list(v_sorted), list(d_sorted)])))\n elif result.get(k).get('devices'):\n result.update(\n dict(zip(v, [result.get(k).get('max'), result.get(k).get('v'), result.get(k).get('devices')])))\n\n else:\n result.update(\n dict(zip(v, [result.get(k).get('max'), result.get(k).get('v'), result.get(k).get('time')])))\n\n except AttributeError:\n result.update(dict(zip(v, [0.0, '', ''])))\n\n return result\n","sub_path":"modules/command_parsers.py","file_name":"command_parsers.py","file_ext":"py","file_size_in_byte":10386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"360530806","text":"import pytest\nimport requests\n\n\ndef test_swagger():\n\n model_endpoint = 'http://localhost:5000/swagger.json'\n\n r = requests.get(url=model_endpoint)\n assert r.status_code == 200\n assert r.headers['Content-Type'] == 'application/json'\n\n json = r.json()\n assert 'swagger' in json\n assert json.get('info') and json.get('info').get('title') == 'Model Asset Exchange Server'\n\n\ndef test_metadata():\n\n model_endpoint = 'http://localhost:5000/model/metadata'\n\n r = requests.get(url=model_endpoint)\n assert r.status_code == 200\n\n metadata = r.json()\n assert metadata['id'] == 'ADD IN MODEL ID'\n assert metadata['name'] == 'ADD MODEL NAME'\n assert metadata['description'] == 'ADD MODEL DESCRIPTION'\n assert metadata['license'] == 'ADD MODEL LICENSE'\n\n\ndef test_response():\n model_endpoint = 'http://localhost:5000/model/predict'\n file_path = 'assets/SAMPLE_FILE.jpg'\n\n with open(file_path, 'rb') as file:\n file_form = {'image': (file_path, file, 'image/jpeg')}\n\n r = requests.post(url=model_endpoint, files=file_form)\n\n assert r.status_code == 200\n response = r.json()\n\n assert response['status'] == 'ok'\n\n # add sanity checks here\n\n\nif __name__ == '__main__':\n pytest.main([__file__])\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"111467765","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport click\n\nfrom polyaxon import settings\nfrom polyaxon.api import POLYAXON_CLOUD_HOST\nfrom polyaxon.logger import clean_outputs\nfrom polyaxon.utils.formatting import Printer\nfrom polyaxon.utils.http_utils import clean_host\n\n\ndef get_dashboard_url(\n base: str = \"ui\", subpath: str = \"\", use_cloud: bool = False\n) -> str:\n host = POLYAXON_CLOUD_HOST if use_cloud else clean_host(settings.CLIENT_CONFIG.host)\n dashboard_url = \"{}/{}/\".format(host, base)\n if subpath:\n return \"{}{}/\".format(dashboard_url, subpath.rstrip(\"/\"))\n return dashboard_url\n\n\ndef get_dashboard(dashboard_url: str, url_only: bool, yes: bool):\n if url_only:\n Printer.print_header(\"The dashboard is available at: {}\".format(dashboard_url))\n sys.exit(0)\n if yes or click.confirm(\n \"Dashboard page will now open in your browser. Continue?\",\n default=True,\n ):\n click.launch(dashboard_url)\n\n\n@click.command()\n@click.option(\n \"--yes\",\n \"-y\",\n is_flag=True,\n default=False,\n help=\"Automatic yes to prompts. \"\n 'Assume \"yes\" as answer to all prompts and run non-interactively.',\n)\n@click.option(\n \"--url\", is_flag=True, default=False, help=\"Print the url of the dashboard.\"\n)\n@clean_outputs\ndef dashboard(yes, url):\n \"\"\"Open dashboard in browser.\"\"\"\n get_dashboard(dashboard_url=get_dashboard_url(), url_only=url, yes=yes)\n","sub_path":"core/polyaxon/cli/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"3816632","text":"import random\r\n# VARIABLES GLOBALES\r\n###################################\r\n\r\n########################################################\r\n# FUNCIONES\r\n########################################################\r\n# funcion para introducir numeros\r\ndef introducirNumero():\r\n while True:\r\n try:\r\n numero = int(input(\"Por favor ingrese un número: \"))\r\n return numero\r\n break\r\n except ValueError:\r\n print(\"Oops! No era válido. Intente nuevamente...\")\r\n# fin funcion ########################################################\r\n\r\n# funcion para comprobar repetidos en array\r\ndef comprobarRepetidosArrayNumero(arrayComp , numero):\r\n comp = False\r\n for elemento in arrayComp:\r\n if elemento == numero:\r\n comp = True\r\n return comp\r\n# fin funcion ########################################################\r\n\r\n# funcion para introducir mi apuesta\r\ndef introducirMiApuestaMultiple():\r\n # introducir tus numeros de euromillon\r\n print('Introduce tus numeros de euromillon. Pueden de 5 a 10 y no deben ser negativos, ni 0 ni mayor de 50')\r\n print('Introduce el numero de numeros de euromillon que quieres apostar')\r\n numeroApuestasNumero = introducirNumero()\r\n contadorNumerosPrimitiva = 1\r\n miPrimitiva = []\r\n miNumero = 0\r\n # ponemos numeros primitiva\r\n while contadorNumerosPrimitiva < (numeroApuestasNumero + 1):\r\n # ponemos el numero\r\n print('Escribe el numero ' + str(contadorNumerosPrimitiva))\r\n miNumero = introducirNumero()\r\n if miNumero < 1 or miNumero > 50:\r\n print('El número no es valido')\r\n else:\r\n if contadorNumerosPrimitiva == 1:\r\n miPrimitiva.append(miNumero)\r\n contadorNumerosPrimitiva = contadorNumerosPrimitiva + 1\r\n print('Numero introducido correctamente')\r\n else:\r\n # si es false el numero no esta repetido\r\n if comprobarRepetidosArrayNumero(miPrimitiva,miNumero) == False:\r\n miPrimitiva.append(miNumero)\r\n contadorNumerosPrimitiva = contadorNumerosPrimitiva + 1\r\n print('Numero introducido correctamente')\r\n else: # y si esta repetido\r\n print('El número está repetido')\r\n\r\n\r\n # introducir tus series o estrellas de euromillon\r\n print('Introduce tus series o estrellas de euromillon. 
There are 2 to 5 of them and they must not be negative, nor 0, nor greater than 12')\r\n    print('Enter how many EuroMillions series you want to bet')\r\n    numeroApuestasSeries = introducirNumero()\r\n    contadorSeriesPrimitiva = 1\r\n    misSeries = []\r\n    miSerie = 0\r\n    # read the series numbers\r\n    while contadorSeriesPrimitiva < (numeroApuestasSeries + 1):\r\n        # read the number\r\n        print('Enter series number ' + str(contadorSeriesPrimitiva))\r\n        miSerie = introducirNumero()\r\n        if miSerie < 1 or miSerie > 12:\r\n            print('The series number is not valid')\r\n        else:\r\n            if contadorSeriesPrimitiva == 1:\r\n                misSeries.append(miSerie)\r\n                contadorSeriesPrimitiva = contadorSeriesPrimitiva + 1\r\n                print('Series number entered successfully')\r\n            else:\r\n                # if this is False the number is not a duplicate\r\n                if comprobarRepetidosArrayNumero(misSeries,miSerie) == False:\r\n                    misSeries.append(miSerie)\r\n                    contadorSeriesPrimitiva = contadorSeriesPrimitiva + 1\r\n                    print('Series number entered successfully')\r\n                else: # and if it is a duplicate\r\n                    print('The series number is a duplicate')\r\n\r\n\r\n    miApuesta = [miPrimitiva,misSeries]\r\n    # print my numbers (a join handles any count from 5 to 10)\r\n    print(\"Your EuroMillions numbers are = \" + \" - \".join(str(n) for n in miPrimitiva))\r\n    # print my series\r\n    print(\"Your EuroMillions series or stars are = \" + \" - \".join(str(s) for s in misSeries))\r\n\r\n    return miApuesta\r\n# end function ########################################################\r\n\r\n\r\n# function to generate EuroMillions results\r\ndef crearResultadosEuromillon():\r\n    print('How many EuroMillions draws do you want to create to compare your numbers against')\r\n    numPrim = int(input('Enter the number of draws = '))\r\n    contEuroMillones = 1\r\n    numeroAleatorio = 0\r\n    arrayNumerosEuroMillon = []\r\n    NumerosEuroMillon = []\r\n\r\n    arraySeriesEuroMillon = []\r\n    SeriesEuroMillon = []\r\n\r\n    # create the draw numbers\r\n    for i in range(0, numPrim):\r\n        while contEuroMillones < 6:\r\n            numeroAleatorio = random.randint(1, 50)\r\n            # all of this checks that each draw has no duplicate numbers\r\n            if contEuroMillones == 1:\r\n                NumerosEuroMillon.append(numeroAleatorio)\r\n                contEuroMillones = contEuroMillones + 1\r\n            else:\r\n                # if this is False the number is not a duplicate\r\n                if comprobarRepetidosArrayNumero(NumerosEuroMillon,numeroAleatorio) == False:\r\n                    NumerosEuroMillon.append(numeroAleatorio)\r\n                    contEuroMillones = contEuroMillones + 1\r\n\r\n        contEuroMillones = 1\r\n\r\n        #######################################\r\n        # print('EUROMILLIONS NUMBERS ' + str(i+1))\r\n        # for j in range(len(NumerosEuroMillon)):\r\n        #     print(NumerosEuroMillon[j], end=' ')\r\n        # print()\r\n\r\n        #######################################\r\n        # put the draw NUMBERS into the result array\r\n        arrayNumerosEuroMillon.append(NumerosEuroMillon)\r\n        # empty the array for the next iteration\r\n        NumerosEuroMillon = []\r\n\r\n\r\n    contEuroMillones = 0\r\n    # create the draw series\r\n    for i in range(0, numPrim):\r\n        while contEuroMillones < 2:\r\n            numeroAleatorio = random.randint(1, 12) # stars are drawn from 1-12, not 1-50\r\n            # all of this checks that each draw has no duplicate numbers\r\n            if contEuroMillones == 0: # this counter starts at 0, so only the first star goes in unchecked\r\n                SeriesEuroMillon.append(numeroAleatorio)\r\n                contEuroMillones = contEuroMillones + 1\r\n            else:\r\n                # if this is False the number is not a duplicate\r\n                if comprobarRepetidosArrayNumero(SeriesEuroMillon,numeroAleatorio) == False:\r\n                    
SeriesEuroMillon.append(numeroAleatorio)\r\n                    contEuroMillones = contEuroMillones + 1\r\n\r\n        contEuroMillones = 0\r\n\r\n        #######################################\r\n        # print('EUROMILLIONS SERIES ' + str(i+1))\r\n        # for j in range(len(SeriesEuroMillon)):\r\n        #     print(SeriesEuroMillon[j], end=' ')\r\n        # print()\r\n        #\r\n        #######################################\r\n        # put the draw SERIES into the result array\r\n        arraySeriesEuroMillon.append(SeriesEuroMillon)\r\n        # empty the array for the next iteration\r\n        SeriesEuroMillon = []\r\n\r\n    # RETURN RESULTS\r\n    totalResultados = [arrayNumerosEuroMillon,arraySeriesEuroMillon]\r\n    return totalResultados\r\n\r\n# end function ########################################################\r\n\r\n# function to count matches between two arrays\r\ndef sumarRepeticionesDosArrays(arrayMisNumerosP, arrayEuro):\r\n    numAciertos = 0\r\n    for i in range(len(arrayEuro)):\r\n        if comprobarRepetidosArrayNumero(arrayMisNumerosP , arrayEuro[i]) == True:\r\n            numAciertos = numAciertos + 1\r\n    return numAciertos\r\n# end function ########################################################\r\n\r\n# function to compare results\r\ndef comprobarAciertosEuroMillones(miApuesta,resultadosEuroMILLON):\r\n    arrayNumeroSorteo = [] # these are the arrays of draw numbers\r\n    arraySerieSorteo = [] # these are the arrays of draw series\r\n\r\n    arrayNumeroApuesta = [] # these are the arrays of bet numbers\r\n    arraySerieApuesta = [] # these are the arrays of bet series\r\n\r\n    arrayNumeroApuesta = miApuesta[0] # numbers\r\n    arraySerieApuesta = miApuesta[1] # series\r\n\r\n    aciertosNumero0Serie0 = 0\r\n    aciertosNumero1Serie0 = 0\r\n    aciertosNumero2Serie0 = 0\r\n    aciertosNumero3Serie0 = 0\r\n    aciertosNumero4Serie0 = 0\r\n    aciertosNumero5Serie0 = 0\r\n\r\n    aciertosNumero0Serie1 = 0\r\n    aciertosNumero1Serie1 = 0\r\n    aciertosNumero2Serie1 = 0\r\n    aciertosNumero3Serie1 = 0\r\n    aciertosNumero4Serie1 = 0\r\n    aciertosNumero5Serie1 = 0\r\n\r\n    aciertosNumero0Serie2 = 0\r\n    aciertosNumero1Serie2 = 0\r\n    aciertosNumero2Serie2 = 0\r\n    aciertosNumero3Serie2 = 0\r\n    aciertosNumero4Serie2 = 0\r\n    aciertosNumero5Serie2 = 0\r\n\r\n    aciertosSeries0 = 0\r\n    aciertosSeries1 = 0\r\n    aciertosSeries2 = 0\r\n\r\n    numeroTotalAciertos = 0\r\n    numeroSerieAciertos = 0\r\n\r\n    arrayTotalesResultadosNumSort = [] #\r\n\r\n    for i in range(len(resultadosEuroMILLON)):\r\n        if i == 0: # # these are the arrays of draw numbers # it is a double array\r\n            arrayNumeroSorteo = resultadosEuroMILLON[i] # it is a double array\r\n            arraySerieSorteo = resultadosEuroMILLON[1] # it is a double array\r\n            # we compare against the bets\r\n            for j in range(len(arrayNumeroSorteo)):\r\n                numerosSorteo = arrayNumeroSorteo[j] # it is an array\r\n                seriesSorteo = resultadosEuroMILLON[1][j] # it is an array\r\n                # add up the series matches for this draw (the SERIES bet is compared, not the number bet)\r\n                numeroSerieAciertos = sumarRepeticionesDosArrays(arraySerieApuesta, seriesSorteo)\r\n                if numeroSerieAciertos == 0:\r\n                    # once we know the series matches for the draw, add up the number matches\r\n                    # and store them\r\n                    # compare with the number bet and add up the matches\r\n                    aciertosSeries0 = aciertosSeries0 + 1\r\n                    numeroTotalAciertos = sumarRepeticionesDosArrays(arrayNumeroApuesta, numerosSorteo)\r\n                    if numeroTotalAciertos == 0:\r\n                        aciertosNumero0Serie0 = aciertosNumero0Serie0 + 1\r\n                    elif numeroTotalAciertos == 1:\r\n                        aciertosNumero1Serie0 = aciertosNumero1Serie0 + 1\r\n                    elif numeroTotalAciertos == 2:\r\n                        aciertosNumero2Serie0 = 
aciertosNumero2Serie0 + 1\r\n                    elif numeroTotalAciertos == 3:\r\n                        aciertosNumero3Serie0 = aciertosNumero3Serie0 + 1\r\n                    elif numeroTotalAciertos == 4:\r\n                        aciertosNumero4Serie0 = aciertosNumero4Serie0 + 1\r\n                    elif numeroTotalAciertos == 5:\r\n                        aciertosNumero5Serie0 = aciertosNumero5Serie0 + 1\r\n                elif numeroSerieAciertos == 1:\r\n                    aciertosSeries1 = aciertosSeries1 + 1\r\n                    numeroTotalAciertos = sumarRepeticionesDosArrays(arrayNumeroApuesta, numerosSorteo)\r\n                    if numeroTotalAciertos == 0:\r\n                        aciertosNumero0Serie1 = aciertosNumero0Serie1 + 1\r\n                    elif numeroTotalAciertos == 1:\r\n                        aciertosNumero1Serie1 = aciertosNumero1Serie1 + 1\r\n                    elif numeroTotalAciertos == 2:\r\n                        aciertosNumero2Serie1 = aciertosNumero2Serie1 + 1\r\n                    elif numeroTotalAciertos == 3:\r\n                        aciertosNumero3Serie1 = aciertosNumero3Serie1 + 1\r\n                    elif numeroTotalAciertos == 4:\r\n                        aciertosNumero4Serie1 = aciertosNumero4Serie1 + 1\r\n                    elif numeroTotalAciertos == 5:\r\n                        aciertosNumero5Serie1 = aciertosNumero5Serie1 + 1\r\n                elif numeroSerieAciertos == 2:\r\n                    aciertosSeries2 = aciertosSeries2 + 1\r\n                    numeroTotalAciertos = sumarRepeticionesDosArrays(arrayNumeroApuesta, numerosSorteo)\r\n                    if numeroTotalAciertos == 0:\r\n                        aciertosNumero0Serie2 = aciertosNumero0Serie2 + 1\r\n                    elif numeroTotalAciertos == 1:\r\n                        aciertosNumero1Serie2 = aciertosNumero1Serie2 + 1\r\n                    elif numeroTotalAciertos == 2:\r\n                        aciertosNumero2Serie2 = aciertosNumero2Serie2 + 1\r\n                    elif numeroTotalAciertos == 3:\r\n                        aciertosNumero3Serie2 = aciertosNumero3Serie2 + 1\r\n                    elif numeroTotalAciertos == 4:\r\n                        aciertosNumero4Serie2 = aciertosNumero4Serie2 + 1\r\n                    elif numeroTotalAciertos == 5:\r\n                        aciertosNumero5Serie2 = aciertosNumero5Serie2 + 1\r\n\r\n\r\n\r\n    # then we print the results; the \"5 HITS\" rows now print the 5-hit counters instead of the 4-hit ones\r\n    print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')\r\n    print('xxxxxxxRESULTSxxxxxxxxxxxxxxxxxxxx')\r\n    print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')\r\n    print( str(aciertosNumero0Serie0) + ' TICKETS WITH 0 HITS AND 0 SERIES MATCHED')\r\n    print( str(aciertosNumero1Serie0) + ' TICKETS WITH 1 HITS AND 0 SERIES MATCHED')\r\n    print( str(aciertosNumero2Serie0) + ' TICKETS WITH 2 HITS AND 0 SERIES MATCHED')\r\n    print( str(aciertosNumero3Serie0) + ' TICKETS WITH 3 HITS AND 0 SERIES MATCHED')\r\n    print( str(aciertosNumero4Serie0) + ' TICKETS WITH 4 HITS AND 0 SERIES MATCHED')\r\n    print( str(aciertosNumero5Serie0) + ' TICKETS WITH 5 HITS AND 0 SERIES MATCHED')\r\n    print('')\r\n    print( str(aciertosNumero0Serie1) + ' TICKETS WITH 0 HITS AND 1 SERIES MATCHED')\r\n    print( str(aciertosNumero1Serie1) + ' TICKETS WITH 1 HITS AND 1 SERIES MATCHED')\r\n    print( str(aciertosNumero2Serie1) + ' TICKETS WITH 2 HITS AND 1 SERIES MATCHED')\r\n    print( str(aciertosNumero3Serie1) + ' TICKETS WITH 3 HITS AND 1 SERIES MATCHED')\r\n    print( str(aciertosNumero4Serie1) + ' TICKETS WITH 4 HITS AND 1 SERIES MATCHED')\r\n    print( str(aciertosNumero5Serie1) + ' TICKETS WITH 5 HITS AND 1 SERIES MATCHED')\r\n    print('')\r\n    print( str(aciertosNumero0Serie2) + ' TICKETS WITH 0 HITS AND 2 SERIES MATCHED')\r\n    print( str(aciertosNumero1Serie2) + ' TICKETS WITH 1 HITS AND 2 SERIES MATCHED')\r\n    print( str(aciertosNumero2Serie2) + ' TICKETS WITH 2 HITS AND 2 SERIES MATCHED')\r\n    print( str(aciertosNumero3Serie2) + ' TICKETS WITH 3 HITS AND 2 SERIES MATCHED')\r\n    print( str(aciertosNumero4Serie2) + ' TICKETS WITH 4 HITS AND 2 SERIES MATCHED')\r\n    print( str(aciertosNumero5Serie2) + ' TICKETS WITH 5 HITS AND 2 SERIES MATCHED')\r\n\r\n# end function 
########################################################\r\n\r\n# operations\r\nmiApuesta = introducirMiApuestaMultiple()\r\nresultadosEuroMILLON = crearResultadosEuromillon()\r\ncomprobarAciertosEuroMillones(miApuesta,resultadosEuroMILLON)\r\n","sub_path":"Listas/EuroMillon_ApuestaMultiple.py","file_name":"EuroMillon_ApuestaMultiple.py","file_ext":"py","file_size_in_byte":14894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"307864120","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom Sawyer_DynaGoalEnv_DAgger import demoEnv # REFACTOR TO REMOVE GOALS\nimport rospy\n# Leveraging demonstration is same as TD3\n\n\nfrom std_srvs.srv import Empty, EmptyRequest\nfrom running_mean_std import RunningMeanStd\nfrom SetupSummary import SummaryManager_HIRO as SummaryManager\nimport pickle\nimport os\nfrom collections import OrderedDict\nfrom state_action_space import *\n# intera imports \nfrom intera_interface import CHECK_VERSION\nfrom intera_interface import Limb\nfrom intera_interface import Gripper\n\nfrom tf_conversions import posemath\nfrom tf.msg import tfMessage\nfrom tf.transformations import quaternion_from_euler\nfrom intera_core_msgs.msg import (\n DigitalIOState,\n DigitalOutputCommand,\n IODeviceStatus\n)\n\nfrom geometry_msgs.msg import (\n PoseStamped,\n Pose,\n Point,\n Quaternion,\n)\n\nimport time as timer\nimport time\n\nfrom std_msgs.msg import Header\n# save running mean std for demo transition\nfrom SetupSummary import SummaryManager_HIRO as SummaryManager\nrms_path = '/home/irobot/catkin_ws/src/ddpg/scripts/ecsac_aux/'\ndemo_path = '/home/irobot/catkin_ws/src/ddpg/scripts/ecsac_aux/'\nMAN_BUF_FNAME = 'demo_manager_buffer.bin'\nCON_BUF_FNAME = 'demo_controller_buffer.bin'\n\nclass DemoReplayBuffer(object):\n \"\"\"\n A simple FIFO experience replay buffer for ECSAC + HIRO agents.\n For HIRO, we need seperate replay buffers for both manager and controller policies.\n low-level controller does not require state-action sequence\n high-level controller requires extra buffer for state-action \n \"\"\"\n\n def __init__(self, obs_dim, stt_dim, act_dim, aux_dim, size, manager=False):\n # 9 types of data in buffer\n self.obs_buf = np.zeros(shape=(size,)+ obs_dim, dtype=np.float32) # o_t, (-1,100,100,3)\n self.obs1_buf = np.zeros(shape=(size,)+ obs_dim, dtype=np.float32) # o_t+1, (-1,100,100,3)\n self.g_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32) # g_t, (-1,21)\n self.g1_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32) # g_t+1, (-1,21)\n self.stt_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32) # s_t (-1,21), joint pos, vel, eff\n self.stt1_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32) # s_t+1 for (-1,21), consists of pos, vel, eff self.act_buf = np.zeros(shape=(size, act_dim), dtype=np.float32) # A+t 3 dim for action\n if not manager:\n self.act_buf = np.zeros(shape=(size , act_dim), dtype=np.float32) # a_t (-1,8)\n else:\n self.act_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32)\n self.rews_buf = np.zeros(shape=(size,), dtype=np.float32)\n self.done_buf = np.zeros(shape=(size,), dtype=np.float32)\n self.aux_buf = np.zeros(shape=(size , aux_dim), dtype=np.float32)\n self.aux1_buf = np.zeros( shape=(size , aux_dim), dtype=np.float32)\n self.ptr, self.size, self.max_size = 0, 0, size\n\n def store(self, obs, obs1, g, g1, stt, stt1, act, aux, aux1, rew, done, manager=False):\n \"\"\"store step transition in the buffer\n \"\"\"\n self.obs_buf[self.ptr] = obs\n self.obs1_buf[self.ptr] = obs1\n self.g_buf[self.ptr] = g\n self.g1_buf[self.ptr] = g1\n self.stt_buf[self.ptr] = stt\n self.stt1_buf[self.ptr] = stt1\n self.act_buf[self.ptr] = act\n self.rews_buf[self.ptr] = rew\n self.done_buf[self.ptr] = done\n self.aux_buf[self.ptr] = aux\n self.aux1_buf[self.ptr] = aux\n if not manager:\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n\n def get_episodic_subgoal(self, global_step, ep_len):\n \"\"\" Return 
the subgoal and fullstate batch of the episode.\n \n slicing index : buf[global_step - ep_len:global_step-1] => slice out only the transitions for this episode.\n \"\"\"\n return [self.stt_buf[global_step-ep_len:global_step], self.stt1_buf[global_step-ep_len:global_step], \n self.g_buf[global_step-ep_len:global_step], self.g1_buf[global_step-ep_len:global_step]]\n\n\n def return_buffer(self):\n \"\"\" Returns the whole numpy arrays containing demo transitions.\n \"\"\"\n return {'data':[self.obs_buf[:self.ptr], self.obs1_buf[:self.ptr], self.g_buf[:self.ptr], \n self.g1_buf[:self.ptr], self.stt_buf[:self.ptr], self.stt1_buf[:self.ptr],\n self.act_buf[:self.ptr], self.rews_buf[:self.ptr], self.done_buf[:self.ptr],\n self.aux_buf[:self.ptr], self.aux1_buf[:self.ptr]],\n 'size':self.size}\n\n\nclass DemoManagerReplayBuffer(DemoReplayBuffer):\n \"\"\"\n A simple FIFO experience replay buffer for ECSAC + HIRO agents.\n For HIRO, we need seperate replay buffers for both manager and controller policies.\n low-level controller does not require state-action sequence\n high-level controller requires extra buffer for state-action \n \"\"\"\n\n def __init__(self, obs_dim, stt_dim, act_dim, aux_dim, size, seq_len):\n \"\"\"full-state/ color_observation sequence lists' shape[1] is +1 longer than that of \n action seqence -> they are stored as s_t:t+c/o_t:t+c, while action is stored as a_t:t+c\n \"\"\"\n\n super(DemoManagerReplayBuffer, self).__init__(obs_dim, stt_dim, act_dim, aux_dim, size, manager=True)\n\n self.stt_seq_buf = np.zeros(shape=(size, seq_len+1, stt_dim), dtype=np.float32) # s_t (-1, 10+1, 21), joint pos, vel, eff\n self.obs_seq_buf = np.zeros(shape=(size, seq_len+1,)+ obs_dim, dtype=np.float32) # o_t, (-1, 10+1, 100, 100, 3)\n self.act_seq_buf = np.zeros(shape=(size, seq_len, act_dim), dtype=np.float32) # a_t (-1,10, 8)\n\n def store(self, stt_seq, obs_seq, act_seq, *args, **kwargs):\n \"\"\"store step transition in the buffer\n \"\"\"\n super(DemoManagerReplayBuffer, self).store(manager=True, *args, **kwargs)\n \n self.stt_seq_buf[self.ptr] = np.array(stt_seq)\n self.obs_seq_buf[self.ptr] = np.array(obs_seq)\n self.act_seq_buf[self.ptr] = np.array(act_seq)\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def return_buffer(self):\n \"\"\" Returns the whole numpy arrays containing demo transitions, for manager transitions\n \"\"\"\n return {'data':[self.obs_buf[:self.ptr], self.obs1_buf[:self.ptr], self.g_buf[:self.ptr],\n self.g1_buf[:self.ptr], self.stt_buf[:self.ptr], self.stt1_buf[:self.ptr],\n self.act_buf[:self.ptr], self.rews_buf[:self.ptr], self.done_buf[:self.ptr],\n self.aux_buf[:self.ptr], self.aux1_buf[:self.ptr]],\n 'seq_data':[self.stt_seq_buf[:self.ptr], self.obs_seq_buf[:self.ptr], self.act_seq_buf[:self.ptr]],\n 'size':self.size}\n\n\ndef normalize(x, stats):\n if stats is None:\n return x\n return (x - stats.mean) / stats.std\n\n\n# def normalize_action(action_arr):\n# lb_array = ACTION_LOW_BOUND*np.ones(action_arr.shape)\n# hb_array = ACTION_HIGH_BOUND*np.ones(action_arr.shape)\n# _norm_action = lb_array + (action_arr+1.0*np.ones(action_arr.shape))*0.5*(hb_array - lb_array)\n# _norm_action = np.clip(_norm_action, lb_array, hb_array)\n# _norm_action = _norm_action.reshape(action_arr.shape)\n# return _norm_action\n\n\ndef randomize_world():\n \"\"\" Domain randomization for the environment's light and the color of robot link.\n \"\"\"\n rospy.wait_for_service('/dynamic_world_service') # randomize the light in the gazebo world\n 
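# ServiceProxy returns a callable client for the std_srvs/Empty service; invoking it below fires one randomization\n    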
dynamic_world_service_call = rospy.ServiceProxy('/dynamic_world_service', Empty)\n change_env_request = EmptyRequest()\n dynamic_world_service_call(change_env_request)\n\n # rospy.wait_for_service('/colorize_world_service') # randomize the model colors in the gazebo world\n # colorize_world_service_call = rospy.ServiceProxy('/colorize_world_service', Empty)\n # colorize_env_request = EmptyRequest()\n # colorize_world_service_call(colorize_env_request)\n\nif __name__ == '__main__':\n\n USE_CARTESIAN = True\n USE_GRIPPER = True\n IS_TRAIN = True # always true for DAgger script\n\n # define observation dimensions\n # for low_level controller\n obs_dim = (100, 100, 3) # for actor in POMDP\n stt_dim = 21# full_state of the robot (joint positions, velocities, and efforts) + ee position\n act_dim = 8 # 7 joint vels and gripper position \n aux_dim = 3 # target object's position\n # define joint vel_limits\n action_space = (-1.0, 1.0)\n ee_dim = 7 # 4 quaternion\n grip_dim = 1 # 1 dimension for the gripper position\n\n # for high_level controller\n des_goal_dim = 21 # (joint positions, velocities, and efforts) + ee position\n sub_goal_dim = 21 # (joint positions, velocities, and efforts) + ee position\n\n if USE_CARTESIAN: # append 7-dim\n stt_dim += ee_dim\n des_goal_dim += ee_dim\n sub_goal_dim += ee_dim\n\n if USE_GRIPPER: # append 1-dim\n stt_dim += grip_dim\n des_goal_dim += grip_dim\n sub_goal_dim += grip_dim\n\n # init node\n # rospy.init_node('hierarchical_DAgger')\n\n # demo quantity related\n total_epi = 1\n max_ep_len = 500\n total_steps = total_epi * max_ep_len\n buffer_size = int(1e4) # 50000 steps : is it enough?\n manager_propose_freq = 10\n\n isDemo = True\n ep_ret, ep_len = 0, 0\n t = 0 # counted steps [0:total_steps - 1]\n timesteps_since_manager = 0 # to count c-step elapse for training manager\n timesteps_since_subgoal = 0 # to count c-step elapse for subgoal proposal\n episode_num = 0 # incremental episode counter\n done = True\n reset = False\n manager_temp_transition = list() # temp manager transition\n\n # demoEnv inherits robotEnv\n env = demoEnv(max_steps=max_ep_len, control_mode='velocity', isPOMDP=True, isGripper=USE_GRIPPER, isCartesian=USE_CARTESIAN, train_indicator=IS_TRAIN)\n controller_buffer = DemoReplayBuffer(obs_dim=obs_dim, stt_dim=stt_dim, act_dim=act_dim, aux_dim=aux_dim, size=buffer_size)\n manager_buffer = DemoManagerReplayBuffer(obs_dim=obs_dim, stt_dim=stt_dim, act_dim=act_dim, aux_dim=aux_dim, size=buffer_size, seq_len=manager_propose_freq)\n obs_shape_list = [(100,100,3), (7), (7), (7), (1), (7), (3)]\n summary_manager = SummaryManager(obs_shape_list=obs_shape_list) # manager for rms\n\n # create instances for the arm and gripper\n limb = Limb()\n gripper = Gripper()\n\n def update_rms(full_stt=None, c_obs=None, aux=None, act=None):\n \"\"\"Update the mean/stddev of the running mean-std normalizers.\n Normalize full-state, color_obs, and auxiliary observation.\n Caution on the shape!\n \"\"\"\n summary_manager.s_t0_rms.update(c_obs) # c_obs\n summary_manager.s_t1_rms.update(full_stt[:7]) # joint_pos\n summary_manager.s_t2_rms.update(full_stt[7:14]) # joint_vel\n summary_manager.s_t3_rms.update(full_stt[14:21]) # joint_eff\n summary_manager.s_t4_rms.update(full_stt[21:22]) # gripper_position\n summary_manager.s_t5_rms.update(full_stt[22:]) # ee_pose\n summary_manager.s_t6_rms.update(aux) # aux\n summary_manager.a_t_rms.update(act) # ee_pose\n\n\n def load_rms():\n rospy.logwarn('Loads the mean and stddev for test time')\n 
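# each file holds the running mean/std of one observation slice (image, joint pos/vel/eff, gripper, ee pose, aux, action)\n        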
summary_manager.s_t0_rms.load_mean_std(rms_path+'mean_std0_demo.bin')\n summary_manager.s_t1_rms.load_mean_std(rms_path+'mean_std1_demo.bin')\n summary_manager.s_t2_rms.load_mean_std(rms_path+'mean_std2_demo.bin')\n summary_manager.s_t3_rms.load_mean_std(rms_path+'mean_std3_demo.bin')\n summary_manager.s_t4_rms.load_mean_std(rms_path+'mean_std4_demo.bin')\n summary_manager.s_t5_rms.load_mean_std(rms_path+'mean_std5_demo.bin')\n summary_manager.s_t6_rms.load_mean_std(rms_path+'mean_std6_demo.bin')\n summary_manager.a_t_rms.load_mean_std(rms_path+'mean_std7_demo.bin')\n \n\n def save_rms(step):\n rospy.logwarn('Saves the mean and stddev @ step %d', step)\n summary_manager.s_t0_rms.save_mean_std(rms_path+'mean_std0_demo.bin')\n summary_manager.s_t1_rms.save_mean_std(rms_path+'mean_std1_demo.bin')\n summary_manager.s_t2_rms.save_mean_std(rms_path+'mean_std2_demo.bin')\n summary_manager.s_t3_rms.save_mean_std(rms_path+'mean_std3_demo.bin')\n summary_manager.s_t4_rms.save_mean_std(rms_path+'mean_std4_demo.bin')\n summary_manager.s_t5_rms.save_mean_std(rms_path+'mean_std5_demo.bin')\n summary_manager.s_t6_rms.save_mean_std(rms_path+'mean_std6_demo.bin')\n summary_manager.a_t_rms.save_mean_std(rms_path+'mean_std7_demo.bin')\n \n\n def get_demo_temp_subgoal():\n \"\"\" return the temporary subgoal for demo transition.\n \"\"\"\n return np.zeros(sub_goal_dim)\n\n\n def demo_manager_sg_transition(manager_transition):\n \"\"\" This function is called every C-step.\n replace the temp subgoals in temp manager buffer with expert subgoals for the rollout batches.\n The demo agent achieves the subgoal for every step, i.e. h = s + g - s'\n g_t == s_t+c (action of the manager)\n *manager_transition :\n [s_seq, o_seq, a_seq, obs, obs_1, dg, dg_1, stt, stt_1, act, aux, aux_1, rew, done]\n replace the temp. subgoal (a_t of the manager) with s_t+c-1\n \"\"\"\n manager_transition[-5] = manager_transition[0][-1]\n return manager_transition\n\n\n def demo_controller_sg_transition(controller_buffer, global_step, ep_len):\n \"\"\" This function is called at the end of each episode.\n *controller_buffer :\n controller_buffer.store(c_obs, next_c_obs, subgoal, \n next_subgoal, full_stt, next_full_stt, action, aux, next_aux, intrinsic_reward, done)\n subgoal for the controller -> for every C-step sequence,\n * h = s + g - s'\n g_t == s_t+c, g' = h(s,g,s'), g'' = h(s',g',s'') ...\n *don't do subgoal_transition and intrinsic reward computation while doing rollouts. \n controller transition (s_t||s_t+c & s_t+1||s_t+c+1) -> TD learning\n in terms of controller -> g_t:t+c-1\n index : 2 & 3\n \"\"\"\n # s_t+c should be the g_t for s_t. (e.g. s_10 == g_0 -> induces new proposal)\n sb, s1b, gb, g1b = controller_buffer.get_episodic_subgoal(global_step, ep_len) # returns each batch of the transition e\n # ep_len - ep_len % manager_propose_freq\n # Example : if an episode is 647 length, iterate till 640 (idx 639). Then, gb[640:646] should all be the terminal state. \n # 1. replace the subgoal proposals in 'gb'\n\n remainder = ep_len % manager_propose_freq\n for idx in range(0, ep_len - remainder - manager_propose_freq, manager_propose_freq): # iterate until the full proposal period is met.\n gb[idx] = s1b[idx + manager_propose_freq - 1] # s1b[idx + manager_propose_freq - 1] has the s_(idx + manager_propose_freq)\n for i in range(1, manager_propose_freq): #[t+1:t+c-1]\n gb[idx + i] = env.env_goal_transition(sb[idx + i], s1b[idx + i], gb[idx])\n # 2. 
fill the remaining transitions with terminal state observations\n # here, gb[-1], gb[-2], ... gb[-7] = sT in example. \n sT = s1b[-1]\n for idx in range(1,remainder + 1):\n gb[-idx] = sT\n # 3. copy the gb into g1b, with the index offset of 1. Then the last element of g1b is sT.\n g1b[:-1] = gb[1:]\n g1b[-1] = sT\n \n\n def get_action():\n \"\"\" return the action inference from the external controller.\n \"\"\"\n return env._get_demo_action()\n \n\n # divide the loop into two phases.\n # 1. rollout (collecting normal transition data (s, a, r, s', d))\n\n while not rospy.is_shutdown() and t < total_steps:\n if done or reset: # begin a new episode; env.reset() -> task specific goal!\n obs = env.reset()\n done = False\n reset = False\n ep_len = 0 # length of the episode\n ep_ret = 0 # episode return for the manager\n ep_low_ret = 0 # return of the intrinsic reward for low level controller \n episode_num += 1 # for every env.reset()\n\n # process observations\n full_stt = np.concatenate(obs['observation']['full_state'], axis=0) # s_0\n c_obs = obs['observation']['color_obs'] # o_0\n des_goal = np.concatenate(obs['desired_goal']['full_state'], axis=0) # g_des\n aux = obs['auxiliary'] # target object's position\n\n # infer subgoal for low-level policy\n # args for get_subgoal : obs, next_obs, sub_goal\n subgoal = get_demo_temp_subgoal() # action_dim = (1, stt_dim) -> defaults to 25-dim\n timesteps_since_subgoal = 0\n # apply noise on the subgoal\n # create a temporal high-level transition : requires off-policy correction\n # buffer store arguments : s_seq, o_seq, a_seq, obs, obs_1, dg, dg_1, stt, stt_1, act, aux, aux_1, rew, done \n manager_temp_transition = [[full_stt], [c_obs], [], c_obs, None, des_goal, des_goal, full_stt, None, subgoal, aux, None, 0, False]\n\n action = get_action() # a_t\n # TODO: make action on the gripper as categorical policy\n # action[-1] = reloc_rescale_gripper(action[-1])\n next_obs, manager_reward, done = env.step_demo(action, time_step=ep_len) # reward R_t -> for high-level manager -> for sum(R_t:t+c-1)\n randomize_world()\n # update episodic logs\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state) -> keep done = False at the max_timestep\n # DO NOT make done = True when it hits timeout\n ep_ret += manager_reward # reward in terms of achieving episodic task\n done = False if ep_len == max_ep_len else done\n if done:\n rospy.logwarn('=============== Now episode %d ends with done signal! ====================', episode_num)\n 
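# Per the docstrings above, the demo agent is assumed to achieve its subgoal at every step, so the bookkeeping below applies the goal transition h(s, g, s') = s + g - s' after each environment step.\n 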
next_full_stt = np.concatenate(next_obs['observation']['full_state']) # s_t+1\n next_c_obs = next_obs['observation']['color_obs'] # o_t+1\n next_aux = next_obs['auxiliary'] # aux_t+1 (the original read obs['auxiliary'], an apparent typo)\n\n # append manager transition\n manager_temp_transition[-1] = float(True)\n manager_temp_transition[-2] += manager_reward # sum(R_t:t+c)\n manager_temp_transition[0].append(next_full_stt) # append s_seq\n manager_temp_transition[1].append(next_c_obs) # append o_seq\n manager_temp_transition[2].append(action) # append a_seq \n # compute intrinsic reward\n intrinsic_reward = env.compute_intrinsic_reward(full_stt, next_full_stt, subgoal)\n\n # subgoal transition\n next_subgoal = env.env_goal_transition(full_stt, next_full_stt, subgoal)\n # add transition for low-level policy\n # (obs, obs1, sg, sg1, stt, stt1, act, aux, rew, done)\n controller_buffer.store(c_obs, next_c_obs, subgoal, \n next_subgoal, full_stt, next_full_stt, action, aux, next_aux, intrinsic_reward, done)\n # update observations and subgoal\n obs = next_obs\n subgoal = next_subgoal\n aux = next_aux\n\n # update logging steps\n ep_len += 1\n t += 1\n timesteps_since_manager += 1\n timesteps_since_subgoal += 1\n\n if timesteps_since_subgoal % manager_propose_freq == 0:\n # for every c-step, renew the subgoal estimation from the manager policy.\n timesteps_since_subgoal = 0\n manager_temp_transition[4] = c_obs # save o_t+c\n manager_temp_transition[8] = full_stt # save s_t+c\n manager_temp_transition[11] = aux # save aux_t+c\n manager_temp_transition[-1] = float(True) # done = True for manager, regardless of the episode \n \n # intentional seq appending is not required here since it always satisfies c step.\n\n # rospy.logwarn('Debug manager transitions. Debug manager transitions.')\n # print (manager_temp_transition[2])\n # rospy.logwarn('Debug manager transitions. Debug manager transitions.')\n manager_buffer.store(*manager_temp_transition)\n subgoal = get_demo_temp_subgoal() # action_dim = (1, stt_dim) -> defaults to 25-dim\n # Create a high level transition : note that the action of manager policy is subgoal\n # buffer store arguments : s_seq, o_seq, a_seq, obs, obs_1, dg, dg_1, stt, stt_1, act, aux, aux_1, rew, done \n manager_temp_transition = [[full_stt], [c_obs], [], c_obs, None, des_goal, des_goal, full_stt, None, subgoal, aux, None, 0, False]\n # update running mean-std normalizer\n update_rms(full_stt=full_stt, c_obs=c_obs, aux=aux, act=action) # do it.\n\n # if all the demo episodes have ended.\n\n os.chdir(demo_path)\n if os.path.exists(MAN_BUF_FNAME):\n print ('Found an existing manager buffer')\n ans = input(\"Delete the manager buffer? '1' / '0' \")\n rospy.logwarn('=======================================================')\n if ans == '1':\n os.remove(MAN_BUF_FNAME)\n if os.path.exists(CON_BUF_FNAME):\n print ('Found an existing controller buffer')\n ans = input(\"Delete the controller buffer? '1' / '0' \")\n 
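# input() returns a string, so the deletion prompts above and below compare against '1'; the original compared against the integer 1, which is never equal to the string input.\n 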
'1' / '0' \")\n if ans == 1:\n os.remove(CON_BUF_FNAME)\n rospy.logwarn('=======================================================')\n\n print ('Now saves the manager buffer in pickle format')\n with open (MAN_BUF_FNAME, 'wb') as f: \n pickle.dump(manager_buffer.return_buffer(), f)\n print ('Now saves the controller buffer in pickle format')\n with open (CON_BUF_FNAME, 'wb') as f2: \n pickle.dump(controller_buffer.return_buffer(), f2)\n\n\n ","sub_path":"hierarchical_DAgger.py","file_name":"hierarchical_DAgger.py","file_ext":"py","file_size_in_byte":24203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"229615194","text":"import math\nfrom coldtype.beziers import CurveCutter, splitCubicAtT\n\n\ndef bend(pen, curve, tangent=True):\n def _bend(pen):\n cc = CurveCutter(curve)\n ccl = cc.length\n dpl = pen.bounds().point(\"SE\").x\n xf = ccl/dpl\n\n def bender(x, y):\n p, tan = cc.subsegmentPoint(end=x*xf)\n px, py = p\n if tangent:\n a = math.sin(math.radians(180+tan)) * y\n b = math.cos(math.radians(180+tan)) * y\n return (px+a, py+b)\n #return (px, y+py)\n else:\n return (px, y+py)\n return pen.nonlinear_transform(bender)\n return _bend\n\n\ndef bend2(curve, tangent=True, offset=(0, 1)):\n def _bend(pen):\n bw = pen.bounds().w\n a = curve.value[0][-1][0]\n b, c, d = curve.value[1][-1]\n def bender(x, y):\n c1, c2 = splitCubicAtT(a, b, c, d, offset[0] + (x/bw)*offset[1])\n _, _a, _b, _c = c1\n if tangent:\n tan = math.degrees(math.atan2(_c[1] - _b[1], _c[0] - _b[0]) + math.pi*.5)\n ax = math.sin(math.radians(90-tan)) * y\n by = math.cos(math.radians(90-tan)) * y\n return _c[0]+ax, (y+_c[1])+by\n return _c[0], y+_c[1]\n return pen.nonlinear_transform(bender)\n return _bend\n\n\ndef bend3(curve, tangent=False, offset=(0, 1)):\n def _bend(pen):\n a = curve.value[0][-1][0]\n b, c, d = curve.value[1][-1]\n bh = pen.bounds().h\n \n def bender(x, y):\n c1, c2 = splitCubicAtT(a, b, c, d, offset[0] + (y/bh)*offset[1])\n _, _a, _b, _c = c1\n if tangent:\n tan = math.degrees(math.atan2(_c[1] - _b[1], _c[0] - _b[0]) + math.pi*.5)\n ax = math.sin(math.radians(90-tan)) * y\n by = math.cos(math.radians(90-tan)) * y\n return x+_c[0]+ax, (y+_c[1])+by\n return x+_c[0], _c[1]\n return pen.nonlinear_transform(bender)\n return _bend","sub_path":"coldtype/fx/warping.py","file_name":"warping.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"357518764","text":"\"\"\"empty message\n\nRevision ID: 66eb78fa61a0\nRevises: 1ffb6ceffcb8\nCreate Date: 2019-02-15 23:48:26.494307\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '66eb78fa61a0'\ndown_revision = '1ffb6ceffcb8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('awards_db',\n sa.Column('awards_id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=128), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('awarding_body', sa.String(length=21), nullable=True),\n sa.Column('awarding_details', sa.String(length=100), nullable=True),\n sa.Column('member_name', sa.String(length=21), nullable=True),\n sa.Column('year', sa.Integer(), nullable=True),\n sa.Column('research_Profile', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['research_Profile'], ['researcher_profile.researcher_id'], ),\n sa.PrimaryKeyConstraint('awards_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('awards_db')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/66eb78fa61a0_.py","file_name":"66eb78fa61a0_.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"249240656","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nfrom typing import Any, Callable, Optional\n\nfrom azure.core.credentials import AccessToken\nfrom .._internal import AadClient, AsyncContextManager\nfrom .._internal.get_token_mixin import GetTokenMixin\n\n\nclass ClientAssertionCredential(AsyncContextManager, GetTokenMixin):\n \"\"\"Authenticates a service principal with a JWT assertion.\n\n This credential is for advanced scenarios. :class:`~azure.identity.CertificateCredential` has a more\n convenient API for the most common assertion scenario, authenticating a service principal with a certificate.\n\n :param str tenant_id: ID of the principal's tenant. Also called its \"directory\" ID.\n :param str client_id: The principal's client ID\n :param func: A callable that returns a string assertion. The credential will call this every time it\n acquires a new token.\n :paramtype func: Callable[[], str]\n\n :keyword str authority: Authority of an Azure Active Directory endpoint, for example\n \"login.microsoftonline.com\", the authority for Azure Public Cloud (which is the default).\n :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds.\n :keyword List[str] additionally_allowed_tenants: Specifies tenants in addition to the specified \"tenant_id\"\n for which the credential may acquire tokens. Add the wildcard value \"*\" to allow the credential to\n acquire tokens for any tenant the application can access.\n\n .. admonition:: Example:\n\n .. literalinclude:: ../samples/credential_creation_code_snippets.py\n :start-after: [START create_client_assertion_credential_async]\n :end-before: [END create_client_assertion_credential_async]\n :language: python\n :dedent: 4\n :caption: Create a ClientAssertionCredential.\n \"\"\"\n\n def __init__(self, tenant_id: str, client_id: str, func: Callable[[], str], **kwargs: Any) -> None:\n self._func = func\n self._client = AadClient(tenant_id, client_id, **kwargs)\n super().__init__(**kwargs)\n\n async def __aenter__(self):\n await self._client.__aenter__()\n return self\n\n async def close(self) -> None:\n \"\"\"Close the credential's transport session.\"\"\"\n await self._client.close()\n\n async def _acquire_token_silently(self, *scopes: str, **kwargs: Any) -> Optional[AccessToken]:\n return self._client.get_cached_access_token(scopes, **kwargs)\n\n async def _request_token(self, *scopes: str, **kwargs: Any) -> AccessToken:\n assertion = self._func()\n token = await self._client.obtain_token_by_jwt_assertion(scopes, assertion, **kwargs)\n return token\n","sub_path":"sdk/identity/azure-identity/azure/identity/aio/_credentials/client_assertion.py","file_name":"client_assertion.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"335974199","text":"# encoding: utf-8\n\n# Author: Zhang Huangbin \n\nimport web\nfrom libs import languages, iredutils\nfrom libs.mysql import decorators, admin as adminlib, domain as domainlib\n\ncfg = web.iredconfig\nsession = web.config.get('_session')\n\nclass List:\n @decorators.require_global_admin\n @decorators.require_login\n def GET(self, cur_page=1):\n i = web.input()\n cur_page = int(cur_page)\n\n if cur_page == 0:\n cur_page == 1\n\n adminLib = adminlib.Admin()\n result = adminLib.listAccounts(cur_page=cur_page)\n if result[0] is True:\n (total, records) = (result[1], result[2])\n\n # Get list of global admins.\n allGlobalAdmins = []\n qr = adminLib.getAllGlobalAdmins()\n if qr[0] is True:\n allGlobalAdmins = qr[1]\n\n return web.render(\n 'mysql/admin/list.html',\n cur_page=cur_page,\n total=total,\n admins=records,\n allGlobalAdmins=allGlobalAdmins,\n msg=i.get('msg', None),\n )\n else:\n return web.seeother('/domains?msg=%s' % result[1])\n\n @decorators.require_global_admin\n @decorators.require_login\n def POST(self):\n i = web.input(_unicode=False, mail=[])\n\n self.mails = i.get('mail', [])\n self.action = i.get('action', None)\n msg = i.get('msg', None)\n\n adminLib = adminlib.Admin()\n\n if self.action == 'delete':\n result = adminLib.delete(mails=self.mails,)\n msg = 'DELETED_SUCCESS'\n elif self.action == 'disable':\n result = adminLib.enableOrDisableAccount(accounts=self.mails, active=False,)\n msg = 'DISABLED_SUCCESS'\n elif self.action == 'enable':\n result = adminLib.enableOrDisableAccount(accounts=self.mails, active=True,)\n msg = 'ENABLED_SUCCESS'\n else:\n result = (False, 'INVALID_ACTION')\n\n if result[0] is True:\n return web.seeother('/admins?msg=%s' % msg)\n else:\n return web.seeother('/admins?msg=?' + result[1])\n\nclass Profile:\n @decorators.require_login\n def GET(self, profile_type, mail):\n i = web.input()\n self.mail = web.safestr(mail)\n self.profile_type = web.safestr(profile_type)\n\n if not iredutils.isEmail(self.mail):\n return web.seeother('/admins?msg=INVALID_MAIL')\n\n if session.get('domainGlobalAdmin') is not True and session.get('username') != self.mail:\n # Don't allow to view/update other admins' profile.\n return web.seeother('/profile/admin/general/%s?msg=PERMISSION_DENIED' % session.get('username'))\n\n adminLib = adminlib.Admin()\n result = adminLib.profile(mail=self.mail)\n\n if result[0] is True:\n domainGlobalAdmin, profile = result[1], result[2]\n\n # Get all domains.\n self.allDomains = []\n\n domainLib = domainlib.Domain()\n resultOfAllDomains = domainLib.getAllDomains()\n if resultOfAllDomains[0] is True:\n self.allDomains = resultOfAllDomains[1]\n\n # Get managed domains.\n self.managedDomains = []\n\n qr = adminLib.getManagedDomains(admin=self.mail, domainNameOnly=True, listedOnly=True,)\n if qr[0] is True:\n self.managedDomains += qr[1]\n\n return web.render(\n 'mysql/admin/profile.html',\n mail=self.mail,\n profile_type=self.profile_type,\n domainGlobalAdmin=domainGlobalAdmin,\n profile=profile,\n languagemaps=languages.getLanguageMaps(),\n allDomains=self.allDomains,\n managedDomains=self.managedDomains,\n min_passwd_length=cfg.general.get('min_passwd_length', '0'),\n max_passwd_length=cfg.general.get('max_passwd_length', '0'),\n msg=i.get('msg'),\n )\n else:\n return web.seeother('/admins?msg=' + result[1])\n\n\n @decorators.require_login\n def POST(self, profile_type, mail):\n self.profile_type = web.safestr(profile_type)\n self.mail = web.safestr(mail)\n i = web.input(domainName=[],)\n\n if 
session.get('domainGlobalAdmin') is not True and session.get('username') != self.mail:\n # Don't allow to view/update others' profile.\n return web.seeother('/profile/admin/general/%s?msg=PERMISSION_DENIED' % session.get('username'))\n\n adminLib = adminlib.Admin()\n result = adminLib.update(\n profile_type=self.profile_type,\n mail=self.mail,\n data=i,\n )\n\n if result[0] is True:\n return web.seeother('/profile/admin/%s/%s?msg=PROFILE_UPDATED_SUCCESS' % (self.profile_type, self.mail))\n else:\n return web.seeother('/profile/admin/%s/%s?msg=%s' % (self.profile_type, self.mail, result[1],))\n\n\nclass Create:\n @decorators.require_global_admin\n @decorators.require_login\n def GET(self):\n i = web.input()\n return web.render(\n 'mysql/admin/create.html',\n languagemaps=languages.getLanguageMaps(),\n default_language=cfg.general.get('lang', 'en_US'),\n min_passwd_length=cfg.general.get('min_passwd_length'),\n max_passwd_length=cfg.general.get('max_passwd_length'),\n msg=i.get('msg'),\n )\n\n @decorators.require_global_admin\n @decorators.require_login\n def POST(self):\n i = web.input()\n self.mail = web.safestr(i.get('mail'))\n\n adminLib = adminlib.Admin()\n result = adminLib.add(data=i)\n\n if result[0] is True:\n # Redirect to assign domains.\n return web.seeother('/profile/admin/general/%s?msg=CREATED_SUCCESS' % self.mail)\n else:\n return web.seeother('/create/admin?msg=' + result[1])\n\n\n","sub_path":"controllers/mysql/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"45770409","text":"import base64\nfrom odoo import http\nfrom odoo.http import request\n\nfrom odoo.addons.website.controllers.main import Website\n\nclass Main(Website):\n\n\t#homepage\n\t\"\"\"@http.route('/', type='http', auth='public', website=True)\n\tdef index(self, **kw):\n\t\treturn request.render('lhr_portal.accueil', {} )\"\"\"\n\t@http.route('/page', auth=\"public\", website=True)\n\tdef blank(self):\n\t\treturn request.render('lhr_portal.blank', {})\n\n\n\t@http.route('//formulaire-contact', auth=\"public\", website=True, csrf=False)\n\tdef formulaire_devis(self, lang=None, **post):\n\t\t#récupération des pays\n\t\tcountry_environment = request.env['res.country'] \n\t\tcountries = country_environment.sudo().search([])\n\t\tlanguage = \"fr\" if lang == \"fr_FR\" else \"en\" if lang == \"en_EN\" else \"pt\"\n\t\ttitle = \"Demande de devis\" if language == \"fr\" else \"Quote request\" if language == \"en\" else \"Orçamento personalizado\"\n\t\treturn request.render('lhr_portal.create_operation', { 'countries' : countries, 'lang':language, 'source':post.get('source'), 'title':title,} )\n\n\n\t@http.route('/success', type='http', auth='public', website=True)\n\tdef create_devis(self, **post):\n\n\t\tcontact_environment = request.env['res.partner']\n\n\t\t#do we have to create this contact\n\t\tfullname = ' '.join([post.get('lastname'), post.get('firstname')])\n\n\t\tdomain = ['&', ('name', '=', fullname), ('email', '=', post.get('email'))]\n\t\texisting_contact = contact_environment.sudo().search(domain)\n\n\t\tcontact_id = 0\n\n\t\tif not existing_contact :\n\t\t\tcontact = contact_environment.sudo().create({\n\t\t\t\t'name': fullname,\n\t\t\t\t'phone': post.get('phone'),\n\t\t\t\t'mobile': post.get('mobile'),\n\t\t\t\t'street': post.get('street'),\n\t\t\t\t'zip': post.get('zip'),\n\t\t\t\t'city': post.get('city'),\n\t\t\t\t'email': str(post.get('email')),\n\t\t\t\t'country_id' : int(post.get('country')),\n\t\t\t\t'm_gender': 'man' if str(post.get('gender')) == 'man' else 'woman',\n\t\t\t\t'm_years_old': int(post.get('yo')),\n\t\t\t\t'm_language': post.get('lang'),\n\t\t\t\t'm_graft': False if str(post.get('grafted')) == \"no\" else True,\n\t\t\t\t'm_last_intervention': False if str(post.get('grafted')) == \"no\" else True,\n\t\t\t\t'm_intervention_type': 'fue' if str(post.get('grafted')) == \"fue\" else \"fut\" if str(post.get('grafted')) == \"fut\" else \"\",\n\t\t\t})\n\t\t\tcontact_id = int(contact.id)\n\t\telse :\n\t\t\tcontact_id = int(existing_contact.id)\n\n\t\t#determine baldness degree\n\t\tbaldness_environment = request.env['graft.baldness']\n\t\tdomain = ['&', ('m_gender', '=', post.get('gender')), ('m_case', '=', post.get('case'))]\n\t\tbaldness_id = baldness_environment.sudo().search(domain)\n\n\t\t#determine origin\n\t\torigin = \"\"\n\t\tif post.get('source', False):\n\t\t\t_website = str(post.get('source')).split('.')[1]\n\t\t\torigin = \"jalis\" if _website == \"lisboahair\" else \"arpega\" if _website == \"lisboa-hair\" else \"ehi\" if _website == \"ehi-company\" else \"\"\n\n\t\t#then create new operation with status\n\t\toperation_environment = request.env['graft.operation']\n\t\toperation = operation_environment.sudo().create({\n\t\t\t'm_patient': contact_id,\n\t\t\t'm_message': post.get('message'),\n\t\t\t'm_baldness': int(baldness_id),\n\t\t\t'm_patient_origin' : origin, \n\t\t\t'm_donor_neck_filename' : str(post.get('donor_neck').filename) if post.get('donor_neck',False) else None,\n\t\t\t'm_donor_neck' : 
base64.b64encode(post.get('donor_neck').read()) if post.get('donor_neck',False) else None,\n\t\t\t'm_donor_side_filename' : str(post.get('donor_side').filename) if post.get('donor_side',False) else None,\n\t\t\t'm_donor_side' : base64.b64encode(post.get('donor_side').read()) if post.get('donor_side',False) else None,\n\t\t\t'm_treat_face_filename' : str(post.get('treat_face').filename) if post.get('treat_face',False) else None,\n\t\t\t'm_treat_face' : base64.b64encode(post.get('treat_face').read()) if post.get('treat_face',False) else None,\n\t\t\t'm_treat_side_filename' : str(post.get('treat_side').filename) if post.get('treat_side',False) else None,\n\t\t\t'm_treat_side' : base64.b64encode(post.get('treat_side').read()) if post.get('treat_side',False) else None,\n\t\t\t'm_treat_top_filename' : str(post.get('treat_top').filename) if post.get('treat_top',False) else None,\n\t\t\t'm_treat_top' : base64.b64encode(post.get('treat_top').read()) if post.get('treat_top',False) else None,\n\t\t})\n\t\treturn request.render('lhr_portal.success', {'source':post.get('source'),'lang':post.get('lang'),} )","sub_path":"lhr_portal/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"300575700","text":"'''\nUnit test the classes in elementy_types module\n\nOnly contains tests for ArrayType since the remaining array_types classes\nare trivial. They are tested in the integrated_tests\n'''\n\nimport numpy as np\n\nfrom gnome.basic_types import world_point_type, oil_status, \\\n status_code_type\n\nfrom gnome.array_types import ArrayType\n\n\nclass TestArrayType_eq(object):\n\n \"\"\"\n contains functions that test __eq__ for ArrayType object\n \"\"\"\n\n def test_eq_wrong_shape(self):\n \"\"\" array shape is different for two ArrayType objects \"\"\"\n\n positions = ArrayType((), world_point_type)\n positions2 = ArrayType((3, ), world_point_type)\n assert positions != positions2\n\n def test_eq_wrong_dtype(self):\n \"\"\" dtype is different for two ArrayType objects \"\"\"\n\n positions = ArrayType((3, ), world_point_type)\n positions2 = ArrayType((3, ), np.int)\n assert positions != positions2 # wrong dtype\n\n def test_eq_wrong_init_value(self):\n \"\"\" initial_value is different for two ArrayType objects \"\"\"\n\n status_codes = ArrayType((), status_code_type,\n oil_status.in_water)\n status_codes2 = ArrayType((), status_code_type)\n assert status_codes != status_codes2 # no init conditions\n\n def test_eq_wrong_attr(self):\n \"\"\" added an attribute so two ArrayType objects are diffferent \"\"\"\n\n positions = ArrayType((), world_point_type)\n positions2 = ArrayType((3, ), world_point_type)\n positions2.test = 'test'\n assert positions != positions2 # wrong number of attributes\n\n def test_eq(self):\n \"\"\" both ArrayType objects are the same \"\"\"\n\n positions = ArrayType((3, ), world_point_type)\n positions2 = ArrayType((3, ), world_point_type)\n assert positions == positions2 # wrong shape\n\n\n","sub_path":"py_gnome/tests/unit_tests/test_array_types.py","file_name":"test_array_types.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"186504504","text":"#!/usr/bin/env python3\n# This is a light wrapper around https://github.com/ttscoff/gather-cli\n# It does not overwrite the file if it already exists.\n# And it appends the current date / time of download as a comment to the end of the file.\nimport argparse\nimport datetime\nimport os\nimport re\nimport subprocess\n\nHOME = os.environ.get(\"HOME\")\n\n\nclass Settings:\n DEBUG = False\n GATHER_DIR = f\"{HOME}/x/_gather/\"\n\n\ndef run(cmd: list[str]) -> str:\n _cmd = \" \".join(cmd)\n if Settings.DEBUG:\n print(\"------------\")\n print(\"running:\")\n print(_cmd)\n result = subprocess.run(cmd, capture_output=True, text=True)\n if result.returncode == 1:\n print(f\"[ERROR]: {_cmd}\")\n print(result.stdout)\n raise SystemExit\n if Settings.DEBUG:\n print(result.stdout)\n return result.stdout\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"\"\"Download and save markdown for the URL in the clipboard using gather.\n \"\"\"\n )\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n args = parser.parse_args()\n Settings.debug = args.verbose\n\n filename = \"%slug.md\"\n tmp_path = f\"{HOME}/tmp/{filename}\"\n final_dir = Settings.GATHER_DIR\n\n cmd = [\"gather\", \"-p\", \"--metadata-yaml\", \"-f\", tmp_path]\n out = run(cmd)\n\n # grab the actual tmp_path since filename uses %s\n m = re.search(r\"Saved to file: ([\\S]+)$\", out)\n fn = None\n if m:\n tmp_path = m.group(1)\n # grab the actual markdown filename off th tmp_path since filename uses %s\n fn = re.search(r\"([^\\/]+$)\", tmp_path).group(1)\n # only move to final destination if target file does not already exist\n cmd = [\"mv\", \"-n\", tmp_path, final_dir]\n run(cmd)\n else:\n print(\"[ERROR] No tmp file........\")\n raise SystemExit(1)\n\n # cleanup tmp file\n if os.path.exists(tmp_path):\n os.remove(tmp_path)\n\n # append downloaded date as comment to end of file\n dt = datetime.datetime.now().strftime(\"%Y-%m-%d--%H-%M\")\n msg = f\"\\n\"\n final_path = os.path.join(final_dir, fn)\n with open(final_path, \"a\") as f:\n f.write(msg)\n\n print(fn)\n cmd = [\"code\", final_path]\n run(cmd)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bin/dl-markdown.py","file_name":"dl-markdown.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"620510526","text":"import PySimpleGUI as psg\nimport datetime\n\n\n\nnull = ''\nspaces = ' '\nupdate = '09/18/2021'\nmoney_sign = ''\nmonth = datetime.datetime.today().month\n\n\n\nmonth_names = {\n 1 : 'January',\n 2 : 'February',\n 3 : 'March',\n 4 : 'April',\n 5 : 'May',\n 6 : 'June',\n 7 : 'July',\n 8 : 'August',\n 9 : 'September',\n 10 : 'October',\n 11 : 'November',\n 12 : 'December'\n}\n\n\n\ndef main():\n global money_sign\n\n background = '#D8D8D8'\n\n psg.SetOptions(background_color = background,\n element_background_color = background,\n text_element_background_color = background,\n window_location = (640, 480),\n margins=(5,5),\n text_color = 'Black',\n input_text_color = 'Black',\n button_color = ('Black', 'gainsboro'))\n\n layout = [\n [psg.Text('Made By : Alexandre0911@github.com'), psg.Text('{}Last Update Released : {}'.format(spaces*42, update))],\n [psg.Text('Amount Of Money At The Start Of The Month', size=(33)), psg.InputText(size=(42)), psg.Text('€ / $')],\n [psg.Text('Basic Necessities Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Free Time Money Percentage ', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Financial Liberty Money Percentage ', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Long-Term Expenses Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Financial Instruction Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Donations Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Investments Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Select Currency >>>'), psg.Button('Dollars'), psg.Text('-'), psg.Button('Euros')],\n [psg.Button('Do The Math!'), psg.Text(' '*113), psg.Button('Cancel')]\n ]\n\n window = psg.Window('Money Manager v1.1', layout)\n\n\n\n while True:\n\n event, values = window.read()\n\n if event == psg.WIN_CLOSED or event == 'Cancel':\n\n break\n\n elif event == 'Dollars':\n\n money_sign = ' $'\n psg.PopupOK(' Currency was set to Dollars.', keep_on_top=True)\n\n elif event == 'Euros':\n\n money_sign = ' €'\n psg.PopupOK(' Currency was set to Euros.', keep_on_top=True)\n\n elif event == 'Do The Math!':\n print('First Step >>> {}'.format(values))\n\n try:\n \n if '' in values.values():\n \n psg.PopupOK(' Some box(es) need to be filled!')\n\n except ValueError:\n pass\n\n if money_sign == '':\n psg.PopupOK(' You need to choose a currency!')\n\n elif money_sign != '':\n print('Second Step >>> {}'.format(values))\n\n total_percentage = float(values[1]) + float(values[2]) + float(values[3]) + float(values[4]) + float(values[5]) + float(values[6]) + float(values[7])\n\n if total_percentage == 100.0:\n print('Third Step >>> {}'.format(values))\n\n my_number = float(values[0])\n psg.PopupOK(math(my_number, money_sign, float(values[1]), float(values[2]), float(values[3]), float(values[4]), float(values[5]), float(values[6]), float(values[7])), keep_on_top=True)\n\n else:\n\n psg.PopupOK('The percentages need to add up to 100.0 and they are adding up to {}!'.format(total_percentage))\n \n window.close()\n\n\n\ndef math(x, y, bn, ft, fl, lte, fe, d, i):\n basic_necessities = x * (bn/100)\n free_time = x * (ft/100)\n financial_liberty = x * (fl/100)\n long_term_expenses = x * (lte/100)\n financial_instruction = x * (fe/100)\n donations = x * (d/100)\n investments = x * (i/100)\n\n\n\n file = 
open('C:\\\\Users\\\\Public\\\\Desktop\\\\Money Management ({}).txt'.format(month_names[month]), mode='w+', encoding='utf-8')\n\n try:\n file.write('''Money For Basic Necessities ({}%) >>> {:.2f}{}\nMoney For Free Time ({}%) >>> {:.2f}{}\nMoney For Financial Liberty ({}%) >>> {:.2f}{}\nMoney For Long-Term Expenses ({}%) >>> {:.2f}{}\nMoney For Financial Education ({}%) >>> {:.2f}{}\nMoney For Donations ({}%) >>> {:.2f}{}\nMoney For Investments ({}%) >>> {:.2f}{}'''.format(bn, basic_necessities, y, ft, free_time, y, fl, financial_liberty, y, lte, long_term_expenses, y, fe, financial_instruction, y, d, donations, y, i, investments, y))\n finally:\n file.close()\n\n\n\n a = 'Money For Basic Necessities >>> {:.2f}{}'.format(basic_necessities, y)\n b = 'Money For Free Time >>> {:.2f}{}'.format(free_time, y)\n c = 'Money For Financial Liberty >>> {:.2f}{}'.format(financial_liberty, y)\n d = 'Money For Long-Term Expenses >>> {:.2f}{}'.format(long_term_expenses, y)\n e = 'Money For Financial Education >>> {:.2f}{}'.format(financial_instruction, y)\n f = 'Money For Donations >>> {:.2f}{}'.format(donations, y)\n g = 'Money For Investments >>> {:.2f}{}'.format(investments, y)\n\n saved = 'Document Saved to Desktop as Money Management ({}).txt'.format(month_names[month])\n\n return '{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\\n{}'.format(a, b, c, d, e, f, g, saved)\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"413435360","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/1/30 9:31\n# @Author : 马飞\n# @File : sync_mysql2mongo.py\n# @Software: PyCharm\n# @func:通过urllib库以以节字泫发送(https协议)post请求\n\nimport sys,time\nimport traceback\nimport configparser\nimport warnings\nimport pymysql\nimport datetime\nimport smtplib\nimport json\nfrom email.mime.text import MIMEText\nimport urllib.parse\nimport urllib.request\nimport ssl\n#ssl._create_default_https_context = ssl._create_unverified_context\n\ndef send_mail465(p_from_user,p_from_pass,p_to_user,p_title,p_content):\n to_user=p_to_user.split(\",\")\n try:\n msg = MIMEText(p_content,'html','utf-8')\n msg[\"Subject\"] = p_title\n msg[\"From\"] = p_from_user\n msg[\"To\"] = \",\".join(to_user)\n server = smtplib.SMTP_SSL(\"smtp.exmail.qq.com\", 465)\n server.set_debuglevel(0)\n server.login(p_from_user, p_from_pass)\n server.sendmail(p_from_user, to_user, msg.as_string())\n server.quit()\n return 0\n except smtplib.SMTPException as e:\n print(e)\n return -1\n\ndef send_mail(p_from_user,p_from_pass,p_to_user,p_title,p_content):\n to_user=p_to_user.split(\",\")\n try:\n msg = MIMEText(p_content,'html','utf-8')\n msg[\"Subject\"] = p_title\n msg[\"From\"] = p_from_user\n msg[\"To\"] = \",\".join(to_user)\n server = smtplib.SMTP(\"smtp.exmail.qq.com\", 25)\n server.set_debuglevel(0)\n server.login(p_from_user, p_from_pass)\n server.sendmail(p_from_user, to_user, msg.as_string())\n server.quit()\n except smtplib.SMTPException as e:\n print(e)\n\ndef exception_info():\n e_str=traceback.format_exc()\n return e_str[e_str.find(\"pymysql.err.\"):]\n\ndef get_now():\n return datetime.datetime.now().strftime(\"%H:%M:%S\")\n\ndef get_time():\n return datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef get_date():\n return datetime.datetime.now().strftime(\"%Y%m%d\")\n\ndef get_ds_mysql(ip,port,service ,user,password):\n conn = pymysql.connect(host=ip, port=int(port), user=user, passwd=password, db=service, charset='utf8')\n return conn\n\ndef get_db_mysql(config):\n return get_ds_mysql(config['db_mysql_ip'],config['db_mysql_port'],config['db_mysql_service'],\\\n config['db_mysql_user'],config['db_mysql_pass'])\n\ndef get_config(fname):\n config = {}\n cfg=configparser.ConfigParser()\n cfg.read(fname,encoding=\"utf-8-sig\")\n db_mysql = cfg.get(\"sync\",\"db_mysql\")\n config['db_mysql_ip'] = db_mysql.split(':')[0]\n config['db_mysql_port'] = db_mysql.split(':')[1]\n config['db_mysql_service'] = db_mysql.split(':')[2]\n config['db_mysql_user'] = db_mysql.split(':')[3]\n config['db_mysql_pass'] = db_mysql.split(':')[4]\n config['send_gap'] = cfg.get(\"sync\", \"send_gap\")\n config['send_user'] = cfg.get(\"sync\", \"send_mail_user\")\n config['send_pass'] = cfg.get(\"sync\", \"send_mail_pass\")\n config['acpt_user'] = cfg.get(\"sync\", \"acpt_mail_user\")\n config['mail_title'] = cfg.get(\"sync\", \"mail_title\")\n config['hopson_interface'] = cfg.get(\"sync\",\"hopson_interface\")\n config['db_mysql_string'] = config['db_mysql_ip'] +':'+config['db_mysql_port'] +'/'+config['db_mysql_service']\n return config\n\ndef check_mysql_tab_exists(config,tab):\n db=config['db_mysql_desc']\n cr=db.cursor()\n sql=\"\"\"select count(0) from information_schema.tables\n where table_schema=database() and table_name='{0}'\"\"\".format(tab )\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n db.commit()\n return rs[0]\n\ndef get_mysql_tab_rows(config,tab):\n db=config['db_mysql_desc3']\n cr=db.cursor()\n sql=\"\"\"select count(0) from 
{0}\"\"\".format(tab )\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n db.commit()\n return rs[0]\n\ndef check_mysql_tab_exists_pk(config,tab):\n db=config['db_mysql_sour']\n cr=db.cursor()\n sql = \"\"\"select count(0) from information_schema.columns\n where table_schema=database() and table_name='{0}' and column_key='PRI'\"\"\".format(tab)\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n db.commit()\n return rs[0]\n\ndef get_seconds(b):\n a=datetime.datetime.now()\n return int((a-b).total_seconds())\n\ndef print_dict(config):\n print('-'.ljust(85,'-'))\n print(' '.ljust(3,' ')+\"name\".ljust(20,' ')+'value')\n print('-'.ljust(85,'-'))\n for key in config:\n print(' '.ljust(3,' ')+key.ljust(20,' ')+'=',config[key])\n print('-'.ljust(85,'-'))\n\ndef format_sql(v_sql):\n return v_sql.replace(\"\\\\\",\"\\\\\\\\\").replace(\"'\",\"\\\\'\")\n\ndef init(config):\n config = get_config(config)\n #print dict\n print_dict(config)\n return config\n\n#判断待办任务是否推送过消息\ndef isSend(config,id):\n db = get_db_mysql(config)\n cr = db.cursor()\n sql = \"select count(0) from ot_workitem_ext where id={0} and isSend='Y'\".format(id)\n cr.execute(sql)\n rs=cr.fetchone()\n if rs[0]>0:\n return True\n else:\n return False\n\ndef get_relationId(config,v_tab,v_bizid):\n db = get_db_mysql(config)\n cr = db.cursor()\n sql = \"select id from {0} where objectid='{1}'\".format(v_tab,v_bizid)\n try:\n cr.execute(sql)\n rs = cr.fetchone()\n cr.close()\n return rs[0]\n except:\n return ''\n\ndef get_itemComment(config,objectid):\n db = get_db_mysql(config)\n cr = db.cursor()\n sql = \"select itemComment from ot_workitemfinished where objectid='{0}'\".format(objectid)\n cr.execute(sql)\n rs1 = cr.fetchone()\n if rs1 is None or rs1[0]=='':\n sql='''\n SELECT TEXT FROM ot_comment m \n WHERE m.WorkItemId='{0}'\n and m.modifiedtime=(select max(modifiedtime) from ot_comment \n where WorkItemId='{1}') limit 1\n '''.format(objectid,objectid)\n cr.execute(sql)\n rs2 = cr.fetchone()\n cr.close()\n if rs2 is None or rs2[0] == '':\n return ''\n else:\n return rs2[0]\n else:\n cr.close()\n return rs1[0]\n\n\n#处理消息类型:1.延时闭店,2.携物出门,3.营运期施工\ndef send_message_easylife(config,debug):\n db = get_db_mysql(config)\n db2 = get_db_mysql(config)\n cr = db.cursor()\n cr2 = db.cursor()\n\n sql= \"\"\"\n SELECT \n b.workflowCode AS '流程模板编码',\n CONCAT('i_',b.workflowCode) AS '流程模板表',\n b.state AS '实例状态',\n b.bizobjectid AS '流程模板表ID', \n a.participantName AS '参与者姓名',\n a.displayName AS '活动显示名称',\n #a.ActionName AS '操作名称',\n IF(a.ActionName='' AND a.approval!=0,'Submit',a.ActionName) AS '操作名称', \n a.itemComment AS '当前征询意见填写的意见',\n a.finishtime AS '接受时间',\n e.id AS '扩展表主键',\n a.objectid as 'OBJECTID',\n a.InstanceId as 'instanceId'\n FROM ot_workitemfinished a,\n ot_instancecontext b,\n ot_workitemfinished_ext e \n WHERE a.InstanceId=b.objectid \n AND e.task_id= a.objectID \n AND a.FinishTime>'2019-07-15'\n AND b.workflowcode LIKE 'hsh_%'\n AND e.isSend='N' \n order by a.finishtime \n \"\"\"\n\n cr.execute(sql)\n rs = cr.fetchall()\n for i in list(rs):\n n_relationId = get_relationId(config, i[1],i[3])\n v_itemComment = get_itemComment(config, i[10])\n message = {\n 'relationId' : n_relationId,\n 'state' : i[2],\n 'workflowCode' : i[0],\n 'participantName': i[4],\n 'displayName' : i[5],\n 'actionName' : i[6],\n 'itemComment' : v_itemComment, #i[7]\n 'receiveTime' : str(i[8]),\n 'instanceId' : str(i[11])\n }\n\n v_message = json.dumps(message)\n print('v_message=',v_message)\n\n values = {\n 'message': v_message\n }\n\n #调用接口推送消息\n n_failure_time = 
0\n while True:\n try:\n url = config['hopson_interface']\n context = ssl._create_unverified_context()\n data = urllib.parse.urlencode(values).encode(encoding='UTF-8')\n print('data=',data)\n req = urllib.request.Request(url,data=data)\n res = urllib.request.urlopen(req,context=context)\n res = json.loads(res.read())\n print(res,res['code'])\n if res['code'] == 200:\n print('接口调用成功!')\n cr.execute(\"update ot_workitemfinished_ext t set isSend='Y' where id={0}\".format(i[9]))\n db.commit()\n print('扩展表状态更新成功!')\n n_failure_time=0\n break\n else:\n print(res['msg'])\n break\n except:\n print(traceback.format_exc())\n print('接口调用失败,第{0}次重试中...!'.format(str(n_failure_time+1)))\n n_failure_time = n_failure_time + 1\n time.sleep(60)\n time.sleep(5)\n cr.close()\n\n#查询是否有加载待办任务扩展信息\ndef get_undone_task_ext(config):\n db=get_db_mysql(config)\n cr=db.cursor()\n sql='''select count(0)\n FROM ot_workitemfinished t\n WHERE t.receiveTime>='2019-07-03'\n AND t.workflowcode LIKE 'hsh%'\n AND NOT EXISTS(SELECT 1 FROM ot_workitemfinished_ext e\n WHERE e.task_id= t.objectID)\n '''\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n return rs[0]\n\n#加载待办任务至扩展表【合生通】\ndef write_undone_task_ext(config):\n db=get_db_mysql(config)\n cr=db.cursor()\n if get_undone_task_ext(config)==0:\n print('未找到新的待办任务!')\n else:\n sql='''INSERT INTO ot_workitemfinished_ext(inst_id,task_id) \n SELECT t.instanceid, t.objectID\n FROM ot_workitemfinished t\n WHERE t.receiveTime>='2019-07-03'\n AND t.workflowcode LIKE 'hsh%'\n AND NOT EXISTS(SELECT 1 FROM ot_workitemfinished_ext e\n WHERE e.task_id= t.objectID)\n '''\n cr.execute(sql)\n db.commit()\n print('采集到新的待办任务!')\n cr.close()\n\n#消息推送\ndef push(config,debug):\n #循环临听待办任务表变化,变更新扩展表进行消息推送处理\n while True:\n #将未推送消息待办任务写入扩展表\n write_undone_task_ext(config)\n #推送合生通消息\n send_message_easylife(config,debug)\n #休眠\n print('休眠 {0}秒...'.format(config['send_gap']))\n time.sleep(int(config['send_gap']))\n\ndef main():\n #init variable\n config = \"\"\n debug = False\n warnings.filterwarnings(\"ignore\")\n #get parameter from console\n for p in range(len(sys.argv)):\n if sys.argv[p] == \"-conf\":\n config = sys.argv[p + 1]\n elif sys.argv[p] == \"-debug\":\n debug = True\n\n #初始化\n config=init(config)\n\n #process\n push(config,debug)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"h3bpm_sender/h3bpm_easylife_sender_gray/h3bpm_easylife_sender.py","file_name":"h3bpm_easylife_sender.py","file_ext":"py","file_size_in_byte":11721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"286635707","text":"\r\n'''\r\nO código inicial para a parte de data augentation foi retirado deste site:\r\nhttp://sigmoidal.ai/reduzindo-overfitting-com-data-augmentation/\r\ne feitas algumas adaptações\r\n'''\r\n\r\n# importar os pacotes necessários\r\nimport numpy as np\r\nfrom keras.preprocessing.image import load_img\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom os import listdir\r\nimport os\r\n\r\npastas = [arq for arq in listdir(\"caracteres/\")]\r\n\r\nfor pasta in pastas:\r\n\r\n\r\n\tarquivos = [arq for arq in listdir(\"caracteres/\"+pasta+\"/\")]\r\n\tos.mkdir(\"aumenta/\"+pasta)\r\n\timagens = [arq for arq in arquivos if arq.lower().endswith(\".jpg\")]\r\n\timagensCriadas=0\r\n\r\n\tnumeroDeImagens=len(imagens)\r\n\r\n\tif numeroDeImagens<30:\r\n\t\treplicas=25\r\n\telif numeroDeImagens<40:\r\n\t\treplicas=17\r\n\telif numeroDeImagens<50:\r\n\t\treplicas=13\r\n\telif numeroDeImagens<60:\r\n\t\treplicas=10\r\n\telif numeroDeImagens<80:\r\n\t\treplicas=9\r\n\r\n\telif numeroDeImagens<90:\r\n\t\treplicas=6\r\n\r\n\telif numeroDeImagens<110:\r\n\t\treplicas=5\r\n\r\n\telif numeroDeImagens<150:\r\n\t\treplicas=4\r\n\r\n\telif numeroDeImagens<210:\r\n\t\treplicas=3\r\n\r\n\telif numeroDeImagens<270:\r\n\t\treplicas=2\r\n\r\n\telse:\r\n\t\treplicas=1\r\n\r\n\tfor imagemCaracter in imagens:\r\n\t\t# definir caminhos da imagem original e diretório do output\r\n\t\tIMAGE_PATH = \"caracteres/\"+pasta+\"/\"+imagemCaracter\r\n\t\tOUTPUT_PATH = \"aumenta/\"+pasta+\"/\"\r\n\t\t \r\n\t\t# carregar a imagem original e converter em array\r\n\t\timage = load_img(IMAGE_PATH)\r\n\t\timage = img_to_array(image)\r\n\t\t \r\n\t\t# adicionar uma dimensão extra no array\r\n\t\timage = np.expand_dims(image, axis=0)\r\n\t\t \r\n\t\t# criar um gerador (generator) com as imagens do\r\n\t\t# data augmentation\r\n\t\timgAug = ImageDataGenerator( rotation_range=8,\r\n\t\t\t\t\t\tzoom_range=[0.9,1.0],\r\n\t\t\t\t\t\t\t\t\tbrightness_range=[0.1,3],\r\n\t\t\t\t fill_mode='nearest', horizontal_flip=False)\r\n\t\timgGen = imgAug.flow(image, save_to_dir=OUTPUT_PATH,\r\n\t\t\t\t save_format='jpg', save_prefix='0000001')\r\n\r\n\t\t# gerar 10 imagens por data augmentation\r\n\t\tcounter = 0\r\n\t\tfor (i, newImage) in enumerate(imgGen):\r\n\t\t\tcounter += 1\r\n\t\t\timagensCriadas+=1\r\n\r\n\t\t\t# ao gerar 10 imagens, parar o loop\r\n\t\t\tif counter == replicas:\r\n\t\t\t\tbreak\r\n\r\n\t\tif imagensCriadas>500:\r\n\t\t\tbreak\r\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"597166420","text":"# coding: utf8\n\nfrom com.handler.base_handler import BaseHandler\nfrom com.manager.article_manager import ArticleManager\nfrom com.manager.category_manager import CategoryManager\nimport com.util.constant as constant\n\nclass MainHandler(BaseHandler):\n \n __article_manager = ArticleManager()\n __category_manager = CategoryManager()\n \n def __init__(self, *args, **argkw):\n super(MainHandler, self).__init__(*args, **argkw)\n self.article_manager = self.__article_manager\n self.category_manager = self.__category_manager\n \n def get(self, param=None):\n try:\n page = int(self.get_argument('page', 1))\n except:\n page = 1\n \n start_index = (page - 1) * constant.PAGE_SIZE\n \n # 获取所有目录\n cats = self.category_manager.get_category()\n \n # 取前5篇最新的文章 \n articles = self.article_manager.get_article(start_index=start_index, count=constant.PAGE_SIZE)\n target_index = 'index.html'\n if self.get_platform() != 'PC':\n target_index = 'index_mobile.html' \n self.render(target_index, cats=cats, articles=articles, current_page=page)\n","sub_path":"com/handler/main_handler.py","file_name":"main_handler.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"362695902","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nimport os\nimport csv\nimport unittest\n\n#Worked with: Grace Coleman and Zita Jameson\n\n\ndef get_titles_from_search_results(filename):\n \"\"\"\n Write a function that creates a BeautifulSoup object on \"search_results.htm\". Parse\n through the object and return a list of tuples containing book titles (as printed on the Goodreads website) \n and authors in the format given below. Make sure to strip() any newlines from the book titles and author names.\n\n [('Book title 1', 'Author 1'), ('Book title 2', 'Author 2')...]\n \"\"\"\n f=open(filename, 'r')\n fileData= f.read()\n f.close()\n\n soup= BeautifulSoup(fileData, 'lxml')\n bookTitles= soup.find_all('a', class_= 'bookTitle')\n bookInfo= []\n for tag in bookTitles:\n bookInfo.append(tag.text.strip())\n authorsList=[]\n authorTags=soup.find_all('div', class_='authorName__container')\n for item in authorTags:\n authorsList.append(item.text.strip())\n information=[]\n for i in range(len(bookTitles)):\n tup=(bookInfo[i], authorsList[i])\n information.append(tup)\n return information\n\n\ndef get_search_links(): \n \"\"\"\n Write a function that creates a BeautifulSoup object after retrieving content from\n \"https://www.goodreads.com/search?q=fantasy&qid=NwUsLiA2Nc\". Parse through the object and return a list of\n URLs for each of the first ten books in the search using the following format:\n\n ['https://www.goodreads.com/book/show/84136.Fantasy_Lover?from_search=true&from_srp=true&qid=NwUsLiA2Nc&rank=1', ...]\n\n Notice that you should ONLY add URLs that start with \"https://www.goodreads.com/book/show/\" to \n your list, and , and be sure to append the full path to the URL so that the url is in the format \n “https://www.goodreads.com/book/show/kdkd\".\n\n \"\"\"\n\n url_lst= []\n url= 'https://www.goodreads.com/search?q=fantasty&qid=NwUsLiA2Nc'\n r=requests.get(url)\n soup= BeautifulSoup(r.text, 'html.parser')\n anchor= soup.find_all('a', class_= 'bookTitle')\n for x in anchor:\n link= x['href']\n if link.startswith('/book/show/'):\n i='https://www.goodreads.com'+ str(link)\n url_lst.append(i)\n return url_lst[:10]\n \ndef get_book_summary(book_url):\n \"\"\"\n Write a function that creates a BeautifulSoup object that extracts book\n information from a book's webpage, given the URL of the book. Parse through\n the BeautifulSoup object, and capture the book title, book author, and number \n of pages. This function should return a tuple in the following format:\n\n ('Some book title', 'the book's author', number of pages)\n\n HINT: Using BeautifulSoup's find() method may help you here.\n You can easily capture CSS selectors with your browser's inspector window.\n Make sure to strip() any newlines from the book title and number of pages.\n \"\"\"\n r=requests.get(book_url)\n soup= BeautifulSoup(r.text, 'lxml')\n anchor= soup.find('h1', class_='gr-h1 gr-h1--serif')\n\n title= anchor.text.strip()\n anchor2=soup.find('a', class_='authorName')\n\n author= anchor2.text.strip()\n anchor3= soup.find('span', itemprop='numberOfPages')\n page_count= int(anchor3.text.strip(' pages'))\n tup= (title, author, page_count)\n return tup\n\n \n\ndef summarize_best_books(filepath):\n \"\"\"\n Write a function to get a list of categories, book title and URLs from the \"BEST BOOKS OF 2020\"\n page in \"best_books_2020.htm\". 
This function should create a BeautifulSoup object from a \n filepath and return a list of (category, book title, URL) tuples.\n \n For example, if the best book in category \"Fiction\" is \"The Testaments (The Handmaid's Tale, #2)\", with URL\n https://www.goodreads.com/choiceawards/best-fiction-books-2020, then you should append \n (\"Fiction\", \"The Testaments (The Handmaid's Tale, #2)\", \"https://www.goodreads.com/choiceawards/best-fiction-books-2020\") \n to your list of tuples.\n \"\"\"\n clist=[]\n blist=[]\n ulist=[]\n tups=[]\n\n file1= open(filepath, 'r')\n data= file1.read()\n file1.close()\n soup=BeautifulSoup(data, \"html.parser\")\n cats=soup.find_all('h4', class_= 'category__copy')\n for category in cats:\n clist.append(category.text.strip())\n bestb=soup.find_all('img', class_= \"category__winnerImage\")\n for book in bestb:\n title=book['alt']\n blist.append(title)\n urls= soup.find_all('div', class_= 'category clearFix')\n for url in urls:\n ulist.append(url.find('a')['href'])\n for x in range(len(ulist)):\n tup= (clist[x],blist[x], ulist[x])\n tups.append(tup)\n return tups\n\n\ndef write_csv(data, filename):\n \"\"\"\n Write a function that takes in a list of tuples (called data, i.e. the\n one that is returned by get_titles_from_search_results()), writes the data to a \n csv file, and saves it to the passed filename.\n\n The first row of the csv should contain \"Book Title\" and \"Author Name\", and\n respectively as column headers. For each tuple in data, write a new\n row to the csv, placing each element of the tuple in the correct column.\n\n When you are done your CSV file should look like this:\n\n Book title,Author Name\n Book1,Author1\n Book2,Author2\n Book3,Author3\n ......\n\n This function should not return anything.\n \"\"\"\n with open(filename, 'w', newline='', encoding='utf-8') as f:\n f= csv.writer(f, delimiter=',')\n f.writerow(['Book title', \"Author Name\"])\n for line in data:\n f.writerow(line)\n\n\ndef extra_credit(filepath):\n \"\"\"\n EXTRA CREDIT\n\n Please see the instructions document for more information on how to complete this function.\n You do not have to write test cases for this function.\n \"\"\"\n pass\n\nclass TestCases(unittest.TestCase):\n\n # call get_search_links() and save it to a static variable: search_urls\n\n\n def test_get_titles_from_search_results(self):\n # call get_titles_from_search_results() on search_results.htm and save to a local variable\n search_urls=get_titles_from_search_results('search_results.htm')\n # check that the number of titles extracted is correct (20 titles)\n self.assertEqual(len(search_urls), 20)\n # check that the variable you saved after calling the function is a list\n self.assertIsInstance(search_urls, list)\n # check that each item in the list is a tuple\n for x in search_urls:\n self.assertIsInstance(x, tuple)\n # check that the first book and author tuple is correct (open search_results.htm and find it)\n self.assertEqual(search_urls[0], (\"Harry Potter and the Deathly Hallows (Harry Potter, #7)\", 'J.K. 
Rowling'))\n # check that the last title is correct (open search_results.htm and find it)\n self.assertEqual(search_urls[-1][0], 'Harry Potter: The Prequel (Harry Potter, #0.5)')\n \n def test_get_search_links(self):\n # check that TestCases.search_urls is a list\n search_urls=get_search_links()\n # check that the length of TestCases.search_urls is correct (10 URLs)\n self.assertEqual(len(search_urls), 10)\n\n # check that each URL in the TestCases.search_urls is a string\n for x in search_urls:\n self.assertIsInstance(x, str)\n # check that each URL contains the correct url for Goodreads.com followed by /book/show/\n for x in search_urls:\n self.assertTrue(\"/book/show/\" in x)\n\n def test_get_book_summary(self):\n # create a local variable – summaries – a list containing the results from get_book_summary()\n # for each URL in TestCases.search_urls (should be a list of tuples)\n search_urls= get_search_links()\n summaries=[]\n for url in search_urls:\n summaries.append(get_book_summary(url))\n\n \n\n\n # check that the number of book summaries is correct (10)\n self.assertEqual(len(summaries), 10)\n # check that each item in the list is a tuple\n for x in summaries:\n self.assertEqual(type(x), tuple)\n self.assertEqual(len(x), 3)\n \n # check that each tuple has 3 elements\n\n # check that the first two elements in the tuple are string\n self.assertIsInstance(summaries[0][0], str)\n self.assertIsInstance(summaries[0][1], str) \n # check that the third element in the tuple, i.e. pages is an int\n self.assertIsInstance(summaries[0][2], int)\n # check that the first book in the search has 337 pages\n self.assertEqual(summaries[0][2], 337)\n\n def test_summarize_best_books(self):\n # call summarize_best_books and save it to a variable\n summarize=summarize_best_books('best_books_2020.htm')\n # check that we have the right number of best books (20)\n self.assertEqual(len(summarize), 20)\n # assert each item in the list of best books is a tuple\n for x in summarize:\n self.assertEqual(type(x), tuple)\n self.assertEqual(len(x), 3)\n # check that each tuple has a length of 3\n\n # check that the first tuple is made up of the following 3 strings:'Fiction', \"The Midnight Library\", 'https://www.goodreads.com/choiceawards/best-fiction-books-2020'\n self.assertEqual(summarize[0], ('Fiction', \"The Midnight Library\", 'https://www.goodreads.com/choiceawards/best-fiction-books-2020'))\n # check that the last tuple is made up of the following 3 strings: 'Picture Books', 'Antiracist Baby', 'https://www.goodreads.com/choiceawards/best-picture-books-2020'\n self.assertEqual(summarize[-1],('Picture Books', 'Antiracist Baby', 'https://www.goodreads.com/choiceawards/best-picture-books-2020'))\n\n def test_write_csv(self):\n # call get_titles_from_search_results on search_results.htm and save the result to a variable\n titles=get_titles_from_search_results('search_results.htm')\n # call write csv on the variable you saved and 'test.csv'\n write_csv(titles, 'test.csv')\n # read in the csv that you wrote (create a variable csv_lines - a list containing all the lines in the csv you just wrote to above)\n\n csv_lines=[]\n with open('test.csv') as file:\n csv_f= csv.reader(file)\n for x in csv_f:\n csv_lines.append(x)\n # check that there are 21 lines in the csv\n self.assertEqual(len(csv_lines), 21)\n # check that the header row is correct\n self.assertEqual(csv_lines[0], [\"Book title\", \"Author Name\"])\n # check that the next row is 'Harry Potter and the Deathly Hallows (Harry Potter, #7)', 'J.K. 
Rowling'\n self.assertEqual(csv_lines[1], ['Harry Potter and the Deathly Hallows (Harry Potter, #7)', 'J.K. Rowling'])\n # check that the last row is 'Harry Potter: The Prequel (Harry Potter, #0.5)', 'Julian Harrison (Introduction)'\n self.assertEqual(csv_lines[-1], ['Harry Potter: The Prequel (Harry Potter, #0.5)', 'Julian Harrison (Introduction)'])\n\nif __name__ == '__main__':\n print(extra_credit(\"extra_credit.htm\"))\n unittest.main(verbosity=2)","sub_path":"Project2.py","file_name":"Project2.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"1117896","text":"from django.contrib import admin\n\nfrom home.models import Setting, ContactFormMessage, UserProfile, FAQ\n\n\nclass ContactFormMessageAdmin(admin.ModelAdmin):\n list_display = ['name','email','subject','status']\n list_filter = ['status']\nclass UserProfileAdmin(admin.ModelAdmin):\n list_display = ['user','phone','university','image_tag']\n list_filter = ['university']\nclass FaqAdmin(admin.ModelAdmin):\n list_display = ['ordernumber', 'question', 'answer', 'status']\n list_filter = ['status']\nadmin.site.register(Setting)\nadmin.site.register(ContactFormMessage,ContactFormMessageAdmin)\nadmin.site.register(UserProfile,UserProfileAdmin)\nadmin.site.register(FAQ,FaqAdmin)","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"268687772","text":"import re\r\nimport time\r\nfrom slackclient import SlackClient\r\n\r\n\r\n# instantiate Slack client\r\nslack_client = SlackClient('your bot token goes here')\r\n\r\n# constants\r\nRTM_READ_DELAY = 1 # 1 second delay between reading from RTM\r\nEXAMPLE_COMMAND = \"Question:\"\r\nEXAMPLE_COMMAND2 = \"yes\"\r\nEXAMPLE_COMMAND3 = \"no\"\r\nEXAMPLE_COMMAND4 = \"answer:\"\r\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\r\n\r\n\r\n\r\n\r\ndef parse_bot_commands(slack_events):\r\n \"\"\"\r\n Parses a list of events coming from the Slack RTM API to find bot commands.\r\n If a bot command is found, this function returns a tuple of command and channel.\r\n If its not found, then this function returns None, None.\r\n \"\"\"\r\n for event in slack_events:\r\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\r\n user_id, message = parse_direct_mention(event[\"text\"])\r\n if user_id == starterbot_id:\r\n return message, event[\"channel\"]\r\n return None, None\r\n\r\ndef parse_direct_mention(message_text):\r\n \"\"\"\r\n Finds a direct mention (a mention that is at the beginning) in message text\r\n and returns the user ID which was mentioned. If there is no direct mention, returns None\r\n \"\"\"\r\n matches = re.search(MENTION_REGEX, message_text)\r\n # the first group contains the username, the second group contains the remaining message\r\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)\r\n\r\ndef define_question(command):\r\n global question \r\n question = command\r\n\r\ndef handle_command(command, channel):\r\n \r\n \"\"\"\r\n Executes bot command if the command is known\r\n \"\"\"\r\n if channel == \"Questions channel id\":\r\n answer = \"Not sure what you mean. Try *{}*\".format(EXAMPLE_COMMAND)\r\n if command.startswith(EXAMPLE_COMMAND):\r\n answer = \"Sending your question to specialists!\"\r\n define_question(command)\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=\"#canal0\",\r\n text=\"New {} do you know how to answer it? (Say yes or no)\".format(command)\r\n )\r\n if command.startswith(EXAMPLE_COMMAND2):\r\n answer = \"Thanks for the feedback :heart:\"\r\n if command.startswith(EXAMPLE_COMMAND3):\r\n answer = \"Sorry for that , fell free to send me another question!\"\r\n if channel == \"channel0 id\":\r\n answer = \"Not sure what you mean. Try *{}* or *{}*.\".format(EXAMPLE_COMMAND2,EXAMPLE_COMMAND3)\r\n if command.startswith(EXAMPLE_COMMAND2):\r\n answer = \"Waiting for your answer (use {} to respond), Thank you!\".format(EXAMPLE_COMMAND4)\r\n if command.startswith(EXAMPLE_COMMAND3):\r\n answer = \"Sending question to next channel...\"\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=\"#canal1\",\r\n text=\"New {} do you know how to answer it? (Say yes or no)\".format(question)\r\n )\r\n #send to next channel\r\n if command.startswith(EXAMPLE_COMMAND4):\r\n answer = \"Sending your answer for the requester, Thank you! :heart: \"\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=\"#python-bot\",\r\n text=\"{} is this answer satisfactory? (say *yes* ou *no*)\".format(command)\r\n )\r\n if channel == \"channel1 id\":\r\n answer = \"Not sure what you mean. 
Try *{}* or *{}*.\".format(EXAMPLE_COMMAND2,EXAMPLE_COMMAND3)\r\n if command.startswith(EXAMPLE_COMMAND2):\r\n answer = \"Waiting for your answer (use {} to respond), Thank you!\".format(EXAMPLE_COMMAND4)\r\n if command.startswith(EXAMPLE_COMMAND3):\r\n answer = \"Sending question to next channel...\"\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=\"#canal2\",\r\n text=\"New {} do you know how to answer it? (Say yes or no)\".format(question)\r\n )\r\n if command.startswith(EXAMPLE_COMMAND4):\r\n answer = \"Sending your answer for the requester, Thank you! :heart: \"\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=\"#python-bot\",\r\n text=\"{} is this answer satisfactory? (say *yes* ou *no*)\".format(command)\r\n )\r\n \r\n\r\n # Sends the answer back to the channel\r\n slack_client.api_call(\r\n \"chat.postMessage\",\r\n channel=channel,\r\n text=answer\r\n )\r\n\r\nif __name__ == \"__main__\":\r\n if slack_client.rtm_connect(with_team_state=False):\r\n print(\"Starter Bot connected and running!\")\r\n\r\n # Read bot's user ID by calling Web API method `auth.test`\r\n starterbot_id = slack_client.api_call(\"auth.test\")[\"user_id\"]\r\n while True:\r\n command, channel = parse_bot_commands(slack_client.rtm_read())\r\n if command:\r\n handle_command(command, channel)\r\n time.sleep(RTM_READ_DELAY)\r\n else:\r\n print(\"Connection failed. Exception traceback printed above.\")","sub_path":"code/slackbot-en.py","file_name":"slackbot-en.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"75470745","text":"# Copyright 2019 Open Source Robotics Foundation, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nfrom ament_index_python import get_resource\nfrom ament_index_python import get_resources\nfrom ament_index_python import has_resource\n\nfrom rosidl_runtime_py import utilities\nfrom rosidl_runtime_py.convert import message_to_yaml\n\n\ndef get_all_interface_packages():\n return get_resources('rosidl_interfaces')\n\n\ndef get_interfaces(package_name):\n if not has_resource('packages', package_name):\n raise LookupError('Unknown package {}'.format(package_name))\n try:\n content, _ = get_resource('rosidl_interfaces', package_name)\n except LookupError:\n return []\n interface_names = content.splitlines()\n return list(sorted({\n n.rsplit('.', 1)[0]\n for n in interface_names\n if '_' not in n}))\n\n\ndef get_interface_path(parts):\n prefix_path = has_resource('packages', parts[0])\n joined = '/'.join(parts)\n if len(parts[-1].rsplit('.', 1)) == 1:\n joined += '.idl'\n interface_path = os.path.join(\n prefix_path, 'share', joined)\n if not os.path.exists(interface_path):\n raise LookupError('Could not find the interface {!r}'.format(interface_path))\n return interface_path\n\n\ndef package_name_completer(**kwargs):\n \"\"\"Callable returning a list of types containing messages, services, and action.\"\"\"\n return get_all_interface_packages()\n\n\ndef type_completer(**kwargs):\n \"\"\"Callable returning a list of message, service, and action types.\"\"\"\n types = []\n for package_name, service_names in get_all_service_types().items():\n for service_name in service_names:\n types.append(\n '{package_name}/srv/{service_name}'.format_map(locals()))\n\n for package_name, message_names in get_all_message_types().items():\n for message_name in message_names:\n types.append(\n '{package_name}/msg/{message_name}'.format_map(locals()))\n\n for package_name, action_names in get_all_action_types().items():\n for action_name in action_names:\n types.append(\n '{package_name}/action/{action_name}'.format_map(locals()))\n\n return sorted(types)\n\n\ndef get_all_action_types():\n all_action_types = {}\n for package_name in get_resources('rosidl_interfaces'):\n action_types = get_action_types(package_name)\n if action_types:\n all_action_types[package_name] = action_types\n return all_action_types\n\n\ndef get_action_types(package_name):\n if not has_resource('packages', package_name):\n raise LookupError('Unknown package name')\n try:\n content, _ = get_resource('rosidl_interfaces', package_name)\n except LookupError:\n return []\n interface_names = content.splitlines()\n # TODO(jacobperron) this logic should come from a rosidl related package\n # Only return actions in action folder\n return list(sorted({\n n[7:].rsplit('.', 1)[0]\n for n in interface_names\n if n.startswith('action/') and (n[-4:] == '.idl' or n[-7:] == '.action')}))\n\n\ndef get_all_message_types():\n all_message_types = {}\n for package_name in get_resources('rosidl_interfaces'):\n 
message_types = get_message_types(package_name)\n if message_types:\n all_message_types[package_name] = message_types\n return all_message_types\n\n\ndef get_message_types(package_name):\n if not has_resource('packages', package_name):\n raise LookupError('Unknown package name')\n try:\n content, _ = get_resource('rosidl_interfaces', package_name)\n except LookupError:\n return []\n interface_names = content.splitlines()\n # TODO(dirk-thomas) this logic should come from a rosidl related package\n # Only return messages in msg folder\n return list(sorted({\n n[4:-4]\n for n in interface_names\n if n.startswith('msg/') and n[-4:] in ('.idl', '.msg')}))\n\n\ndef get_all_service_types():\n all_service_types = {}\n for package_name in get_resources('rosidl_interfaces'):\n service_types = get_service_types(package_name)\n if service_types:\n all_service_types[package_name] = service_types\n return all_service_types\n\n\ndef get_service_types(package_name):\n if not has_resource('packages', package_name):\n raise LookupError('Unknown package name')\n try:\n content, _ = get_resource('rosidl_interfaces', package_name)\n except LookupError:\n return []\n interface_names = content.splitlines()\n # TODO(dirk-thomas) this logic should come from a rosidl related package\n # Only return services in srv folder\n return list(sorted({\n n[4:-4]\n for n in interface_names\n if n.startswith('srv/') and n[-4:] in ('.idl', '.srv')}))\n\n\ndef get_message_path(package_name, message_name):\n message_types = get_message_types(package_name)\n if message_name not in message_types:\n raise LookupError('Unknown message name')\n prefix_path = has_resource('packages', package_name)\n # TODO(dirk-thomas) this logic should come from a rosidl related package\n return os.path.join(\n prefix_path, 'share', package_name, 'msg', message_name + '.msg')\n\n\ndef interface_to_yaml(identifier):\n interface = utilities.get_interface(identifier)\n if utilities.is_action(interface):\n instance = interface.Goal()\n elif utilities.is_service(interface):\n instance = interface.Request()\n else:\n instance = interface()\n\n return message_to_yaml(instance)\n","sub_path":"ros2interface/ros2interface/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"294764493","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pymysql\n\nclass bookSpider(object):\n def __init__(self,url):\n self.url=url;\n self.headers={\n 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',\n 'Referer': 'http://i.meizitu.net'\n }\n def getHtml(self):\n res=requests.get(self.url,self.headers)\n soup=BeautifulSoup(res.text,\"lxml\")\n tt=soup.select('body > div:nth-child(5) > div.firs.d.l.topk > div.topli > ul > li>a')\n for url in tt:\n dmurl=\"http://www.fengchedm.com\"+url.get('href')\n title=url.get('title')\n print(dmurl,title)\n self.saveDb(dmurl,title)\n\n def saveDb(self, dmurl,title):\n db = pymysql.connect('localhost', 'root', '123456', 'pyData')\n cursor = db.cursor()\n sql = \"insert into dmlist(dmurl,title)\" \\\n \"values ('%s','%s')\" % (dmurl,title)\n try:\n cursor.execute(sql)\n db.commit()\n except:\n db.rollback()\n cursor.close()\n\ndef createDb():\n db = pymysql.connect(\"localhost\", \"root\", \"123456\", \"pyData\")\n\n cursor = db.cursor()\n # 如果数据表已经存在使用 execute() 方法删除表。\n cursor.execute(\"DROP TABLE IF EXISTS dmlist\")\n\n # 创建数据表SQL语句\n sql = \"\"\"CREATE TABLE dmlist (\n ID integer (255) NOT NULL AUTO_INCREMENT primary key,\n dmurl varchar (255),\n titile varchar (255)\n )\"\"\"\n\n cursor.execute(sql)\n\n # 关闭数据库连接\n db.close()\n\ndef main():\n base_url=\"http://www.fengchedm.com/paiming/137.html\"\n bookspider=bookSpider(base_url)\n bookspider.getHtml()\nif __name__ == '__main__':\n main()","sub_path":"booktest.py","file_name":"booktest.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"502802980","text":"import pprint\nimport unittest\n\nimport numpy\nimport skil_client\nfrom skil_client import *\nfrom skil_client.rest import ApiException\n\ndebug = False\n\nhost = \"localhost\" # Rename this to the host you are using \n\nconfig = Configuration()\nconfig.host = \"{}:9008\".format(host) # change this if you're using a different port number for the general API!\nconfig.debug = debug\napi_client = ApiClient(configuration=config)\n# create an instance of the API class\napi_instance = skil_client.DefaultApi(api_client=api_client)\n\nconfig_mh = Configuration()\nconfig_mh.host = \"{}:9100\".format(host) # change this if you're using a different port number for the model server!\nconfig_mh.debug = debug\napi_client_mh = ApiClient(configuration=config_mh)\n# create an instance of the Model History API class\napi_instance_mh = skil_client.DefaultApi(api_client=api_client_mh)\n\n# authenticate\npp = pprint.PrettyPrinter(indent=4)\ntry:\n print(\"Authenticating with SKIL API...\")\n credentials = skil_client.Credentials(user_id=\"admin\", password=\"admin\") # Update this with the ID and password you're using for your SKIL server\n token = api_instance.login(credentials)\n pp.pprint(token)\n # add credentials to config\n config.api_key['authorization'] = token.token\n config.api_key_prefix['authorization'] = \"Bearer\"\n # for model history\n config_mh.api_key['authorization'] = token.token\n config_mh.api_key_prefix['authorization'] = \"Bearer\"\nexcept ApiException as e:\n print(\"Exception when calling DefaultApi->login: %s\\n\" % e)\n\nprint(\"Uploading model, please wait...\")\nmodelFile = \"/model.pb\"\nuploads = api_instance.upload(file=modelFile)\npp.pprint(uploads)\n\nmodel_file_path = \"file://\" + uploads.file_upload_response_list[0].path\npp.pprint(model_file_path)\n\ndeployment_name = \"mnist\"\ncreate_deployment_request = CreateDeploymentRequest(deployment_name)\ndeployment_response = api_instance.deployment_create(create_deployment_request)\n\npp.pprint(deployment_response)\n\nmodel_name = \"tf_model_mnist\"\nuris = [\"{}/model/{}/default\".format(deployment_name, model_name),\n \"{}/model/{}/v1\".format(deployment_name, model_name)]\n\ndeploy_model_request = ImportModelRequest(model_name,\n 1, \n file_location=model_file_path,\n model_type=\"model\",\n uri=uris,\n input_names=[\"input_node\", \"keep_prob_input\"], \n output_names=[\"output_node\"])\n\nmodel_deployment_response = api_instance.deploy_model(deployment_response.id, deploy_model_request)\npp.pprint(model_deployment_response)\n\nmodel_state_change_response = api_instance.model_state_change(deployment_response.id,\n model_deployment_response.id,\n SetState(\"start\"))\npp.pprint(model_state_change_response)\n\nimport time\n\n# Checking if the model is already started\nprint(\"\\nStart serving model...\")\nwhile True:\n time.sleep(5)\n \n # Query the model state\n model_state = api_instance.model_state_change(deployment_response.id, \n model_deployment_response.id, \n SetState(\"start\")).state\n \n if model_state == \"started\":\n print(\"Model server started successfully!\")\n break\n else:\n print(\"wait...\")\n","sub_path":"Docker-deployment/docker/deploy_model.py","file_name":"deploy_model.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"631916194","text":"# from torch import im\n\n\ndef weights_init(module):\n classname = module.__class__.__name__\n if classname.find('Conv') != -1:\n module.weight.data.normal_(0.0, 0.1)\n elif classname.find('Linear') != -1:\n module.weight.data.normal_(0.0, 0.1)\n module.bias.data.fill_(0.0)\n\n\ndef model_weights_init(model):\n for module in model.modules():\n weights_init(module)\n\n\n","sub_path":"LearnDistance/with_augmentation/helperFunctions.py","file_name":"helperFunctions.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"522152036","text":"def pale(n):\n n_string = str(n) #convert the provided number into a string\n fourdigits = len(n_string) == 4 # with the string version of the input, determine if the value indeed has 4 digits\n rem = n % 10 #use modulo to place the last digit into a another formula\n divisble = rem % 4 == 0 and rem != 0 #determine if the last digit of the input is divisble by 4 and not 0.\n n = int(n / 10)\n rem1 = n % 10\n n = int(n / 10)\n rem2 = n % 10\n n = int(n / 10)\n rem3 = n % 10\n n = int(n / 10)\n rem4 = n % 10 # repeatedly separete reach four digits one by one\n threethree = (rem and rem1) or (rem1 and rem2) or (rem2 and rem4) or (rem3 and rem4) == 3 #verfify if any consecutive digits each equal to 3\n pale_verficiation = (threethree != divisble == fourdigits) or (fourdigits == threethree != divisble) #determine if the results will equal to a pale or not\n return pale_verficiation #output the determination\n","sub_path":"Pale_Function.py","file_name":"Pale_Function.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"329966968","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 18:03:26 2020\n\n@author: milly\n\"\"\"\n\nimport numpy as np\nfrom tomophantom import TomoP2D \nfrom tomophantom.TomoP2D import Objects2D\nimport astra\nimport os\n\nnum_gen = 1000\n\n#make this automatic\ntrainset = 3\nmin_obs = 5\nmax_obs = 30\n\nsinogram_folder = \"sinograms\"\nphantom_folder = \"phantoms\"\n\ntrainset_list = list(filter(lambda x: \"sinograms\" in x, os.listdir(os.getcwd())))\ntrainset_list = list(map(lambda x: int(x.split(\"_\")[-1]), trainset_list))\nwhile trainset in trainset_list:\n trainset += 1\n \nroot_dir = os.getcwd()\n\nsinogram_folder = os.path.join(root_dir, sinogram_folder + \"_{}_{}_{}\".format(min_obs, max_obs, trainset))\nphantoms_folder = os.path.join(root_dir, phantom_folder + \"_{}_{}_{}\".format(min_obs, max_obs, trainset))\nos.mkdir(sinogram_folder)\nos.mkdir(phantoms_folder)\n\nsino_name = \"sino_{}\"\nphantom_name = \"ground_{}\"\n\n#keeps the generated objects within a circle with a diameter 90% of the \nwidth = 0.9\n\nnum_objects = np.random.randint(min_obs, max_obs)\nob_list = np.empty(num_objects, dtype=dict)\n\nfor i in range(num_gen):\n print(i)\n for ob_index in range(num_objects):\n \n density = np.random.rand()\n shape = [Objects2D.ELLIPSE, Objects2D.RECTANGLE][np.random.randint(0,2)]\n \n if shape == Objects2D.ELLIPSE:\n R = width\n x = np.random.rand() * 2 - 1\n y = np.random.rand() * 2 - 1\n while x**2 + y**2 > R ** 2:\n x = np.random.rand() * 2 - 1\n y = np.random.rand() * 2 - 1\n max_length = R - (x**2 + y**2) ** 0.5\n long_length = np.random.rand() * max_length\n short_length = np.random.rand() * long_length\n \n if shape == Objects2D.RECTANGLE:\n R = width/2\n x = np.random.rand() * 2\n y = np.random.rand() * 2\n while x**2 + y**2 > R ** 2:\n x = np.random.rand() * 2\n y = np.random.rand() * 2\n max_length = R - (x**2 + y**2) ** 0.5\n max_length *= 4/(2**0.5)\n long_length = np.random.rand() * max_length\n short_length = np.random.rand() * long_length\n \n rot = np.random.randint(0, 360)\n ob = {'Obj': shape,\n 'C0' : density,\n 'x0' : x,\n 'y0' : y,\n 'a' : long_length,\n 'b' : short_length,\n 'phi': rot}\n \n ob_list[ob_index] = ob\n \n #make these choices\n phantom = TomoP2D.Object(1499, ob_list)\n \n vol_geom = astra.creators.create_vol_geom(1499, 1499, -256, 256, -256, 256)\n proj_geom = astra.creators.create_proj_geom('parallel',1, \n 512, np.linspace(0, np.pi, 512, False))\n proj_id = astra.create_projector(\"cuda\",proj_geom,vol_geom)\n vol_geom_rec = astra.create_vol_geom(512,512)\n sino_id, sinogram = astra.create_sino(phantom,proj_id, gpuIndex=1)\n \n np.save(os.path.join(phantoms_folder, phantom_name.format(i)), phantom)\n np.save(os.path.join(sinogram_folder, sino_name.format(i)), sinogram)\n","sub_path":"new_rand_set.py","file_name":"new_rand_set.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"406078987","text":"from __future__ import unicode_literals\n\nfrom re_scan import Scanner\n\n\nscanner = Scanner([\n ('bold', r'\\*\\*'),\n ('link_special', r'\\[\\[(?P.*?)\\|(?P.*?)\\]\\]'),\n ('link', r'\\[\\[(.*?)\\]\\]'),\n ('underline', r'_'),\n])\n\ninput_text = 'Hello **World**! [[Stuff|extra]] _[[Stuff]]_.'\n\nfor token, match in scanner.scan_with_holes(input_text):\n if token is None:\n print('hole', match)\n else:\n print('token', (token, match.groups(),\n match.groupdict(), match.group()))\n","sub_path":"examples/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"158526209","text":"'''\n#40 Combination Sum II\nGiven a collection of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.\n\nEach number in C may only be used once in the combination.\n\nNote:\nAll numbers (including target) will be positive integers.\nThe solution set must not contain duplicate combinations.\nFor example, given candidate set [10, 1, 2, 7, 6, 1, 5] and target 8, \nA solution set is: \n[\n [1, 7],\n [1, 2, 5],\n [2, 6],\n [1, 1, 6]\n]\n'''\nclass Solution(object):\n def combinationSum2(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n res = list()\n if len(candidates) == 0:\n return res\n candidates.sort()\n i = 0\n while i < len(candidates):\n if i > 0 and candidates[i] == candidates[i-1]: #skip the duplicate one\n i += 1\n continue\n candi = candidates[i]\n if candi > target:\n break\n if candi == target:\n temp = [candi]\n res.append(temp)\n break\n \n temp_list =list()\n temp_list.append(candi)\n temp_target = target - candi\n temp_res = self.combinationSum2(candidates[i+1:],temp_target)\n for item in temp_res:\n temp = temp_list + item\n res.append(temp)\n i += 1\n return res\n ","sub_path":"Algorithms/N40-Combination Sum2.py","file_name":"N40-Combination Sum2.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"219877633","text":"import codecs\n\nfile = codecs.open(\"TDK_kelimeler_anlamlar_ceviriler.txt\", \"r\")\n\nkac_tek_kelime = 0\n\ntr_kelimeler = []\n\nfor satir in file:\n \n parcali_satir = satir.split(\";;\")\n tr_kelime = parcali_satir[0]\n en_kelime = parcali_satir[2]\n tr_anlami = parcali_satir[1]\n \n tr_kelime_lower = tr_kelime.lower()\n en_kelime_lower = en_kelime.lower()\n \n def non_turkish(_nt_string):\n \n return _nt_string.replace(\"ç\", \"c\").replace(\"ğ\", \"g\").replace(\"ş\",\"s\").replace(\"ü\",\"u\").replace(\"ö\",\"o\").replace(\"ı\",\"i\").replace(\"â\",\"a\").replace(\"î\",\"i\").replace(\"û\",\"u\").replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n \n tr_kelime_lower_edited = non_turkish(tr_kelime_lower)\n en_kelime_lower_edited = non_turkish(en_kelime_lower)\n \n \n \n if \" \" in en_kelime:\n \n continue\n \n else:\n \n if tr_kelime_lower_edited == en_kelime_lower_edited:\n \n continue\n \n else:\n \n if \"\\\"\" in en_kelime:\n \n continue\n \n else:\n \n if not \"if\" == en_kelime[:2]:\n \n if not \"don't\" == en_kelime[:5]:\n \n if not \"does'\" == en_kelime[:5]:\n \n satir_kelime_anlam_en = \"%s;;%s;;%s\" % (tr_kelime, tr_anlami, en_kelime)\n satir_kelime_anlam_en = satir_kelime_anlam_en.replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n \n if tr_kelime not in tr_kelimeler: \n \n kac_tek_kelime += 1\n \n tr_kelimeler.append(tr_kelime)\n \n print(satir_kelime_anlam_en, file=codecs.open(\"TDK_tek_kelimeler_anlamlar_cevriler.txt\", \"a\"))","sub_path":"apptrogren/just_one_words.py","file_name":"just_one_words.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"626524369","text":"# -*- coding: utf-8 -*-\n\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport time\nimport os\nimport sys\n\nif sys.version_info[0] == 3:\n import urllib.request\nelse:\n import urllib\n# http://stackoverflow.com/questions/17960942/attributeerror-module-object-has-no-attribute-urlretrieve\n\n\nfrom ..drivers import Oscillo, Spectrum\nfrom .oscillo_widget import OscilloWidget\nfrom .spectrum_widget import SpectrumWidget\nfrom .connect_widget import ConnectWidget\nfrom koheron import connect\n\nclass WelcomeWidget(QtGui.QWidget):\n \"\"\" This widget allows to connect to one of the available drivers.\n \"\"\"\n def __init__(self, parent, ip_path):\n super(WelcomeWidget, self).__init__()\n\n self.parent = parent\n self.app_list = self.parent.app_list\n self.instrument_list = [''] * len(self.app_list)\n\n self.ip_path = ip_path\n self.opened = True\n self.select_opened = True\n\n # Define layouts\n self.lay = QtGui.QHBoxLayout()\n self.left_layout = QtGui.QVBoxLayout()\n self.right_layout = QtGui.QVBoxLayout()\n\n # Connection (ip address and password)\n self.connect_layout = QtGui.QVBoxLayout()\n self.connect_widget = ConnectWidget(self, self.ip_path)\n self.connect_layout.addWidget(self.connect_widget)\n\n # Select between drivers\n self.drivers_layout = QtGui.QVBoxLayout()\n\n self.app_buttons = []\n for i, app in enumerate(self.instrument_list):\n self.app_buttons.append(self.set_button(''))\n self.drivers_layout.addWidget(self.app_buttons[i], 1, QtCore.Qt.AlignCenter)\n def make_callback(i):\n return lambda : self.app_onclick(i)\n self.app_buttons[i].clicked.connect(make_callback(i))\n self.update_buttons()\n\n # Right layout\n self.right_layout.addLayout(self.connect_layout)\n self.right_layout.addLayout(self.drivers_layout)\n self.right_layout.addStretch(1)\n self.right_frame = QtGui.QFrame(self)\n self.right_frame.setFrameShape(QtGui.QFrame.StyledPanel)\n self.right_frame.setLayout(self.right_layout)\n\n # Add layouts to main layout\n self.lay.addLayout(self.left_layout, 1)\n self.lay.addWidget(self.right_frame)\n self.setLayout(self.lay)\n\n def update(self):\n pass\n\n def set_button(self, name):\n button = QtGui.QPushButton(name)\n button.setStyleSheet('QPushButton {color: green;}')\n button.setFixedWidth(200)\n button.setFixedHeight(150)\n return button\n\n def update_buttons(self):\n for i, button in enumerate(self.app_buttons):\n button.setText(self.parent.app_list[i].capitalize() +\n (' not available ' if (self.instrument_list[i] == '') else ''))\n\n def app_onclick(self, app_idx):\n app = self.app_list[app_idx]\n instrument = self.instrument_list[app_idx]\n if instrument != '':\n QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))\n self.connect_widget.client = connect(self.connect_widget.host, name=instrument)\n driver = globals()[app.capitalize()](self.connect_widget.client)\n driver.init()\n QtGui.QApplication.restoreOverrideCursor()\n index = self.parent.stacked_widget.addWidget(globals()[app.capitalize()+'Widget'](driver, self.parent))\n self.parent.stacked_widget.setCurrentIndex(index)\n","sub_path":"ldk/gui/welcome_widget.py","file_name":"welcome_widget.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"522466984","text":"import logging\r\nimport inspect\r\nimport os\r\nimport functools\r\nimport time\r\n\r\n\r\nclass EndOfNameFilter(logging.Filter):\r\n def filter(self, record):\r\n record.trunc_name = record.name[-35:]\r\n return True\r\n\r\n\r\ndef get_logger(name=None, logging_format=None):\r\n if not logging_format:\r\n json_logging_format = \"\"\"{\"class_name\": \"%(name)s\", \"time\": \"%(asctime)s\", \"level\": \"%(levelname)s\", \"message\": \"%(message)s\"}\"\"\"\r\n logging_format = (\r\n \"%(trunc_name)35.35s - %(asctime)-15s - %(levelname)5.5s: %(message)s\"\r\n )\r\n if not name:\r\n name = os.path.basename(inspect.stack()[1].filename)\r\n logger = logging.getLogger(name)\r\n if not logger.handlers:\r\n sh = logging.StreamHandler()\r\n sh.setFormatter(logging.Formatter(logging_format))\r\n logger.addHandler(sh)\r\n sh.addFilter(EndOfNameFilter())\r\n logger.setLevel(logging.INFO)\r\n logger.propagate = False\r\n return logger\r\n\r\n\r\ndef logger_wrapper(func):\r\n @functools.wraps(func)\r\n def wrapped(*args, **kwargs):\r\n if hasattr(func, \"__self__\"):\r\n if func.__self__.logger_level != \"off\":\r\n func.__self__.logger.debug(\r\n \"{func.__qualname__!s} called with arguments: {a}, and kwargs: {k}\".format(\r\n func=func,\r\n a=\", \".join([str(a) for a in args]),\r\n k=\", \".join(kwargs),\r\n )\r\n )\r\n time0 = time.time()\r\n func_out = func(*args, **kwargs)\r\n func.__self__.logger.debug(\"Function returned %s\", func_out)\r\n func.__self__.logger.debug(\r\n \"Took %ss to execute\", round(time.time() - time0, 3)\r\n )\r\n return func_out\r\n else:\r\n return func(*args, **kwargs)\r\n else:\r\n return func(*args, **kwargs)\r\n\r\n return wrapped\r\n\r\n\r\nclass test:\r\n def __init__(self, logger):\r\n self.logger = logger\r\n self.logger_level = \"DEBUG\"\r\n self.debug = True\r\n if self.debug:\r\n self.wrap_debug()\r\n\r\n def wrap_debug(self):\r\n for k in self.__dir__():\r\n v = getattr(self, k)\r\n if callable(v) and hasattr(v, \"__self__\"): # only bound methods\r\n setattr(self, k, logger_wrapper(v))\r\n\r\n def poop(self, h):\r\n time.sleep(1)\r\n return h + 1\r\n","sub_path":"src/fast_krig/_log/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"436854734","text":"import numpy as np\nfrom pennylane.operation import DiagonalOperation\nfrom . import torch_ops\nfrom . import DefaultQubit\nfrom pytorch import torch\n\n#code to check the pytorch version\n\n\n#Made some changes, make it correct.\nclass DefaultQubitTorch(DefaultQubit):\n \"\"\"Simulator plugin based on ``\"default.qubit\"``, written using PyTorch.\n\n **Short name:** ``default.qubit.torch``\n\n This device provides a pure-state qubit simulator written using PyTorch.\n As a result, it supports classical backpropagation as a means to compute the Jacobian. This can\n be faster than the parameter-shift rule for analytic quantum gradients\n when the number of parameters to be optimized is large.\n \n #Write the crct installation of Pytorch, current one is just dummy\n To use this device, you will need to install Pytorch:\n\n .. code-block:: console\n\n pip install pytoch \n\n **Example**\n\n The ``default.qubit.torch`` is designed to be used with end-to-end classical backpropagation\n (``diff_method=\"backprop\"``) with the PyTorch interface. This is the default method\n of differentiation when creating a QNode with this device.\n\n Using this method, the created QNode is a 'white-box', and is\n tightly integrated with your TensorFlow computation:\n\n >>> dev = qml.device(\"default.qubit.tf\", wires=1)\n >>> @qml.qnode(dev, interface=\"tf\", diff_method=\"backprop\")\n ... def circuit(x):\n ... qml.RX(x[1], wires=0)\n ... qml.Rot(x[0], x[1], x[2], wires=0)\n ... return qml.expval(qml.PauliZ(0))\n >>> weights = tf.Variable([0.2, 0.5, 0.1])\n >>> with tf.GradientTape() as tape:\n ... res = circuit(weights)\n >>> print(tape.gradient(res, weights))\n tf.Tensor([-2.2526717e-01 -1.0086454e+00 1.3877788e-17], shape=(3,), dtype=float32)\n\n Autograph mode will also work when using classical backpropagation:\n\n >>> @tf.function\n ... def cost(weights):\n ... return tf.reduce_sum(circuit(weights)**3) - 1\n >>> with tf.GradientTape() as tape:\n ... res = cost(weights)\n >>> print(tape.gradient(res, weights))\n tf.Tensor([-3.5471588e-01 -1.5882589e+00 3.4694470e-17], shape=(3,), dtype=float32)\n\n There are a couple of things to keep in mind when using the ``\"backprop\"``\n differentiation method for QNodes:\n\n * You must use the ``\"tf\"`` interface for classical backpropagation, as TensorFlow is\n used as the device backend.\n\n * Only exact expectation values, variances, and probabilities are differentiable.\n When instantiating the device with ``analytic=False``, differentiating QNode\n outputs will result in ``None``.\n\n\n If you wish to use a different machine-learning interface, or prefer to calculate quantum\n gradients using the ``parameter-shift`` or ``finite-diff`` differentiation methods,\n consider using the ``default.qubit`` device instead.\n\n\n Args:\n wires (int, Iterable[Number, str]): Number of subsystems represented by the device,\n or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)\n or strings (``['ancilla', 'q1', 'q2']``). Default 1 if not specified.\n\n shots (None, int): How many times the circuit should be evaluated (or sampled) to estimate\n the expectation values. 
Defaults to ``None`` if not specified, which means\n that the device returns analytical results.\n If ``shots > 0`` is used, the ``diff_method=\"backprop\"``\n QNode differentiation method is not supported and it is recommended to consider\n switching device to ``default.qubit`` and using ``diff_method=\"parameter-shift\"``.\n \"\"\"\n\n name = \"Default qubit (PyTorch) Pennylane plugin\"\n short_name = \"default.qubit.torch\"\n pennylane_requires = '2'\n version = '0.0.1'\n author = 'Abhinav M Hari and Daniel Wang'\n\n parametric_ops = {\n \"PhaseShift\": torch_ops.PhaseShift,\n \"ControlledPhaseShift\": torch_ops.ControlledPhaseShift,\n \"RX\": torch.RX,\n \"RY\": torch.RY,\n \"RZ\": torch.RZ,\n \"Rot\": torch.Rot,\n \"MultiRZ\": torch.MultiRZ,\n \"CRX\": torch.CRX,\n \"CRY\": torch.CRY,\n \"CRZ\": torch.CRZ,\n \"CRot\": torch.CRot,\n \"SingleExcitation\": torch.SingleExcitation,\n \"SingleExcitationPlus\": torch.SingleExcitationPlus,\n \"SingleExcitationMinus\": torch.SingleExcitationMinus,\n \"DoubleExcitation\": torch.DoubleExcitation,\n \"DoubleExcitationPlus\": torch.DoubleExcitationPlus,\n \"DoubleExcitationMinus\": torch.DoubleExcitationMinus,\n \n }\n\n C_DTYPE = torch.complex128\n R_DTYPE = torch.float64\n #_asarray confusion, not sure\n _asarray = staticmethod(torch.tensor)\n _dot = staticmethod(lambda x, y: torch.tensordot(x, y, dim=1))\n _abs = staticmethod(torch.abs)\n #find alternative for tf.reduce_sum, or create one, \n _reduce_sum = staticmethod()\n _reshape = staticmethod(torch.reshape)\n _flatten = staticmethod(lambda tensor: torch.reshape(tensor, [-1])) #not sure\n _gather = staticmethod(torch.gather)\n _einsum = staticmethod(torch.einsum)\n _cast = staticmethod(torch.tensor) #also check the torch.to function\n _transpose = staticmethod(torch.transpose)\n _tensordot = staticmethod(torch.tensordot)\n _conj = staticmethod(torch.conj)\n _imag = staticmethod(torch.imag)\n _roll = staticmethod(torch.roll) \n _stack = staticmethod(torch.stack) #check if it is same as tf.stack)\n\n #maybe a extra static method for _asarray like in default_quibt_tf.py\n\n #special apply method\n @staticmethod\n def __init__(self, shots=1024, hardware_options=None):\n super().__init__(wires=24, shots=shots, analytic=False)\n self.hardware_options = hardware_options\n\n\n @classmethod\n def capabilities(cls):\n capabilities = super().capabilities().copy()\n capabilities.update(\n passthru_interface=\"torch\",\n supports_reversible_diff=False\n )\n return capabilities\n\n @staticmethod\n #another static method for _scatter. Don't know what to do\n\n def _get_unitary_matrix(self, unitary):\n \"\"\"Return the matrix representing a unitary operation.\n\n Args:\n unitary (~.Operation): a PennyLane unitary operation\n\n Returns:\n torch.tensor[complex] or array[complex]: Returns a 2D matrix representation of\n the unitary in the computational basis, or in the case of a diagonal unitary,\n a 1D array representing the matrix diagonal. For non-parametric unitaries,\n the return type will be a ``np.ndarray``. 
For parametric unitaries, a ``torch.tensor``\n object will be returned.\n \"\"\"\n op_name = unitary.name.split(\".inv\")[0]\n\n if op_name in self.parametric_ops:\n if op_name == \"MultiRz\":\n mat = self.parametric_ops[op_name](*unitary.parameters, len(unitary.wires))\n else:\n mat = self.parametric_ops[op_name](*unitary.parameters)\n\n if unitary.inverse:\n mat = self._transpose(self._conj(mat))\n\n return mat\n\n if isinstance(unitary, DiagonalOperation):\n return unitary.eigvals\n\n return unitary.matrix\n","sub_path":"pennylane/devices/default_qubit_torch.py","file_name":"default_qubit_torch.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"419998526","text":"from pysimplesoap.server import SoapDispatcher, SOAPHandler, WSGISOAPHandler\nimport logging\nimport const\nfrom BaseHTTPServer import HTTPServer\ndispatcher = SoapDispatcher(\n'TransServer',\nlocation = \"http://%s:8050/\" % const.TARGET_IP,\naction = 'http://%s:8050/' % const.TARGET_IP, # SOAPAction\nnamespace = \"http://example.com/sample.wsdl\", prefix=\"ns0\",\ntrace = True,\nns = True)\n\ndef on():\n return \"on\"\ndef off():\n return \"off\"\n\ndef status():\n return \"1024\"\n\n# register the user function\n\ndispatcher.register_function('on', on,\n args={},\n returns={'result': str} \n )\n\ndispatcher.register_function('off', off,\n args={},\n returns={'result': str} \n )\n\ndispatcher.register_function('status', status,\n args={},\n returns={'humidity': str} \n )\n\nlogging.info(\"Starting server...\")\nhttpd = HTTPServer((\"\", 8050),SOAPHandler)\nhttpd.dispatcher = dispatcher\nhttpd.serve_forever()\n\n","sub_path":"http_server_soap.py","file_name":"http_server_soap.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"8507460","text":"import ride, car, readFile, router\r\n\r\n# --------------------------------------------\r\n# Input File with Rides\r\n\r\ninputFile = 'test.in' #'b_should_be_easy.in'\r\n\r\nrides = readFile.getRides(inputFile)\r\nparameters = readFile.getParameters(inputFile)\r\n\r\nRouter = router.Router()\r\n\r\ncarList = []\r\n\r\nfor i in range (parameters[\"vehicles\"]):\r\n carList.append (car.Car(i))\r\n print(carList[len(carList)-1].id)\r\n\r\nprint(\"List populated\")\r\n\r\nfor j in range (parameters[\"steps\"]):\r\n for current_car in carList:\r\n if (current_car.carActive):\r\n current_car.moveCar()\r\n\r\n for current_ride in rides:\r\n print(\"ride_loop\")\r\n if (not current_ride.assigned()):\r\n Router.assignCar(current_ride, carList)\r\n\r\nfor vehicle in carList:\r\n print (current_car.currentLocationX)\r\n print (current_car.currentLocationY)\r\n print (\"\")\r\n\r\n# Create rides in for loop\r\n# Store in List\r\n\r\n# Loop through each ride, Loop through available cars\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"187420230","text":"import json\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nimport time\nimport argparse\n\ndef ROC_Data(T0, Tf, N, stat):\n\n with open(\"all_dir/run_uniques/essentials.json\", \"r\") as f:\n essentials = json.load(f)\n AFG_PAIR = essentials[\"essentials\"][6]\n trials = essentials[\"essentials\"][7]\n \n with open(\"all_dir/Merged_jsons/Merged_Peaks.json\", \"r\") as f:\n RHO_MOD = json.load(f)\n\n PSPACE_LEN=len(AFG_PAIR)\n\n # Stats per threshold\n Detection_Prob = []\n New_False_Prob = []\n\n #sets current threshold value\n for thrshld in np.linspace(T0,Tf,N):\n\n # Detection/ False Alarm probability counters\n Detect_count = 0\n False_count = 0\n\n #loops from trial/ parameter space pairs \n for i in range(trials):\n\n # Detection Probability\n Max_FG_ij = max(RHO_MOD[str(i)][1][stat]) # max of onsources per trial\n if Max_FG_ij > thrshld:\n Detect_count += 1\n\n for j in range(PSPACE_LEN):\n\n RM_ij=np.array(RHO_MOD[str(i)][0][stat][j])\n\n # False Alarm probability\n falses_ij = len(RM_ij[RM_ij > thrshld])\n False_count += falses_ij\n\n # Detection/False Alarm probability stats\n Detect_stat = Detect_count / trials\n False_stat = False_count / (len(RHO_MOD[str(i)][0][stat][0]) * PSPACE_LEN * trials)\n\n # Appending stat per threshold\n Detection_Prob.append(Detect_stat)\n New_False_Prob.append(False_stat)\n \n return(Detection_Prob, New_False_Prob)\n\ndef ROC_Curve(N, outputfile=\"ROC_test\"):\n \n with open(\"all_dir/run_uniques/essentials.json\", \"r\") as f:\n essentials = json.load(f)\n stat_list = essentials[\"essentials\"][8]\n\n \n with open(\"all_dir/Merged_jsons/Merged_thresholds.json\", \"r\") as f:\n Thresholds = json.load(f)\n \n tempn = len(Thresholds)\n \n for s in range(tempn):\n \n thresholds = Thresholds[str(s)]\n\n Detection_Prob, New_False_Prob = ROC_Data(min(thresholds), max(thresholds), N, s)\n plt.plot(New_False_Prob, Detection_Prob, label=stat_list[s])\n\n plt.xlabel(\"New_False_Probs\")\n plt.ylabel(\"Detection_Probs\")\n plt.title(\"ROC Curve:N={}\".format(N))\n plt.legend()\n plt.savefig(\"all_dir/plots/{}.png\".format(outputfile))\n\n# def ROC_Curve(N, outputfile=\"ROC_test\"):\n\nif __name__==\"__main__\":\n\n parser = argparse.ArgumentParser()\n parser.add_argument('--N', type=int)\n parser.add_argument('--outputfile', nargs='?', const=1, type=str, default=\"ROC_test\")\n args = parser.parse_args()\n\n ROC_Curve(args.N,args.outputfile)\n","sub_path":"Statistic_Analysis/ROC_Curve.py","file_name":"ROC_Curve.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"292935650","text":"\"\"\"\r\nThe view layer of the API, handle string beautify and stuff\r\n\"\"\"\r\n\r\nimport datetime\r\nimport logging\r\nimport time\r\nimport os\r\nfrom django.http import HttpResponse, JsonResponse\r\nfrom .utils import LookupNotFoundError, InvalidDateError\r\nfrom .parse import parse_merchandise, parse_retail\r\nfrom .crocs import cross_origin\r\nfrom .models import Merchandise, Retail\r\n\r\n# get the current date\r\ncurrent_date = time.strftime(\"%Y-%m-%d\")\r\ncurrent_log_file = \"{}.log\".format(current_date)\r\n\r\n# header to add to the start of log file\r\nlog_header = \"Australian Statistics API\\nLog file for date: {}\\nDeveloper Team: Eleven51\\n\\n\".format(current_date)\r\n\r\n# add header if current date's log file does not exist or is empty\r\nif not os.path.isfile(current_log_file) or os.stat(current_log_file).st_size==0:\r\n file = open(current_log_file, 'w+')\r\n file.write(log_header)\r\n file.close()\r\n\r\n# configure logging formatting\r\nlogging.basicConfig(filename=\"{}.log\".format(current_date), level=logging.DEBUG, format=\"%(asctime)s: %(levelname)s: %(message)s\")\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n@cross_origin\r\ndef index(request):\r\n \"\"\"\r\n # Index route, only echo the request\r\n :param request: http request\r\n :return: http response\r\n \"\"\"\r\n return HttpResponse('This is the API end point v3. Request is:' + str(request))\r\n\r\n\r\n@cross_origin\r\ndef show_merchandise_data(request, categories, states=\"Total\"):\r\n \"\"\"\r\n get the request, return merchandise data\r\n :param request: contain date\r\n :param categories: Categories string\r\n :param states: str, List of states\r\n :return: JSON of merch data\r\n \"\"\"\r\n\r\n # begin timer and log request\r\n start_time = time.time()\r\n logger.info(\"New API request: {}\".format(request.get_full_path()))\r\n\r\n start_date = request.GET.get('startDate')\r\n end_date = request.GET.get('endDate')\r\n\r\n # string to list\r\n categories_list = categories.split(',')\r\n states_list = states.split(',')\r\n\r\n try:\r\n merch = Merchandise(categories_list, states_list, start_date, end_date)\r\n except (LookupNotFoundError, InvalidDateError) as error:\r\n logger.info(\"HTTP 404 ERROR: Request '{}': {}\".format(request.get_full_path(), str(error)))\r\n return JsonResponse(error.to_json(), status=404)\r\n\r\n merch_json = merch.get_json()\r\n if merch.response_status == 'error':\r\n return JsonResponse(merch_json, status=404)\r\n\r\n # end timer and log successful response\r\n end_time = time.time()\r\n ms_elapsed = (end_time - start_time)*1000\r\n\r\n result = parse_merchandise(merch_json,request, start_date, end_date, ms_elapsed)\r\n logger.info(\"HTTP 200 OK: Request '{}' successfully returned. 
Time taken: {}ms\".format(request.get_full_path(), ms_elapsed))\r\n\r\n return JsonResponse(result)\r\n\r\n\r\n@cross_origin\r\ndef show_retail_data(request, categories, states='AUS'):\r\n \"\"\"\r\n get the request, return retail data\r\n :param request: contain date\r\n :param categories: Categories string\r\n :param states: str, List of states\r\n :return: JSON of retail data\r\n \"\"\"\r\n\r\n # begin timer and log request\r\n start_time = time.time()\r\n logger.info(\"New API request: {}\".format(request.get_full_path()))\r\n\r\n start_date = request.GET.get('startDate')\r\n end_date = request.GET.get('endDate')\r\n\r\n # string to list\r\n categories_list = categories.split(',')\r\n states_list = states.split(',')\r\n\r\n # init a Retail Object\r\n # get the JSON file with the get_data method or something like that\r\n try:\r\n retail = Retail(categories_list, states_list, start_date, end_date)\r\n except (LookupNotFoundError, InvalidDateError) as error:\r\n logger.info(\"HTTP 404 ERROR: Request '{}': {}\".format(request.get_full_path(), str(error)))\r\n return JsonResponse(error.to_json(), status=404)\r\n\r\n retail_json = retail.get_json()\r\n if retail.response_status == 'error':\r\n return JsonResponse(retail_json, status=404)\r\n\r\n\r\n # end timer and log successful response\r\n end_time = time.time()\r\n ms_elapsed = (end_time - start_time)*1000\r\n\r\n result = parse_retail(retail_json,request, start_date, end_date, ms_elapsed)\r\n\r\n logger.info(\"HTTP 200 OK: Request '{}' successfully returned. Time taken: {}ms\".format(request.get_full_path(), ms_elapsed))\r\n\r\n return JsonResponse(result)\r\n","sub_path":"apiv3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"511065731","text":"import json\nfrom google.cloud import bigquery\nfrom google.oauth2 import service_account\nfrom google.cloud import bigquery_storage\nfrom utils.helperutils import HelperUtils\n\nclass BiqQueryHelper:\n def __inti__(self, table_id = None, job_config = None):\n utils = HelperUtils()\n bigquery_config = utils.get_bigquery_config()\n bigquery_key = utils.get_bigquery_key()\n\n if bigquery_key != None:\n bigquery_credentials = json.loads(bigquery_key)\n self.credentials = service_account.Credentials.from_service_account_info(\n bigquery_credentials)\n self.client = bigquery.Client(\n credentials=self.credentials, project=bigquery_config['project_id'])\n self.storage_client = bigquery_storage.BigQueryReadClient(\n credentials=self.credentials)\n else:\n raise Exception('Environment variable `BIGQUERY_KEY` is not set.')\n\n self.table_id = table_id\n self.job_config = job_config\n\n def load_table(self, dataframe):\n self.job = self.client.load_table_from_dataframe(\n dataframe, self.table_id, job_config=self.job_config)\n self.job.result()\n \n def get_table(self):\n if (self.job != None and self.job.done()):\n return self.client.get_table(self.table_id)\n else:\n return None\n\n def get_dataframe_from_query(self, query):\n return self.client.query(query).result().to_dataframe(self.storage_client)\n\n def close_bigquery_client(self):\n self.client.close()","sub_path":"src/bigquery_helper.py","file_name":"bigquery_helper.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"540861841","text":"# -*- coding: utf-8 -*-\nimport subprocess\nfrom airflow import DAG\nfrom airflow.exceptions import AirflowException # signal ERROR\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.sensors import BaseSensorOperator\nfrom datetime import datetime, timedelta\n\nclass ReportsRawReadySensor(BaseSensorOperator):\n def poke(self, context):\n retcode = subprocess.call(['sudo', '--non-interactive', '/usr/local/bin/docker-trampoline', 'reports_raw_sensor',\n context['ds'], context['execution_date'].isoformat(), (context['execution_date'] + context['dag'].schedule_interval).isoformat()])\n if retcode == 42:\n return True\n elif retcode == 13:\n return False\n else:\n raise AirflowException('Unexpected `is-reports-raw-ready` exit code: {:d}'.format(retcode))\n\ndag = DAG(\n dag_id='hist_canning',\n schedule_interval=timedelta(days=1),\n start_date=datetime(2012, 12, 5),\n #end_date=datetime(2017, 7, 7), # NB: end_date is included\n default_args={\n 'email': 'leonid@openobservatory.org', # prometheus/alertmanager sends to team@ but airflow is more chatty\n 'retries': 1,\n })\n\n# NB: removing an Operator from DAG leaves some trash in the database tracking\n# old state of that operator, but it seems to trigger no issues with 1.8.0\n\nReportsRawReadySensor(task_id='reports_raw_sensor', poke_interval=5*60, timeout=12*3600, dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='canning', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='tar_reports_raw', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='reports_tgz_s3_sync', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='reports_tgz_s3_ls', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='reports_tgz_cleanup', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='canned_s3_sync', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='canned_s3_ls', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='canned_cleanup', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='autoclaving', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='meta_pg', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='reports_raw_cleanup', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='sanitised_s3_ls', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='sanitised_check', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='sanitised_cleanup', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='autoclaved_tarlz4_s3_sync', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='autoclaved_jsonl_s3_sync', bash_command='shovel_jump.sh', dag=dag)\n\ndag.set_dependency('reports_raw_sensor', 'canning')\n\ndag.set_dependency('reports_raw_sensor', 'tar_reports_raw')\ndag.set_dependency('canning', 'tar_reports_raw')\n\ndag.set_dependency('tar_reports_raw', 'reports_tgz_s3_sync')\n\ndag.set_dependency('reports_tgz_s3_sync', 'reports_tgz_s3_ls')\n\n# reports_raw_cleanup -> reports_tgz_cleanup is NOT a dependency 
as reports_raw_cleanup uses only index file\ndag.set_dependency('reports_tgz_s3_sync', 'reports_tgz_cleanup') # can't cleanup unless synced\ndag.set_dependency('reports_tgz_s3_ls', 'reports_tgz_cleanup') # data dependency\n\ndag.set_dependency('canning', 'canned_s3_sync')\n\ndag.set_dependency('canned_s3_sync', 'canned_s3_ls')\n\n# reports_raw_cleanup -> canned_cleanup is NOT a dependency as reports_raw_cleanup uses only index file\ndag.set_dependency('autoclaving', 'canned_cleanup') # uses `canned` data\ndag.set_dependency('tar_reports_raw', 'canned_cleanup') # may use `canned` data\ndag.set_dependency('canned_s3_sync', 'canned_cleanup') # can't cleanup unless synced\ndag.set_dependency('canned_s3_ls', 'canned_cleanup') # data dependency\n\ndag.set_dependency('canning', 'autoclaving')\n\ndag.set_dependency('autoclaving', 'meta_pg')\n\n# reports_raw_cleanup is done when both tasks are finished and have same data\n# reports_raw_cleanup does not remove unknown files as a safeguard\ndag.set_dependency('canning', 'reports_raw_cleanup')\ndag.set_dependency('tar_reports_raw', 'reports_raw_cleanup')\n\ndag.set_dependency('autoclaving', 'sanitised_check')\n\ndag.set_dependency('autoclaving', 'autoclaved_tarlz4_s3_sync')\n\ndag.set_dependency('autoclaving', 'autoclaved_jsonl_s3_sync')\n\ndag.set_dependency('autoclaving', 'sanitised_cleanup')\ndag.set_dependency('sanitised_s3_ls', 'sanitised_cleanup')\ndag.set_dependency('sanitised_check', 'sanitised_cleanup')\n","sub_path":"ansible/roles/airflow/files/airflow-dags/canning.py","file_name":"canning.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"23235774","text":"#--------------------------------\r\n#EXERCICE 2\r\n#LEONIDAS PASTRAS\r\n#P20155\r\n#18-2-2021\r\n#--------------------------------\r\nimport random\r\n\r\nn = int(input(\"Enter a number (anything over 30 might or might not crash your computer): \"))\r\ni = 1\r\nnum = [0, 1]\r\np = num[1]\r\ndef Fibonacci(i, p):\r\n if i < n:\r\n i = i + 1\r\n tempNum = num[1]\r\n num[1] = num[1] + num[0]\r\n num[0] = tempNum\r\n Fibonacci(i, p)\r\n else:\r\n CheckIfPrime(num[1])\r\ndef CheckIfPrime(p):\r\n isPrime = True\r\n j = 0\r\n while isPrime and j < 20:\r\n a = random.randint(0, 1000000) #1 million\r\n isPrime = (a ** p) % p == (a % p)\r\n UnnecessaryMessage(a, isPrime)\r\n j = j + 1\r\n if isPrime:\r\n print(\"The term\", n, \"of the Fibonacci sequence is\", p, \"and it is a Prime number! :D\")\r\n else:\r\n print(\"The term\", n, \"of the Fibonacci sequence is\", p, \"and it is NOT a Prime number! :(\")\r\ndef UnnecessaryMessage(a, isPrime):\r\n if isPrime:\r\n print(\"The random number\", a, \"does satisfy the equation\")\r\n else:\r\n print(\"The random number\", a, \"does NOT satisfy the equation\")\r\nFibonacci(i, p)","sub_path":"Code/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"379557029","text":"from datetime import datetime\r\nimport optuna\r\nfrom joblib import dump, load\r\nfrom sklearn.metrics import mean_absolute_error, median_absolute_error, r2_score\r\nfrom SantaClaraPack.Models.PreProcessors import *\r\nfrom SantaClaraPack.Banco.Dados import Dados\r\nfrom SantaClaraPack.Optimizer.Optimizer import Optimizer\r\nfrom SantaClaraPack.Plot.Plot import Plot\r\n\r\nif __name__ == '__main__':\r\n desired_width = 320\r\n pd.set_option('display.width', desired_width)\r\n pd.set_option('display.max_columns', 100)\r\n\r\n # Avaliacao do modelo modelo com hyper-tunning\r\n dao = Dados()\r\n optimize = Optimizer()\r\n pre = PreProcessors()\r\n gridded = GriddedDataProcessor()\r\n window = WindowProcessor()\r\n\r\n study = optuna.create_study(\r\n storage='sqlite:///Optimizer/optimize_tests.db',\r\n direction='maximize',\r\n study_name='optimize',\r\n load_if_exists=True\r\n )\r\n\r\n # Load do modelo\r\n model = load(filename=r'Models/mlp_posto_1.joblib')\r\n\r\n df_resultados = pd.DataFrame()\r\n df_vaz_true = dao.get_vazao(\r\n data_inicial='2013-01-01',\r\n data_final='2017-12-30'\r\n )\r\n\r\n df_vaz_true.drop(columns=['id', 'num_posto'], inplace=True)\r\n df_vaz_true['dat_medicao'] = pd.to_datetime(df_vaz_true['dat_medicao'])\r\n df_vaz_true.set_index(keys=['dat_medicao'], inplace=True)\r\n\r\n df_chuva_true = dao.get_gridded_data(\r\n classe='Chuva',\r\n data_inicial='2013-01-01',\r\n data_final='2017-12-30',\r\n lat_inicial=-22.4,\r\n lat_final=-21.2,\r\n lon_inicial=-44.6,\r\n lon_final=-44.2,\r\n )\r\n\r\n df_chuva_true['dat_medicao'] = pd.to_datetime(df_chuva_true['dat_medicao'])\r\n df_chuva_true = gridded.transform(\r\n df=df_chuva_true,\r\n index='dat_medicao',\r\n cols=['val_lat', 'val_lon'],\r\n value='val_precip',\r\n var_name='chuva',\r\n agg='sum',\r\n )\r\n\r\n #for data in pd.date_range(start='2013-08-17', end='2017-12-30', freq='7D'):\r\n for data in pd.date_range(start='2013-08-17', end='2017-12-01', freq='7D'):\r\n\r\n data_inicial = datetime.strftime(\r\n data + pd.to_timedelta(arg=study.best_params['window_neg'], unit='D'),\r\n '%Y-%m-%d'\r\n )\r\n\r\n data_final_vazao = data + pd.to_timedelta(arg=study.best_params['window_neg'], unit='D') + \\\r\n pd.to_timedelta(arg=-study.best_params['window_neg'], unit='D')\r\n\r\n data_final = datetime.strftime(\r\n data + pd.to_timedelta(arg=10, unit='D'),\r\n '%Y-%m-%d'\r\n )\r\n\r\n df_vazao = df_vaz_true.loc[data_inicial:data_final_vazao]\r\n\r\n df_chuva_transform = df_chuva_true.loc[data_inicial:data_final]\r\n\r\n # Formando X e y\r\n X = pd.concat(objs=[df_vazao[['val_vaz_natr']], df_chuva_transform], sort=True, axis=1)\r\n\r\n i = 1\r\n # Gera previsões para t+1 a t+7 para cada data do loop anterior\r\n for data_previsao in pd.date_range(start=data, freq='1D', periods=10):\r\n\r\n X_test_lag = window.transform_predict(\r\n X=X,\r\n n_in=study.best_params['window_neg'],\r\n n_out=0\r\n )\r\n\r\n X_test_lag = pd.DataFrame(data=[X_test_lag.loc[data_previsao]], columns=X_test_lag.loc[data_previsao].index)\r\n y_hat = model.predict(X=X_test_lag)\r\n\r\n\r\n # Atualiza na base de dados\r\n X.loc[data_previsao, 'val_vaz_natr'] = y_hat\r\n\r\n # Atualiza log de resultados\r\n aux = pd.DataFrame(\r\n data=dict(\r\n dat_reference=data,\r\n dat_previsao=data_previsao,\r\n num_termo=i,\r\n val_vaz_pred=y_hat,\r\n val_vaz_true=df_vaz_true.loc[data_previsao, 'val_vaz_natr']\r\n )\r\n )\r\n\r\n df_resultados = pd.concat(objs=[df_resultados, aux], ignore_index=True)\r\n i += 1\r\n\r\n # Scores\r\n 
    df_resultados.to_csv(path_or_buf=r'Fig/resultados_posto_1.csv', sep=';', decimal=',')\r\n\r\n    # plot\r\n    #plot = Plot()\r\n    #plot.plot_prediction_compararison(y_true=y_test_lag['val_vaz_natr'].values, y_pred=y_hat, times=y_test_lag.index)","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"239952320","text":"# coding=utf-8\n# 3. AFED = Accessibility of flow taking existing destinations\n\ndef AFED(flow_df, row_index): # AFAPF\n \n # rename teh columns so we can call them \n flow_df = flow_df.rename(columns = {flow_df.columns[0]:'origin_ID', \n flow_df.columns[1]:'dest_ID', \n flow_df.columns[2]:'dist', \n flow_df.columns[3]:'weight', \n flow_df.columns[4]:'dest_mass'})\n # define O and D for each row the variables\n D = flow_df['dest_ID'][row_index]\n O = flow_df['origin_ID'][row_index]\n \n # get the list of possible destinations\n all_dest = (flow_df.query('origin_ID == @O')\n .query('weight > 0')\n ['dest_ID']\n .unique()\n ) \n \n # Create all destination flows \n x1 = pd.DataFrame({'D': np.array([D]*len(all_dest), dtype=object), \n 'dests':all_dest}).merge(flow_df, how='left', left_on=['D','dests'], right_on=['origin_ID','dest_ID'])\n \n # merge with the distances and masses \n \n # Delete the flow to origin\n x1 = x1[~x1.dests.isin(list(O))] \n\n # calculate the accessibility\n A = (x1['dist']*x1['dest_mass']).sum()\n\n return A","sub_path":".ipynb_checkpoints/AFED-checkpoint.py","file_name":"AFED-checkpoint.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"649671906","text":"#!/usr/bin/python\n\n# Scrape http://auctions.strettons.co.uk and store the results\n# in a CSV file\n#\n# Syntax: scrape_auctions.py \n# If the first argument is '-', then the output is printed on the screen\n\nimport re\nimport bs4\nimport csv\nimport sys\nimport urllib2\n\n# most of the documents are generated dynamically through document.write()\n# this function extracts the strings out of document.write(), unescapes double quotes\n# and converts non-breaking spaces to normal spaces\ndef decode_document_writes(content):\n content = re.sub('^document.write\\(\"(.*)\"\\);', '\\\\1', content, 0, re.MULTILINE)\n content = re.sub('\\\\\\\\', '', content, 0, re.MULTILINE)\n # replace with a space\n content = re.sub(u'\\u00a0', ' ', content, 0, re.MULTILINE)\n\n return content\n\n# get the stripped string from a HTML element\ndef get_stripped_string(element):\n string = ''.join([ text for text in element.stripped_strings ])\n\n return string.encode('utf8')\n\n\n\ncsv_file = 'strettons.csv'\ncsv_file = open(csv_file, \"w\")\n\nbase_url = \"http://auctions.strettons.co.uk/CurrentAuction.aspx\"\n\nhomepage = urllib2.urlopen(base_url)\nsoup = bs4.BeautifulSoup(homepage)\n\n# there are multiple scripts that render the auction pages\n# first, main_script is loaded, which in turn loads auction_script\nmain_script_element = soup.find('script')\nmain_script_url = main_script_element.get('src')\nmain_script = urllib2.urlopen(main_script_url)\nmain_script_content = main_script.read().decode('utf8')\n\n# auction_script generates the actual list of actions on the home page\nauction_script_url_matches = re.search(\"http.*auction/[0-9]+/\", main_script_content)\nauction_script_url = auction_script_url_matches.group()\nauction_script = urllib2.urlopen(auction_script_url)\nauction_script_content = auction_script.read().decode('utf8')\nauction_script_content = decode_document_writes(auction_script_content)\n\n# details_script contains all the details of an auction\ndetails_script_url_matches = re.search(\"http.*/lot/\", main_script_content)\ndetails_script_url = details_script_url_matches.group()\n\nsoup = bs4.BeautifulSoup(auction_script_content)\n\n# all the auction information. 
it is not really required,\n# but can be helpful if the script needs to be extended\nauctions = []\nfor tr in soup.find_all('tr'):\n    tds = tr.find_all('td')\n    if (len(tds) != 3):\n        continue\n\n    lot_td, link_td, _ = tds\n\n    # lot num is easier to get here\n    lot_num = get_stripped_string(lot_td)\n\n    # link to the particular auction page\n    link_a = link_td.find('a')\n    link = link_a.get('href')\n\n    # extra parameters that may be passed\n    lid = re.sub('.*lid=([0-9]+).*', '\\\\1', link)\n    tid = re.sub('.*tid=([^&]*).*', '\\\\1', link)\n\n    # logic copied from the javascript\n    if tid == link:\n        tid = '9'\n\n    # last part of the URL\n    urlpart = lid + \"/\" + tid\n\n    auction = {\n        'lot_num': lot_num,\n        'url': details_script_url + urlpart + '?src=null'\n    }\n\n    auctions += [ auction ]\n\nwriter = csv.writer(csv_file)\n\n# load the auction page, get the timings and print the result\nfor auction in auctions:\n    url = auction['url']\n    auction_page = urllib2.urlopen(url)\n    auction_content = auction_page.read().decode('utf8')\n    auction_content = decode_document_writes(auction_content)\n\n    soup = bs4.BeautifulSoup(auction_content)\n    lot_address_div = soup.find(class_ = 'lotaddress')\n    lot_address = get_stripped_string(lot_address_div)\n\n    timing_ps = soup.find_all(class_ = 'red')\n\n    timings = []\n    for timing_p in timing_ps:\n        timings += [ get_stripped_string(timing_p) ]\n\n    auction['address'] = lot_address\n    auction['timings'] = timings\n\n    writer.writerow([ auction['lot_num'], auction['address'] ] + auction['timings'])\n    csv_file.flush()\n\n","sub_path":"ScrapeScripts/strettons.py","file_name":"strettons.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"383224736","text":"# Copyright 2015 Cisco Systems, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nose.tools import *\nfrom ucsmsdk.ucsfilter import generate_infilter\nfrom ucsmsdk.ucsxmlcodec import to_xml_str\n\n\ndef test_001_not_filter():\n\n expected = b' '\n\n filter_str = 'not (dn,\"org-root/ls-C1_B1\", type=\"eq\")'\n filter_xml = generate_infilter(class_id=\"LsServer\",\n filter_str=filter_str,\n is_meta_class_id=True)\n\n xml_str = to_xml_str(filter_xml.to_xml())\n\n assert_equal(xml_str, expected)\n","sub_path":"tests/common/test_generate_filter.py","file_name":"test_generate_filter.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"294029695","text":"from flask import jsonify\n\nclass SecurityManager__Utils(object):\n\n def _prepare_create_user_args(self, **kwargs):\n \"\"\"Checking if specified roles exist\"\"\"\n roles = kwargs.get(\"roles\", [])\n for i, role in enumerate(roles):\n rn = role.name if isinstance(role, self.role_model) else role\n roles[i] = self.find_role(rn)\n kwargs[\"roles\"] = roles\n return kwargs\n\n def _json_response(self, status, redirect_addr, desc):\n resp = {}\n resp['status'] = status\n resp['redirect_addr'] = redirect_addr\n resp['desc'] = desc\n return jsonify(resp)\n","sub_path":"flask_security/security_manager__utils.py","file_name":"security_manager__utils.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"484307196","text":"\n\n#calss header\nclass _CIRCUMNAVIGATE():\n\tdef __init__(self,): \n\t\tself.name = \"CIRCUMNAVIGATE\"\n\t\tself.definitions = [u'to sail all the way around something: ', u'to move around something in order to avoid hitting it: ', u'to avoid something by taking a particular course of action: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_circumnavigate.py","file_name":"_circumnavigate.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"76418375","text":"#!/usr/bin/python3\n\nimport scapy\nfrom scapy.all import send, conf, L3RawSocket\n\ndef inject_pkt(pkt):\n #import dnet\n #dnet.ip().send(pkt)\n conf.L3socket=L3RawSocket\n send(pkt)\n\n######\n# edit this function to do your attack\n######\ndef handle_pkt(pkt):\n identifyServer = str(pkt[30])+\".\"+str(pkt[31])+\".\"+str(pkt[32])+\".\"+str(pkt[33])\n if identifyServer == \"18.234.115.5\" and pkt.find(b'GET')!=-1: \n numberSeq = int(pkt[38:42].hex(),16)\n numberAck = int(pkt[42:46].hex(),16)\n portDestination = int(pkt[34:36].hex(),16)\n final_IP = str(pkt[26])+\".\"+str(pkt[27])+\".\"+str(pkt[28])+\".\"+str(pkt[29])\n payloadFinal = 'HTTP/1.1 200 OK\\r\\nServer: nginx/1.14.0 (Ubuntu)\\r\\nContent-Type: text/html; charset=UTF-8\\r\\nContent-Length: 335\\r\\nConnection: close\\r\\n\\r\\n\\n\\n Free AES Key Generator! \\n\\n\\nFree AES Key Generator! \\nDefinitely not run by the NSA. \\n \\n \\nYour free AES-256 key: 4d6167696320576f7264733a2053717565616d697368204f7373696672616765 \\n\\n'\n packet = IP(src=\"18.234.115.5\", dst=final_IP)/TCP(sport=80, dport=portDestination, flags=\"PA\", seq = numberAck , ack=numberSeq+1)/payloadFinal\n inject_pkt(packet)\n\ndef main():\n import socket\n s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0x0300)\n while True:\n pkt = s.recv(0xffff)\n handle_pkt(pkt)\n\nif __name__ == '__main__':\n main()\n","sub_path":"attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"417834292","text":"from operator import itemgetter\nimport csv\nreader = csv.reader(open(\"part_00000\"), delimiter=\"\\t\")\n\nf = open(\"sorted_data.tsv\", \"w\")\nf.write('%s\\t%s\\n'%(\"text\", \"size\"))\nfor line in sorted(reader, key=lambda row: int(row[1]), reverse=True):\n f.write( '%s\\t%s\\n' %(line[0],line[1]) ) # str() converts to string\nf.close()\n #print '%s\\t%s' %(line[0],line[1])\n","sub_path":"tsv_sort.py","file_name":"tsv_sort.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"453997992","text":"\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot\nimport argparse\nimport logging\nfrom mdtools import dr, get_object\nimport scipy.integrate\n\nfrom scipy.optimize import minimize\nimport pymbar\nimport time\n\nfrom mdtools import ParallelTool\n\nfrom constants import k\n\nfrom whamutils import *\n\nimport matplotlib as mpl\n\nimport matplotlib.pyplot as plt\n\n\nlog = logging.getLogger('mdtools.whamerr')\n\nfrom IPython import embed\n\n\ndef _subsample_small(bias_mat, uncorr_n_samples, uncorr_n_tot, n_samples, n_windows):\n\n # sub sample the bias matrix according to the number of uncorrelated samples from each window\n uncorr_bias_mat = np.zeros((uncorr_n_tot, n_windows), dtype=np.float32)\n start_idx = 0\n uncorr_start_idx = 0\n subsampled_indices = np.array([], dtype=int)\n\n np.random.seed()\n\n for i, this_uncorr_n_sample in enumerate(uncorr_n_samples):\n # the total number of (correlated) datapoints for this window\n this_n_sample = n_samples[i]\n avail_indices = np.arange(this_n_sample)\n # subsampled indices for this data\n this_indices = start_idx + np.random.choice(avail_indices, size=this_uncorr_n_sample, replace=True)\n subsampled_indices = np.append(subsampled_indices, this_indices)\n\n uncorr_bias_mat[uncorr_start_idx:uncorr_start_idx+this_uncorr_n_sample, :] = bias_mat[this_indices, :]\n \n uncorr_start_idx += this_uncorr_n_sample\n start_idx += this_n_sample\n\n\n return (uncorr_bias_mat, subsampled_indices)\n\ndef _subsample(bias_mat, uncorr_n_samples, autocorr_blocks, n_samples):\n\n # sub sample the bias matrix according to the number of uncorrelated samples from each window\n\n n_windows = n_samples.size\n\n ## Number of samples for each window, after selecting n_uncorr_sample of block size for each window\n n_blocked_sample = uncorr_n_samples*autocorr_blocks\n\n # sanity\n assert n_blocked_sample.sum() <= n_samples.sum()\n\n uncorr_bias_mat = np.zeros((n_blocked_sample.sum(), n_windows), dtype=np.float32)\n start_idx = 0\n uncorr_start_idx = 0\n subsampled_indices = np.array([], dtype=int)\n\n np.random.seed()\n\n # number of blocks (of size autocorr_blocks[i]) we'll select from this window\n for i, this_n_blocks in enumerate(uncorr_n_samples):\n \n this_block_size = autocorr_blocks[i]\n # the total number of (correlated) datapoints for this window\n this_n_sample = n_samples[i]\n\n ## Number of blocks times block size for this sample\n this_n_blocked_sample = n_blocked_sample[i]\n assert this_n_blocked_sample == this_n_blocks * this_block_size\n\n ## Possible starting indices for each block - these are local\n avail_indices = np.arange(this_n_sample-this_block_size+1)\n # Global indices of this window we have to choose from\n total_indices = start_idx + np.arange(this_n_sample)\n\n # (local) starting and ending indices for each block\n this_start_indices = np.random.choice(avail_indices, size=this_n_blocks, replace=True)\n this_end_indices = this_start_indices + this_block_size\n\n ## Now make up the indices of this bootstrap subsample\n this_indices = np.array([], dtype=int)\n for this_start, this_end in zip(this_start_indices, this_end_indices):\n this_indices = np.append(this_indices, total_indices[slice(this_start, this_end)])\n\n assert this_indices.min() >= start_idx\n assert this_indices.max() < start_idx + this_n_sample\n\n assert this_indices.size == this_n_blocked_sample\n\n subsampled_indices = np.append(subsampled_indices, this_indices)\n\n 
uncorr_bias_mat[uncorr_start_idx:uncorr_start_idx+this_n_blocked_sample, :] = bias_mat[this_indices, :]\n \n uncorr_start_idx += this_n_blocked_sample\n start_idx += this_n_sample\n\n\n return (uncorr_bias_mat, subsampled_indices)\n\n\n# Do a batch of bootstrap subsampling\ndef _bootstrap(lb, ub, bias_mat, uncorr_n_samples, autocorr_blocks, n_samples, \n ones_m, ones_n, xweights, all_data, all_data_aux, boot_fn=None):\n \n # Number of bootstrap runs to do this round\n batch_size = ub - lb\n\n n_windows = n_samples.size\n assert n_windows == uncorr_n_samples.size == autocorr_blocks.size\n\n ## Number of samples for each window, after selecting n_uncorr_sample of block size for each window\n n_blocked_sample = uncorr_n_samples*autocorr_blocks\n\n\n # Results for this bootstrap batch\n f_k_ret = np.zeros((batch_size, n_windows), dtype=np.float32)\n boot_fn_ret = np.zeros(batch_size, dtype=object)\n\n # Subsampled indices for each bootstrap in this batch\n all_boot_indices = np.zeros((batch_size, n_blocked_sample.sum()), dtype=int)\n\n for batch_num in range(batch_size):\n\n ## Fill up the uncorrelated bias matrix\n boot_uncorr_bias_mat, boot_indices = _subsample(bias_mat, uncorr_n_samples, autocorr_blocks, n_samples)\n\n myargs = (boot_uncorr_bias_mat, n_blocked_sample/n_blocked_sample.sum(), n_blocked_sample.sum())\n boot_f_k = np.append(0, minimize(eval_fn, xweights, method='BFGS', jac=True, args=myargs).x)\n\n f_k_ret[batch_num,:] = boot_f_k\n all_boot_indices[batch_num,:] = boot_indices\n \n if boot_fn is not None:\n boot_logweights = gen_data_logweights(boot_uncorr_bias_mat, boot_f_k, n_samples, ones_m, ones_n[boot_indices])\n boot_fn_ret[batch_num] = boot_fn(all_data, all_data_aux, boot_indices, boot_logweights)\n del boot_logweights\n\n del boot_uncorr_bias_mat\n\n return (f_k_ret, boot_fn_ret, all_boot_indices, lb, ub)\n\n\nclass WHAMmer(ParallelTool):\n prog='WHAM/MBAR analysis'\n description = '''\\\nPerform MBAR/Binless WHAM analysis on 'phiout.dat' or '*.xvg' datasets (e.g. 
from alchemical FE calcs with GROMACS).\nNote that XVG type datasets must contain DeltaU for *every* other window (not just the adjacent window(s), as\n    is required by 'g_bar', which uses TI, not MBAR).\n\nAlso perform bootstrapping standard error analysis - must specify an autocorrelation time for this to work correctly!\n\nThis tool supports parallelization (see options below)\n\n\n-----------------------------------------------------------------------------\nCommand-line options\n-----------------------------------------------------------------------------\n'''\n    \n    def __init__(self):\n        super(WHAMmer,self).__init__()\n        \n        # Parallel processing by default (this is not actually necessary, but it is\n        # informative!)\n        self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager\n\n        self.beta = 1\n\n        self.n_bootstrap = None\n\n        self.output_filename = None\n\n        self.start_weights = None\n\n        self.boot_fn = None\n        \n        self.data_extractor = None\n\n        self.db = None\n        self.n_bins = None\n\n    # Total number of samples - sum of n_samples from each window\n    @property\n    def n_tot(self):\n        return self.n_samples.sum()\n    \n    \n    def add_args(self, parser):\n        \n        sgroup = parser.add_argument_group('(Binless) WHAM/MBAR error options')\n        sgroup.add_argument('input', metavar='INPUT', type=str, nargs='+',\n                            help='Input file names')\n        sgroup.add_argument('--fmt', type=str, choices=['phi', 'pmf', 'alc', 'chi', 'both'], default='phi',\n                            help='Format of input data files: \\'phi\\' for phiout.dat; \\ '\n                                 '\\'pmf\\' for PMF type files (specify kappa (kJ/mol*nm2), rstar in header); \\ '\n                                 '\\'alc\\' for alchemical type files (each column k gives biasing potential for this datapoint under kth window); \\ '\n                                 '\\'both\\' for phi AND chi data (in pairs, phidat first)')\n        sgroup.add_argument('--phisub', action='store_true',\n                            help='If true, expect two phiout files per window (main volume and subvol, resp.)')\n        sgroup.add_argument('--auxfiles', metavar='AUXINPUT', type=str, nargs='+',\n                            help='(Optional): Aux data file names, same number of data points as inputs')\n        sgroup.add_argument('-b', '--start', type=int, default=0,\n                            help='first timepoint (in ps) - default is first available time point') \n        sgroup.add_argument('-e', '--end', type=int, default=None,\n                            help='last timepoint (in ps) - default is last available time point')\n        sgroup.add_argument('--skip', type=int, default=None,\n                            help='Skip this many picoseconds from each dataset. 
--autocorr option WILL BE IGNORED if this option is used')\n        sgroup.add_argument('-T', metavar='TEMP', type=float,\n                            help='convert Phi values to kT, for TEMP (K)')\n        sgroup.add_argument('--bootstrap', type=int, default=1000,\n                            help='Number of bootstrap samples to perform') \n        sgroup.add_argument('--autocorr', '-ac', type=float, help='Autocorrelation time (in ps); this can be \\ '\n                            'a single float, or one for each window') \n        sgroup.add_argument('--autocorr-file', '-af', type=str, \n                            help='Name of autocorr file (with times in ps for each window), if previously calculated')\n        sgroup.add_argument('--n-bins', type=int, default=201, help='number of bins, if plotting prob dist (default 25)')\n        sgroup.add_argument('--db', type=float, default=1.0, help='bin width, for KL tests')\n        sgroup.add_argument('--start-weights', type=str, default=None,\n                            help='(optional) previously calculated f_k file for INDUS simulations - \\ '\n                                 'if \\'phi\\' format option also supplied, this will calculate the Pv(N) (and Ntwid). \\ '\n                                 'For \\'xvg\\' formats, this will calculate the probability distribution of whatever \\ '\n                                 'variable has been umbrella sampled')\n        sgroup.add_argument('--boot-fn', default=None, \n                            help='function, loaded from file of the form \\'module.function\\', to be performed \\ '\n                                 'during each bootstrap iteration. If provided, the function is called during each bootstrap as: \\ '\n                                 'fn(all_data, all_data_N, boot_indices, boot_logweights) where boot_indices corresponds to the indices\\ '\n                                 'of this selected bootstrap sample, and boot_logweights are the corresponding (log of) statistical weights \\ '\n                                 'for each bootstrap sample calculated with WHAM/MBAR.')\n\n    ## Hackish way to 'inherit' attributes from the data extractor\n    def __getattr__(self, attr):\n        if self.data_extractor is not None:\n            return getattr(self.data_extractor, attr)\n\n    def process_args(self, args):\n\n        self.beta = 1\n        if args.T:\n            self.beta /= (args.T * k)\n\n        self.db = args.db\n        self.n_bins = args.n_bins\n\n        # Number of bootstrap samples to perform\n        self.n_bootstrap = args.bootstrap\n\n        if args.start_weights:\n            self.start_weights = np.loadtxt(args.start_weights)\n            log.info(\"starting weights: {}\".format(self.start_weights))\n\n        if args.autocorr_file is not None:\n            auto = args.autocorr_file\n        else:\n            auto = args.autocorr\n\n\n\n        if args.auxfiles is not None:\n            auxfiles = args.auxfiles\n        else:\n            auxfiles = None\n\n        log.info(\"Extracting data...\")\n        #embed()\n        if args.fmt != 'both':\n            self.data_extractor = WHAMDataExtractor(np.sort(args.input), auxinfiles=auxfiles, fmt=args.fmt, phisub=args.phisub, start=args.start, end=args.end, beta=self.beta)\n\n        else:\n            log.info(\"Extracting pairs of phi, chi data!!!\")\n            log.info(\"WARNING: bootstrapping analysis is not yet implemented for 2D. 
Also, this may take a *long* time.\")\n self.data_extractor = WHAMDataExtractor(np.sort(args.input)[::2], auxinfiles=auxfiles, fmt='phi', phisub=args.phisub, start=args.start, end=args.end, beta=self.beta)\n chi_data_extractor = WHAMDataExtractor(np.sort(args.input)[1::2], fmt='chi', start=args.start, end=args.end, beta=self.beta)\n\n self.data_extractor.bias_mat += chi_data_extractor.bias_mat\n\n self.n_bootstrap = 0\n\n self.data_extractor.all_data = np.stack((self.data_extractor.all_data, chi_data_extractor.all_data)).T\n if auto is not None:\n min_idx = self.autocorr < auto\n self.autocorr[min_idx] = auto\n\n log.info(\"Tau for each window: {} ps\".format(self.autocorr))\n log.info(\"data time step: {} ps\".format(self.ts))\n log.info(\"autocorr nsteps: {}\".format(self.autocorr_blocks)) \n\n if args.boot_fn is not None:\n self.boot_fn = get_object(args.boot_fn)\n\n def go(self):\n #embed()\n if self.start_weights is not None:\n log.info(\"using initial weights: {}\".format(self.start_weights))\n f_k_sub = self.start_weights\n else:\n xweights = np.zeros(self.n_windows)\n\n assert xweights[0] == 0\n\n log.info(\"Quick sub-sampled MBAR run\")\n \n # Only grab n_uncorr_samples samples from each window for a quick WHAM run to get an initial guess\n # for weights {f_k}\n uncorr_bias_mat, subsampled_indices = _subsample_small(self.bias_mat, self.uncorr_n_samples, self.uncorr_n_tot, self.n_samples, self.n_windows)\n \n #myargs = (uncorr_bias_mat, self.uncorr_n_sample_diag, self.uncorr_ones_m, self.uncorr_ones_n, self.uncorr_n_tot)\n myargs = (uncorr_bias_mat, self.uncorr_n_samples / self.uncorr_n_tot, self.uncorr_n_tot)\n f_k_sub = minimize(eval_fn, xweights[1:], method='BFGS', args=myargs, jac=True).x\n #f_k_sub = minimize(kappa, xweights[1:], method='L-BFGS-B', args=myargs, jac=grad_kappa).x\n f_k_sub = np.append(0, f_k_sub)\n log.info(\"subsampled MBAR results: {}\".format(f_k_sub))\n\n log.info(\"Running MBAR on entire dataset\")\n log.info(\"...(this might take awhile)\")\n myargs = (self.bias_mat, self.n_samples / self.n_tot, self.n_tot)\n \n f_k_actual = minimize(eval_fn, f_k_sub[1:], method='BFGS', tol=1e-5, args=myargs, jac=True, callback=callbackF).x\n #f_k_actual = minimize(kappa, f_k_sub[1:], method='L-BFGS-B', args=myargs, jac=grad_kappa, callback=callbackF).x\n f_k_actual = np.append(0, f_k_actual)\n log.info(\"MBAR results on entire dataset: {}\".format(f_k_actual))\n\n np.savetxt('f_k_all.dat', f_k_actual, fmt='%3.6f')\n\n \n # Log of each datapoint's statistical weight. 
Note this accounts for statistical inefficiency in samples\n all_logweights = gen_data_logweights(self.bias_mat, f_k_actual, self.n_samples, self.ones_m, self.ones_n)\n \n \n np.savez_compressed('all_data.dat', logweights=all_logweights, data=self.all_data, data_aux=self.all_data_aux, bias_mat=self.bias_mat, n_samples=self.n_samples)\n #embed()\n # Now for bootstrapping...\n n_workers = self.work_manager.n_workers or 1\n #batch_size = self.n_bootstrap // n_workers\n batch_size = 1\n if self.n_bootstrap % n_workers != 0:\n batch_size += 1\n log.info(\"batch size for bootstrap: {}\".format(batch_size))\n\n # the bootstrap estimates of free energies wrt window i=0\n f_k_boot = np.zeros((self.n_bootstrap, self.n_windows), dtype=np.float32)\n boot_indices = np.zeros((self.n_bootstrap, (self.autocorr_blocks*self.uncorr_n_samples).sum()), dtype=int)\n # Results of hook function, if desired\n boot_res = np.zeros(self.n_bootstrap, dtype=object)\n\n def task_gen():\n \n if __debug__:\n checkset = set()\n for lb in range(0, self.n_bootstrap, batch_size):\n ub = min(self.n_bootstrap, lb+batch_size)\n \n if __debug__:\n checkset.update(set(range(lb,ub)))\n\n args = ()\n kwargs = dict(lb=lb, ub=ub, bias_mat=self.bias_mat, uncorr_n_samples=self.uncorr_n_samples, \n autocorr_blocks=self.autocorr_blocks, n_samples=self.n_samples, ones_m=self.ones_m,\n ones_n=self.ones_n,xweights=f_k_actual[1:], all_data=self.all_data,\n all_data_aux=self.all_data_aux, boot_fn=self.boot_fn)\n log.info(\"Sending job batch (from bootstrap sample {} to {})\".format(lb, ub))\n yield (_bootstrap, args, kwargs)\n\n\n log.info(\"Beginning {} bootstrap iterations\".format(self.n_bootstrap))\n # Splice together results into final array of densities\n for future in self.work_manager.submit_as_completed(task_gen(), queue_size=self.max_queue_len):\n f_k_slice, boot_res_slice, this_boot_indices, lb, ub = future.get_result(discard=True)\n log.info(\"Receiving result\")\n f_k_boot[lb:ub, :] = f_k_slice\n log.debug(\"this boot weights: {}\".format(f_k_slice))\n boot_res[lb:ub] = boot_res_slice\n boot_indices[lb:ub, :] = this_boot_indices\n del f_k_slice\n\n # Get SE from bootstrapped samples\n f_k_boot_mean = f_k_boot.mean(axis=0)\n f_k_se = np.sqrt(f_k_boot.var(axis=0))\n print('f_k (boot mean): {}'.format(f_k_boot_mean))\n print('f_k: {}'.format(f_k_actual))\n print('se: {}'.format(f_k_se))\n np.savetxt('err_f_k.dat', f_k_se, fmt='%3.6f')\n np.savetxt('boot_f_k.dat', f_k_boot)\n np.save('boot_indices.dat', boot_indices)\n\n print(\"saving bootstrap fn output...\")\n if self.boot_fn is not None:\n np.save('boot_fn_payload.dat', boot_res)\n\n log.info(\"Performing cross-entropy post-analysis\")\n print(\"...Done.\")\n \n bins = np.arange(np.floor(self.all_data.min()), np.ceil(self.all_data.max())+self.db, self.db)\n #bins = np.linspace(np.floor(self.all_data.min()), np.ceil(self.all_data.max()), self.n_bins)\n #embed()\n entropies = np.zeros(self.n_windows)\n\n fvn = get_negloghist(self.all_data, bins, all_logweights)\n mask = ~np.ma.masked_invalid(fvn).mask\n \n if self.fmt == 'phi': #or self.fmt == 'alc':\n\n for i, (data_slice, bias_slice) in enumerate(self.data_extractor.gen_obs_data()):\n #embed()\n this_n_sample = self.n_samples[i]\n obs_hist, _ = np.histogram(data_slice, bins=bins)\n occ = obs_hist > 0\n this_mask = mask&occ\n\n if self.fmt == 'phi':\n this_kappa = self.kappas[i]\n this_nstar = self.Nstars[i]\n this_phi = self.phis[i]\n\n # Get consensus hist for this window\n bias_logweights = all_logweights - 
0.5*self.beta*this_kappa*(self.all_data - this_nstar)**2 - this_phi*self.all_data\n \n elif self.fmt == 'alc':\n bias_logweights = all_logweights - bias_slice\n\n bias_logweights -= bias_logweights.min()\n fvn_bias = get_negloghist(self.all_data, bins, bias_logweights)\n\n this_eta = (obs_hist / this_n_sample) * (np.log(obs_hist / this_n_sample) + fvn_bias)\n\n entropies[i] = this_eta[this_mask].sum()\n\n print(\"{}th window consensus: {:.2e}\".format(i, this_eta[this_mask].sum()))\n\n np.savetxt('kl_entropies.dat', np.dstack((self.kappas, self.Nstars, self.phis, entropies))[0], header='kappa(kJ/mol) Nstar phi(kJ/mol) KL_div')\n\n \n bins = np.arange(self.all_data_aux.max()+2)\n fvn = get_negloghist(self.all_data_aux, bins, all_logweights)\n mask = ~np.ma.masked_invalid(fvn).mask\n wt = np.exp(-fvn[mask])\n wt /= wt.sum()\n avg_n = np.dot(wt, bins[:-1][mask])\n avg_n_sq = np.dot(wt, bins[:-1][mask]**2)\n var_n = avg_n_sq - avg_n**2\n\n np.savetxt(\"fvn.dat\", np.stack((bins[:-1], fvn)).T, header='N beta Fv(N) : {:.2f} : {:.4f}'.format(avg_n, var_n), fmt='%f')\n\n\n elif self.fmt == 'alc':\n np.savetxt(\"fvn.dat\", np.stack((bins[:-1], fvn)).T)\n cum_n_samples = np.append(0, np.cumsum(self.n_samples))\n for i in range(self.n_windows):\n #embed()\n start_idx = cum_n_samples[i]\n end_idx = cum_n_samples[i+1]\n\n data_slice = self.all_data[start_idx:end_idx]\n this_n_sample = self.n_samples[i]\n obs_hist, _ = np.histogram(data_slice, bins=bins)\n occ = obs_hist > 0\n this_mask = mask&occ\n\n bias_logweights = all_logweights - self.bias_mat[:, i]\n\n bias_logweights -= bias_logweights.min()\n fvn_bias = get_negloghist(self.all_data, bins, bias_logweights)\n\n this_eta = (obs_hist / this_n_sample) * (np.log(obs_hist / this_n_sample) + fvn_bias)\n\n entropies[i] = this_eta[this_mask].sum()\n\n print(\"{}th window consensus: {:.2e}\".format(i, this_eta[this_mask].sum()))\n\n\n\nif __name__=='__main__':\n WHAMmer().main()\n\n\n \n","sub_path":"whamerr.py","file_name":"whamerr.py","file_ext":"py","file_size_in_byte":21675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"90247582","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom random import *\nfrom tkinter import filedialog\nfrom tkinter.filedialog import asksaveasfile\nimport copy\nre_get=0\ni=0\ngot_i=0\nroot=Tk()\nroot.iconbitmap(r'C:\\Users\\rina\\Downloads\\randomprogram--main\\tennoji.ico')\nlistbox=Listbox(root,selectmode=EXTENDED)\nlistbox_yougot=Listbox(root,selectmode=EXTENDED)\nroot.title(\"Rinachan Lots\")\nroot.geometry(\"700x700\")\nroot.resizable(0,0)\nkeys={}\nyougots={}\ndef download():\n if len(keys)==0:\n messagebox.showwarning(\"error!\", \"please input value more than 1.\")\n return\n f = asksaveasfile(mode='w', defaultextension=\".txt\")\n if f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n return\n k_keys=[]\n for kk in keys.keys():\n k_keys.append(int(kk))\n for i in range(len(k_keys)):\n text2save = keys[k_keys[i]]+'\\n'\n f.write(text2save)\n f.close()\ndef openfile():\n filename = filedialog.askopenfilename()\n\n fileHandle = open(filename, 'r')\n getFromFile=(fileHandle.read()).split('\\n')\n global i\n for j in range(len(getFromFile)):\n print(getFromFile[j])\n print(i)\n if(getFromFile[j]==''):\n continue\n newArray = []\n for z in keys.keys():\n newArray.append(int(z))\n newArray.sort()\n print(newArray)\n if i in newArray:\n i = 0\n for k in newArray:\n if (i == int(k)):\n i += 1\n elif (i != int(k)):\n break\n print(i)\n keys[i] = getFromFile[j]\n listbox.insert(i, getFromFile[j])\n input_box.delete(\"0\", END)\n i += 1\n print(keys)\n\n fileHandle.close()\ndef random_select():\n if int(len(keys))==0:\n messagebox.showwarning(\"error!\", \"please input value more than 1.\")\n return\n global i\n check_value=[]\n for keysss in keys.keys():\n check_value.append(int(keysss))\n print(check_value)\n keys_value=check_value[randrange(0,len(check_value))]\n print(keys_value)\n newArray=[]\n global got_i\n for j in yougots.keys():\n newArray.append(int(j))\n if got_i in newArray:\n got_i=0\n print(\"hi\")\n for k in newArray:\n if(got_i==int(k)):\n got_i+=1\n elif(got_i!=int(k)):\n break\n global re_get\n re_get=check_value\n yougots[got_i]=keys[keys_value]\n listbox_yougot.insert(got_i,yougots[got_i])\n messagebox.showinfo(\"What You Get!\", keys[keys_value])\n var.set(f\"What You Got: {keys[keys_value]}\")\n got_i+=1\n print(keys.keys())\n print(1)\n\ndef delete_all():\n if int(len(keys))==0:\n messagebox.showwarning(\"error!\", \"lot box is empty!.\")\n return\n listbox.delete(0,END)\n global i\n i=0\n keys.clear()\ndef delete_button():\n try:\n selected = listbox.get(listbox.curselection())\n print(selected)\n global i\n i=i-1\n for key,value in keys.items():\n if value==selected:\n key_find=key\n del keys[key_find]\n listbox.delete(ANCHOR)\n except:\n return\n\ndef insert_fun(event):\n if(Entry.get(input_box)==\"\"):\n messagebox.showwarning(\"error!\",\"please input value\")\n return\n global i\n newArray=[]\n for j in keys.keys():\n newArray.append(int(j))\n newArray.sort()\n if i in newArray:\n print(\"3\")\n i=0\n for k in newArray:\n if(i==int(k)):\n\n i+=1\n elif(i!=int(k)):\n break\n\n keys[i]=Entry.get(input_box)\n listbox.insert( i,Entry.get(input_box))\n i = i + 1\n input_box.delete(\"0\",END)\n print(keys)\n\ninput_box = Entry(root, width=28)\nfileUpload_Button=Button(width=10,text=\"upload lots\",command=openfile)\nfileDownload_Button=Button(width=10,text=\"download lots\",command=download)\ndelete=Button(width=10,text=\"delete 
this\",command=delete_button)\ndelete.place(x=50,y=625)\nfileUpload_Button.place(x=160,y=650)\ndelete_all=Button(width=10,text=\"delete All\",command=delete_all)\nfileDownload_Button.place(x=50,y=650)\ndelete_all.place(x=160,y=625)\ninput_box.place(x=255,y=20)\nrandom=Button(width=10,text=\"Drawing Lots\",command=random_select)\nrandom.place(x=300,y=60)\nlistbox.place(x=50, y=100, width=200, height=500)\nlistbox_yougot.place(x=450,y=100,width=200,height=500)\nvar=StringVar()\nText_Slot=Label(root,textvariable=var,font=('Arial',10))\nText_Slot2=Label(root,text=\"Type and press enter!\",font=('Arial',10))\ndef delete_got():\n try:\n selected = listbox_yougot.get(listbox_yougot.curselection())\n print(selected)\n global got_i\n got_i=got_i-1\n for key,value in yougots.items():\n if value==selected:\n key_find=key\n del yougots[key_find]\n listbox_yougot.delete(ANCHOR)\n print(yougots.keys())\n except:\n return\n\ndelete_got=Button(width=10,text=\"delete this lot\",command=delete_got)\ndelete_got.place(x=450,y=625)\n\ndef delete_all_got():\n if int(len(yougots))==0:\n messagebox.showwarning(\"error!\", \"Drawing First!\")\n return\n listbox_yougot.delete(0,END)\n global got_i\n got_i=0\n yougots.clear()\ndelete_all_got=Button(width=10,text=\"delete all\",command=delete_all_got)\ndelete_all_got.place(x=570,y=625)\n\ndef download_yougot():\n if len(yougots) == 0:\n messagebox.showwarning(\"error!\", \"please draw!\")\n f = asksaveasfile(mode='w', defaultextension=\".txt\")\n if f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n return\n got_kk=[]\n for c in yougots.keys():\n got_kk.append(int(c))\n for i in range(len(yougots)):\n text2save = yougots[got_kk[i]]+'\\n'\n f.write(text2save)\n f.close()\ndownload_got=Button(width=27,text=\"download those\",command=download_yougot)\ndownload_got.place(x=450,y=650)\nroot.bind('', insert_fun)\n\ndef swap_button():\n if len(yougots)==0:\n messagebox.showwarning(\"error!\", \"Drawing First!\")\n return\n\n global keys\n keys.clear()\n keys=copy.deepcopy(yougots)\n listbox.delete(0, END)\n global i\n i=0\n for k in range(len(yougots)):\n listbox.insert(k, keys[k])\n i=i+1\n\n\n\n\nswap_button=Button(width=15,text=\"<-move to lots\",command=swap_button)\nswap_button.place(x=280,y=265)\n\ndef swap_button2():\n if len(keys)==0:\n messagebox.showwarning(\"error!\", \"Input First!\")\n return\n global yougots\n yougots.clear()\n yougots = copy.deepcopy(keys)\n listbox_yougot.delete(0, END)\n global i\n i = 0\n for k in range(len(keys)):\n listbox_yougot.insert(k, yougots[k])\n i = i + 1\n\nswap_button=Button(width=15,text=\"move to lots->\",command=swap_button2)\nswap_button.place(x=280,y=300)\nText_Slot2.place(x=10,y=20)\nText_Slot.place(x=450,y=65)\nroot.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"140058494","text":"import csv\r\nfrom collections import Counter \r\n\r\nwith open(\"height-weight.csv\", newline=\"\")as f :\r\n reader = csv.reader(f)\r\n file_data = list(reader)\r\n\r\nfile_data.pop(0)\r\n\r\nheight_list = []\r\n\r\nfor i in range(len(file_data)):\r\n num = file_data[i][1]\r\n height_list.append(float(num)) \r\n\r\nn = len(height_list)\r\nheight_list.sort()\r\n\r\ntotal = 0\r\n\r\nfor x in height_list:\r\n total = total + x\r\n\r\nmean = total/n\r\n\r\nprint(mean)\r\n\r\nif n%2 == 0:\r\n median1 = float(height_list[n//2])\r\n median2 = float(height_list[n//2-1])\r\n median = (median1 + median2)/2\r\nelse:\r\n median = float(height_list[n//2]) \r\n\r\nprint(median) \r\n\r\nnewdata = Counter(height_list)\r\n\r\ndata_range = {\r\n \"50-60\": 0,\r\n \"60-70\": 0,\r\n \"70-80\": 0,\r\n}\r\n\r\nfor height, occurance in newdata.items():\r\n if 50 modeOccurance:\r\n modeRandge, modeOccurance = [int(range.split(\"-\")[0]), int(range.split(\"-\")[1])], occurance\r\n\r\nmode = float((modeRandge[0] + modeRandge[1]) //2)\r\n\r\nprint(mode)","sub_path":"workingwithcsv.py","file_name":"workingwithcsv.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"602524680","text":"# %load q02_country_operations/build.py\n# default imports\nfrom greyatomlib.olympics_project_new.q01_rename_columns.build import q01_rename_columns\n#Previous Functions\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \ndef q02_country_operations(OlympicsDF=OlympicsDF):\n newcolumlist = [ (str(x)).split('(',1)[0].replace(u'\\xa0', u'') for x in OlympicsDF['Country']]\n OlympicsDF['Country_Name']=newcolumlist\n return OlympicsDF\nq02_country_operations()\n\n\n\n","sub_path":"q02_country_operations/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"87722727","text":"import jsonpickle\nfrom jsonpickle.handlers import BaseHandler\n\nfrom poker import Card, Combo\nfrom poker.handhistory import _BaseStreet, _BaseHandHistory, _Player, _PlayerAction\n\n\n@jsonpickle.handlers.register(Card, base=True)\nclass CardHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {'rank': obj.rank.val, 'suit': obj.suit.name}\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(Combo, base=True)\nclass ComboHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {'1': self.context.flatten(obj.first, reset=False), '2': self.context.flatten(obj.second, reset=False)}\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(_Player, base=True)\nclass PlayerHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data.clear()\n data = {'name': obj.name, 'stack': float(obj.stack), 'seat': obj.seat}\n if obj.combo is not None:\n data['hand'] = self.context.flatten(obj.combo, reset=False)\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(_PlayerAction, base=True)\nclass PlayerActionsHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {}\n data['name'] = obj.name\n data['action'] = obj.action.name\n if obj.amount is not None:\n data['amount'] = float(obj.amount)\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(_BaseStreet, base=True)\nclass StreetHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {}\n if obj.actions is not None:\n data['actions'] = [self.context.flatten(action, reset=False) for action in obj.actions]\n if obj.cards is not None:\n data['cards'] = [self.context.flatten(x, reset=False) for x in obj.cards]\n data['flushdraw'] = obj.has_flushdraw\n data['gutshot'] = obj.has_gutshot\n data['paired'] = obj.has_pair\n data['straightdraw'] = obj.has_straightdraw\n data['monotone'] = obj.is_monotone\n data['triplet'] = obj.is_triplet\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(_BaseHandHistory, base=True)\nclass HandHistoryHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {}\n data['timestamp'] = str(obj.date)\n data['id'] = int(obj.ident)\n data['tablename'] = obj.table_name\n data['bb'] = float(obj.bb)\n data['sb'] = float(obj.sb)\n data['game'] = str(obj.game)\n data['gametype'] = str(obj.game_type)\n data['limit'] = str(obj.limit)\n data['max-players'] = obj.max_players\n data['hero'] = obj.hero.name\n data['button'] = obj.button.name\n if obj.total_pot is not None:\n data['total_pot'] = float(obj.total_pot)\n if obj.rake is not None:\n data['rake'] = float(obj.rake)\n if obj.tournament_ident is not None:\n data['tournament-id'] = int(obj.tournament_ident)\n if obj.tournament_level is not None:\n data['tournament-level'] = str(obj.tournament_level)\n if obj.currency is not None:\n data['currency'] = str(obj.currency)\n if obj.extra is not None and obj.extra.get('money_type') is not None:\n data['moneytype'] = str(obj.extra.get('money_type'))\n data['players'] = [self.context.flatten(player, reset=True) for player in obj.players]\n\n if obj.preflop is not None:\n preflop_actions = [self.context.flatten(action, reset=False) for action in obj.preflop.actions]\n data['preflop'] = {'actions': preflop_actions}\n\n if obj.flop is not None:\n data['flop'] = self.context.flatten(obj.flop, reset=True)\n\n if obj.turn is not 
None:\n data['turn'] = self.context.flatten(obj.turn, reset=True)\n\n if obj.river is not None:\n data['river'] = self.context.flatten(obj.river, reset=True)\n\n if obj.show_down is not None:\n data['show_down'] = self.context.flatten(obj.show_down, reset=True)\n\n if obj.board is not None:\n board_ = [self.context.flatten(card, reset=True) for card in obj.board]\n data['board'] = board_\n data['winners'] = obj.winners\n\n if obj.earnings is not None:\n data['earnings'] = float(obj.earnings)\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\nclass JsonEncoder:\n\n def encode(self, obj):\n return jsonpickle.encode(obj)\n","sub_path":"poker/jsonencoding.py","file_name":"jsonencoding.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"105094550","text":"import numpy as np\nimport pickle as pkl\nimport networkx as nx\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\nfrom sklearn import preprocessing\nimport sys\nimport os\nimport gc\nimport math\n\ndef parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\n\ndef sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\ndef save_sparse_csr(filename,array):\n np.savez(filename,data = array.data ,indices=array.indices,\n indptr =array.indptr, shape=array.shape )\n\ndef load_sparse_csr(filename):\n loader = np.load(filename)\n return sp.csr_matrix(( loader['data'], loader['indices'], loader['indptr']),\n shape = loader['shape'])\n\ndef encode_onehot(labels):\n classes = set(labels)\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}\n labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)\n return labels_onehot\n\ndef load_data2(dataset_str):\n \"\"\"Load citation network dataset (cora only for now)\"\"\"\n print('Loading {} dataset...'.format(dataset_str))\n path = \"data/\"\n idx_features = np.genfromtxt(\"{}{}.content\".format(path, dataset_str), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features, dtype=np.float32)\n #features = sp.vstack((allx, tx)).tolil()\n #features[test_idx_reorder, :] = features[test_idx_range, :]\n\n idx_labels = np.genfromtxt(\"{}{}.labels\".format(path, dataset_str), dtype=np.dtype(str))\n labels = encode_onehot(idx_labels)\n\n # build graph\n idx2=list(range(1,1019))\n idx = np.array(idx2, dtype=np.int32)\n print(idx.shape)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset_str), dtype=np.int32)\n a=list(map(idx_map.get, edges_unordered.flatten()))\n file=open('data.txt','w') \n file.write(str(a)); \n file.close() \n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))\n\n return features.todense(), adj, labels\n\ndef get_splits(y):\n idx_train = range(70)\t#0,1,2,...,139\n idx_val = range(100, 250)\t#200,201,...,499\n idx_test = range(250, 750)\t#500,501,...,1499\n y_train = np.zeros(y.shape, dtype=np.int32)\n y_val = np.zeros(y.shape, dtype=np.int32)\n y_test = np.zeros(y.shape, dtype=np.int32)\n y_train[idx_train] = y[idx_train]\n y_val[idx_val] = y[idx_val]\n y_test[idx_test] = y[idx_test]\n train_mask = sample_mask(idx_train, y.shape[0])\n val_mask = sample_mask(idx_val, y.shape[0])\n test_mask = sample_mask(idx_test, y.shape[0])\n return y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask, val_mask, test_mask\n\ndef load_data(dataset_str):\n \"\"\"Load data.\"\"\"\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, 
ally, graph = tuple(objects)\n    print(\"graph is....\")\n    print(type(graph))\n    print(\"allx is....\")\n    print(type(allx))#\n    test_idx_reorder = parse_index_file(\"data/ind.{}.test.index\".format(dataset_str))\n    test_idx_range = np.sort(test_idx_reorder)\n\n    if dataset_str == 'citeseer':\n        # Fix citeseer dataset (there are some isolated nodes in the graph)\n        # Find isolated nodes, add them as zero-vecs into the right position\n        test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n        tx_extended[test_idx_range-min(test_idx_range), :] = tx\n        tx = tx_extended\n        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n        ty_extended[test_idx_range-min(test_idx_range), :] = ty\n        ty = ty_extended\n\n    if dataset_str == 'nell.0.001':\n        # Find relation nodes, add them as zero-vecs into the right position\n        test_idx_range_full = range(allx.shape[0], len(graph))\n        isolated_node_idx = np.setdiff1d(test_idx_range_full, test_idx_reorder)\n        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n        tx_extended[test_idx_range-allx.shape[0], :] = tx\n        tx = tx_extended\n        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n        ty_extended[test_idx_range-allx.shape[0], :] = ty\n        ty = ty_extended\n\n        # NELL only: the feature extension below references isolated_node_idx,\n        # so it has to stay inside this branch\n        features = sp.vstack((allx, tx)).tolil().toarray()\n        print(features)\n        features[test_idx_reorder, :] = features[test_idx_range, :]\n\n        idx_all = np.setdiff1d(range(len(graph)), isolated_node_idx)\n\n        if not os.path.isfile(\"data/planetoid/{}.features.npz\".format(dataset_str)):\n            print(\"Creating feature vectors for relations - this might take a while...\")\n            features_extended = sp.hstack((features, sp.lil_matrix((features.shape[0], len(isolated_node_idx)))),\n                                          dtype=np.int32).todense()\n            features_extended[isolated_node_idx, features.shape[1]:] = np.eye(len(isolated_node_idx))\n            features = sp.csr_matrix(features_extended)\n            print(\"Done!\")\n            save_sparse_csr(\"data/planetoid/{}.features\".format(dataset_str), features)\n        else:\n            features = load_sparse_csr(\"data/planetoid/{}.features.npz\".format(dataset_str))\n    else:\n        features = sp.vstack((allx, tx)).tolil()\n        features[test_idx_reorder, :] = features[test_idx_range, :]\n\n    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n    labels = np.vstack((ally, ty))\n    labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n    idx_test = test_idx_range.tolist()\n    idx_train = range(len(y))\n    idx_val = range(len(y), len(y)+500)\n\n    train_mask = sample_mask(idx_train, labels.shape[0])\n    val_mask = sample_mask(idx_val, labels.shape[0])\n    test_mask = sample_mask(idx_test, labels.shape[0])\n\n    y_train = np.zeros(labels.shape)\n    y_val = np.zeros(labels.shape)\n    y_test = np.zeros(labels.shape)\n    y_train[train_mask, :] = labels[train_mask, :]\n    y_val[val_mask, :] = labels[val_mask, :]\n    y_test[test_mask, :] = labels[test_mask, :]\n    np.savetxt('labels',labels)\n\n    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask\n\n\ndef sparse_to_tuple(sparse_mx):\n    \"\"\"Convert sparse matrix to tuple representation.\"\"\"\n    def to_tuple(mx):\n        if not sp.isspmatrix_coo(mx):\n            mx = mx.tocoo()\n        coords = np.vstack((mx.row, mx.col)).transpose()\n        values = mx.data\n        shape = mx.shape\n        return coords, values, shape\n\n    if isinstance(sparse_mx, list):\n        for i in range(len(sparse_mx)):\n            sparse_mx[i] = to_tuple(sparse_mx[i])\n    else:\n        sparse_mx = to_tuple(sparse_mx)\n\n    return sparse_mx\n\n\ndef 
preprocess_features(features):\n \"\"\"Row-normalize feature matrix and convert to tuple representation\"\"\"\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n print(type(features))\n return sparse_to_tuple(features)\n\n\ndef normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\ndef preprocess_adj(adj):\n \"\"\"Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.\"\"\"\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)\n\n\ndef construct_feed_dict(features, support, labels, labels_mask, placeholders):\n \"\"\"Construct feed dictionary.\"\"\"\n feed_dict = dict()\n feed_dict.update({placeholders['labels']: labels})\n feed_dict.update({placeholders['labels_mask']: labels_mask})\n feed_dict.update({placeholders['features']: features})\n feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})\n feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})\n return feed_dict\n\n\ndef chebyshev_polynomials(adj, k):\n \"\"\"Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).\"\"\"\n print(\"Calculating Chebyshev polynomials up to order {}...\".format(k))\n\n adj_normalized = normalize_adj(adj)\n laplacian = sp.eye(adj.shape[0]) - adj_normalized\n largest_eigval, _ = eigsh(laplacian, 1, which='LM')\n scaled_laplacian = (2. 
/ largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])\n\n t_k = list()\n t_k.append(sp.eye(adj.shape[0]))\n t_k.append(scaled_laplacian)\n\n def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):\n s_lap = sp.csr_matrix(scaled_lap, copy=True)\n return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two\n\n for i in range(2, k+1):\n t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))\n\n return sparse_to_tuple(t_k)\n\ndef xuxiaowei(adj, content, dataset):\n dataset_str = dataset\n adj=adj.todense()\n sample_num = adj.shape[0]\n sim_t = np.zeros((sample_num,sample_num))\n print (\"adj---start\")\n # compute similar by cost similar\n if dataset_str == 'cora':\n m=5429\n if dataset_str == 'citeseer':\n m=4732\n if dataset_str == 'pubmed':\n m=44338\n if dataset_str == 'nell.0.001':\n m=266144\n if dataset_str == 'fk107':\n m=26717\n sum = np.sum(adj,axis=1)\t#n*1\n sum = sum.astype(\"float64\")\n sum_trans = sum.T\n sum = sum*sum_trans\n sum = sum/(2*m)\n sim_t = sum-adj\n sim_t = np.asarray(sim_t)\n scale = np.max(sim_t)-np.min(sim_t)\n sim_t = sim_t/scale\n return sim_t\n\n\t\ndef shijianbo(adj, content):\n content = content.todense()\n sample_num = content.shape[0]\n sim_c = np.zeros((sample_num,sample_num))\n\n # compute similar by cost similar\n content_t = content.T\n sim_c = np.asarray(content*content_t)\n content_2 = np.multiply(content,content)\n sum = np.sum(content_2,axis=1)\n sum_trans = sum.T\n sum = sum*sum_trans\n sum = np.sqrt(sum)\n min = np.ones((sample_num,sample_num))*1e-10\n sum = sum+min\n sim_c = sim_c/sum\n sim_c = np.asarray(sim_c)\n sim_c_row = np.sum(sim_c,axis=1)\n one_vec = np.ones(sample_num)\n one_vec = one_vec.T\t\t#1*n\n sim_c_row = sim_c_row*one_vec\n sim_c = sim_c/(sim_c_row+min)\n scale = np.max(sim_c)-np.min(sim_c)\n sim_c = sim_c/scale\n\n #KNN\n print (\"content------KNN\")\n k_values=100\n sample_2_final=np.zeros((sample_num,sample_num))\n for i in range(sample_num):\n sample_2_sort=sorted(sim_c[i],reverse=True)\n k_order=0\n for j in range(k_values):\n find_index=np.where(sim_c[i]==sample_2_sort[j])\n for k in range(len(find_index[0])):\n sample_2_final[i][find_index[0][k]]=sample_2_sort[j]\n k_order+=1\n if (k_order==k_values):\n break\n if (k_order==k_values):\n break\n print (\"content------KNN---end\")\n print (np.max(sample_2_final))\n #sample_2_final=sim_c\n return sample_2_final\n\t\ndef NMI(A,B):\n B=np.argmax(B,axis=1)\n # len(A) should be equal to len(B)\n total = len(A)\n A_ids = set(A)\n B_ids = set(B)\n #Mutual information\n MI = 0\n eps = 1.4e-45\n a=0\n for idA in A_ids:\n for idB in B_ids:\n if(idA>0):\n a=1\n if(a==1):\n idAOccur = np.where(A==idA)\n idBOccur = np.where(B==idB)\n idABOccur = np.intersect1d(idAOccur,idBOccur)\n px = 1.0*len(idAOccur[0])/total\n py = 1.0*len(idBOccur[0])/total\n pxy = 1.0*len(idABOccur)/total\n MI = MI + pxy*math.log(pxy/(px*py)+eps,2)\n # Normalized Mutual information\n Hx = 0\n for idA in A_ids:\n idAOccurCount = 1.0*len(np.where(A==idA)[0])\n Hx = Hx - (idAOccurCount/total)*math.log(idAOccurCount/total+eps,2)\n Hy = 0\n for idB in B_ids:\n idBOccurCount = 1.0*len(np.where(B==idB)[0])\n Hy = Hy - (idBOccurCount/total)*math.log(idBOccurCount/total+eps,2)\n MIhat = 2.0*MI/(Hx+Hy)\n return MIhat","sub_path":"gcn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"187448441","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport nibabel as nib\nimport pandas as pd\nimport nibabel as nib\nimport json\nfrom nilearn import plotting\nfrom nilearn import image\nfrom scipy.stats.stats import pearsonr\nimport shutil\nimport scipy.stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport parsimony.utils.check_arrays as check_arrays\nfrom sklearn import preprocessing\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import ols\nimport seaborn as sns\n\nDATA_PATH = \"/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/data\"\nINPUT_CLINIC_FILENAME = \"/neurospin/abide/schizConnect/data/december_2017_clinical_score/schizconnect_COBRE_assessmentData_4495.csv\"\nU_all = np.load(\"/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/results/clustering/U_scores_corrected/U_all.npy\")\ny_all = np.load(\"/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/y.npy\")\n\n\n\npop = pd.read_csv(os.path.join(DATA_PATH,\"pop_cobre_scz.csv\"))\nclinic = pd.read_csv(INPUT_CLINIC_FILENAME)\nage = pop[\"age\"].values\nsex = pop[\"sex_num\"].values\n\ny = np.load(\"/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/y.npy\")\nsite = np.load(\"/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/site.npy\")\nsite = site[y==1]\nlabels_cluster = np.load(\"/neurospin/brainomics/2016_schizConnect/\\\n2018_analysis_2ndpart_clinic/results/clustering/corrected_results/\\\ncorrection_age_sex_site/clusters_with_controls/2_clusters_solution/labels_cluster.npy\")\nlabels_cluster = labels_cluster[site==1]\nU0 =U_all[:,0][y_all==1][site==1]\n\n\ndf_scores = pd.DataFrame()\ndf_scores[\"subjectid\"] = pop.subjectid\nfor score in clinic.question_id.unique():\n df_scores[score] = np.nan\n\nfor s in pop.subjectid:\n curr = clinic[clinic.subjectid ==s]\n for key in clinic.question_id.unique():\n if curr[curr.question_id == key].empty == False:\n df_scores.loc[df_scores[\"subjectid\"]== s,key] = curr[curr.question_id == key].question_value.values[0]\n\n\n\n################################################################################\n\ndf_stats = pd.DataFrame(columns=[\"r\",\"p\"])\ndf_stats.insert(0,\"clinical_scores\",clinic.question_id.unique())\n################################################################################\noutput = \"/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/\\\nresults/clustering/corrected_results/correction_age_sex_site/clusters_with_controls/\\\n2_clusters_solution/cobre/cobre_correlation_clinics_p_values.csv\"\n\nkey_of_interest= list()\nfor key in clinic.question_id.unique():\n try:\n neurospycho = df_scores[key].astype(np.float).values\n\n df = pd.DataFrame()\n df[key] = neurospycho[np.array(np.isnan(neurospycho)==False)]\n df[\"age\"] = age[np.array(np.isnan(neurospycho)==False)]\n df[\"sex\"] = sex[np.array(np.isnan(neurospycho)==False)]\n df[\"labels\"]=labels_cluster[np.array(np.isnan(neurospycho)==False)]\n df[\"U0\"]=U0[np.array(np.isnan(neurospycho)==False)]\n r,p = scipy.stats.pearsonr(df[\"U0\"],df[key])\n\n df_stats.loc[df_stats.clinical_scores==key,\"r\"] = r\n df_stats.loc[df_stats.clinical_scores==key,\"p\"] = p\n if p<0.05:\n print(key)\n print(p)\n key_of_interest.append(key)\n\n\n except:\n df_stats.loc[df_stats.clinical_scores==key,\"r\"] = np.nan\n df_stats.loc[df_stats.clinical_scores==key,\"p\"] = 
\ndf_stats.to_csv(output)\n\n\n\n\n################################################################################\noutput = \"/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/\\\nresults/clustering/corrected_results/correction_age_sex_site/clusters_with_controls/\\\n2_clusters_solution/cobre/correlations\"\n\n\nfor key in key_of_interest:\n    plt.figure()\n    df = pd.DataFrame()\n    neurospycho = df_scores[key].astype(float).values\n    valid = ~np.isnan(neurospycho)  # keep only subjects with a value for this score\n    df[key] = neurospycho[valid]\n    df[\"age\"] = age[valid]\n    df[\"sex\"] = sex[valid]\n    df[\"labels\"] = labels_cluster[valid]\n    df[\"U0\"] = U0[valid]\n    r, p = scipy.stats.pearsonr(df[\"U0\"], df[key])\n    df['color'] = np.where(df['labels'] == 1, \"r\", \"g\")\n    D_color_label = {\"r\": \"Cluster 2\", \"g\": \"Cluster 1\"}\n\n    ax = sns.regplot(data=df, x=\"U0\", y=key, scatter_kws={'facecolors': df['color'], 's': 50})\n    # label each cluster from its own groupby key, so colours and legend entries cannot drift apart\n    for color, grp in df.groupby('color'):\n        grp.plot(kind='scatter', x='U0', y=key, c=color, ax=ax, label=D_color_label[color], zorder=0)\n    ax.legend()\n    plt.title(\"Pearson corr: R = %s, and p = %s\" % (r, p))\n    plt.savefig(os.path.join(output, \"%s.png\" % key))\n\n\n\n#from scipy import stats\n#slope, intercept, r_value, p_value, std_err = stats.linregress(df[\"U0\"],df[key])\n#print(slope, intercept, r_value, p_value, std_err)","sub_path":"2016_schizConnect/2018_analysis_2ndpart_clinic/clustering_based_on_PCs/corrected/correction_age_sex_site/clusters_with_controls/2_clusters_solution/02_clusters_correlationscobre.py","file_name":"02_clusters_correlationscobre.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"142447636","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\nbrower=webdriver.Safari()\nurl='https://weibo.com'\nbrower.get(url)\ntime.sleep(10)\nwait = WebDriverWait(brower, 10)\nbrower.maximize_window()\ntry:\n time.sleep(1)\n clic = brower.find_element_by_xpath('//a[@href=\"javascript:void(0)\"]')\n time.sleep(1)\n clic.click()\nexcept:\n brower.quit()\n print('error')\ntry:\n time.sleep(1)\n clic = brower.find_element_by_xpath('//a[@action-data=\"tabname=qrcode\"]')\n clic.click()\nexcept:\n brower.quit()\n print('error')\ntry:\n time.sleep(5)\n s=input('enter yes to confirm you have scan the code')\n if s!='':\n brower.refresh()\n inp = brower.find_element_by_xpath('//input[@node-type=\"searchInput\"]')\n time.sleep(3)\n inp.send_keys('乘风破浪的姐姐')\nexcept:\n brower.quit()\n print('error')\ntry:\n time.sleep(1)\n clic = brower.find_element_by_xpath('//a[@node-type=\"searchSubmit\"]')\n time.sleep(1)\n clic.click()\nexcept:\n brower.quit()\n print('error')\nfile1=open('demo1.txt','w')#微博和评论\nfile2=open('demo2.txt','w')#仅微博\nfile3=open('demo3.txt','w')#仅评论\nfor i in range(20):\n try:\n time.sleep(3)\n clics = brower.find_elements_by_xpath('//a[@action-type=\"feed_list_comment\"]')\n for clic in clics:\n clic.send_keys(Keys.ENTER)\n time.sleep(1)\n clics = brower.find_elements_by_xpath('//a[@action-type=\"fl_unfold\"]')\n for clic in clics:\n clic.send_keys(Keys.ENTER)\n time.sleep(1)\n time.sleep(1)\n txts = brower.find_elements_by_xpath('//p[@node-type=\"feed_list_content\"]')\n time.sleep(1)\n comments = brower.find_elements_by_xpath('//div[@class=\"card-together\"]')\n time.sleep(1)\n for txt, comment in zip(txts, comments):\n file1.writelines('-----微博正文-----')\n file1.writelines(txt.text)\n file2.writelines(txt.text)\n #print(txt.text)\n try:\n infos = comment.find_elements_by_xpath('//div[@class=\"txt\"]')\n for info in infos:\n file1.writelines(info.text)\n file3.writelines(info.text)\n #print(info.text)\n except:\n file1.writelines('no comment!')\n file3.writelines('no comment!')\n try:\n clic = brower.find_element_by_xpath('//a[@class=\"next\"]')\n clic.send_keys(Keys.ENTER)\n except:\n brower.quit()\n except:\n brower.quit()\n print('error')\nbrower.quit()\n\n\n","sub_path":"爬虫版本迭代过程/crawler3.0.py","file_name":"crawler3.0.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"41174152","text":"# -*- coding: utf-8 -*-\nimport config\nimport telebot\nimport os\nimport time\nbot = telebot.TeleBot(config.TOKEN)\n\n@bot.message_handler(commands=['test'])\ndef find_file_ids(message):\n for file in os.listdir('.venv/botbrein/music/'):\n if file.split('.')[-1] == 'ogg':\n f = open('.venv/botbrein/music/'+file, 'rb')\n msg = bot.send_voice(message.chat.id, f, None)\n # отправка file_id:\n bot.send_message(message.chat.id, msg.voice.file_id, reply_to_message_id=msg.message_id)\n time.sleep(3)\n\nif __name__ == '__main__':\n bot.infinity_polling()\n","sub_path":"Downloadmusicbot.py","file_name":"Downloadmusicbot.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"119884129","text":"import time\nfrom decimal import Decimal\ntask_2_a = lambda element: element % 2 == 0 and 1 < element < 100 and element % 10 != 0\ntask_2_b = lambda elem: elem // 2 if elem % 2 == 0 else elem * 2\ntask_2_c = lambda result, a: result + len([a for a in a if a.islower()])\ntask_2_d = lambda i: sum([i for i in i if i >= 0]) / len([i for i in i if i >= 0])\n\n\ndef task_3(is_slow=False):\n a, b = 0, 1\n while a < 1000000000:\n if is_slow:\n time.sleep(0.5)\n yield a\n a, b = b, a+b\n\n\ndef task_4(func):\n def new_func(*args, **kwargs):\n new_args = [arg for arg in args]\n arguments = []\n kwargs_keys = [key for key in kwargs]\n kwargs_values = []\n for i in new_args:\n if isinstance(i, (int, float, Decimal)) and not isinstance(i, bool):\n a = -i\n arguments.append(a)\n elif isinstance(i, (str, list)):\n a = i[::-1]\n arguments.append(a)\n elif isinstance(i, dict):\n a = {key[::-1]: value for key, value in i.items()}\n arguments.append(a)\n elif isinstance(i, bool):\n if i is True:\n arguments.append(False)\n else:\n arguments.append(True)\n for value in kwargs.values():\n if isinstance(value, (int, float, Decimal)) and not isinstance(value, bool):\n b = -value\n kwargs_values.append(b)\n elif isinstance(value, (str, list)):\n b = value[::-1]\n kwargs_values.append(b)\n elif isinstance(value, dict):\n b = {key[::-1]: value for key, value in value.items()}\n kwargs_values.append(b)\n elif isinstance(value, bool):\n if value is True:\n kwargs_values.append(False)\n else:\n kwargs_values.append(True)\n kwargs_dict = dict(zip(kwargs_keys, kwargs_values))\n dict3 = dict(sorted([(key, value) for (key, value) in kwargs_dict.items()])[:5])\n return func(*arguments[:5], **dict3)\n return new_func\n\n\n\n","sub_path":"homework_old/homework3_5.py","file_name":"homework3_5.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"199379361","text":"#coding=utf-8\n\n\n'''\nnameA = input(\"輸入A同學姓名: \")\nscoreA = input(\"輸入A同學分數: \")\nnameB = input(\"輸入B同學姓名: \")\nscoreB = input(\"輸入B同學分數: \")\nprint(\"姓名\\t分數\")\nprint(nameA+\"\\t\"+scoreA)\nprint(nameB+\"\\t\"+scoreB)\n'''\n\n'''\nmoney = 1000\nfor i in range(20):\n\tmoney *= 1.005\n\t\nprint(money)\nprint(1000*(1.005**20))\n'''\n'''\nspeed = 100*128\nfile = 256*1024\nprint(file/speed)\n'''\n'''\nstr = \"PLEASE CONVERT THIS SENTENCE TO LOWER CASE.\"\nprint(str.lower())\t#變小寫\n'''\n'''\nborn = input(\"出生\")\nnow = input(\"現在\")\nage = int(now) - int(born)\nprint(\"%d\" %age)\n'''\n'''\ncard = input(\"卡號: \")\nprint(\"%016d\" %int(card))\n'''\n'''\nyear = input(\"年\")\nmonth = input(\"月\")\nday = input(\"日\")\nprint(\"%d.%02d.%02d\" %(int(year),int(month),int(day) ) )\n'''\n\n'''\ntest = \"I can\\n hi\\t\"\n#(arr,arr2) = test.split('.')\narr = test.split()\nprint(arr)\n#print(arr2)\n'''\n\n'''\narr = [123, \"betty\", 123.5, 'c']\n\nfor x in arr:\n print(x)\n'''\n\n'''\nscores = {}\nresult_f = open(\"Result.txt\")\nfor line in result_f:\n (name, score) = line.split()\n scores[score] = name\nresult_f.close()\n\n\nfor key in sorted(scores.keys(), reverse = True):\n print(scores[key] + \" \" + key)\n#test print scores\n#print(\"\\n\" + scores[\"7.81\"])\n'''\n\n'''\ndef changeBarHelper(variable):\n variable = variable * 2\n return variable\n\n\nbar = 20\nprint(bar)\nbar = changeBarHelper(bar)\nprint(bar)\n'''\n\n'''\ndef outer(x, y):\n #def inner(a = x, b = y):\n # print(\"1\")\n # return a*b\n\n #print(\"2\")\n #return inner\n return lambda a = x, b = y: a*b\n\nx = outer(2,4)\nprint (\"output: \" + str(x()))\n'''\n\n'''\ndef outer(x):\n fillin = [None]\n def inner(i, self = fillin):\n print(i)\n if i: self[0](i-1)\n\n fillin[0] = inner\n print(\"hi\")\n inner(x)\n\nprint(outer(3))\n'''\n\n'''\nclass ThirdClass:\n def __init__(self, value):\n print(\"init\")\n self.data = value\n def __add__(self, other):\n print(\"add\")\n return ThirdClass(self.data + other)\n def __mul__(self, other):\n print(\"mul\")\n #return ThirdClass(self.data * other)\n return ThirdClass(self.data + other)\n def display(self):\n print('Current value = \"%s\" ' %self.data)\n\n \n\na = ThirdClass(\"abc\")\na.display()\nprint(\"----------\")\nb = a + 'xyz'\nb.display()\n#print(\"----------\")\n#b * 3\n#b.display()\nprint(\"----------\")\nc = a*'xyz'\nc.display()\n'''\n'''\nimport json #內建模組\n\njsondata = {\"responseData\": {\"translatedText\":\"hello!\"},\n \"responseDetails\": None,\n \"responseStatus\": 200}\nencodetext = json.dumps(jsondata)\ndecode = json.loads(encodetext)\n\nprint(type(encodetext))\nprint(encodetext)\n\nprint(type(decode))\nprint(decode)\n\nprint(decode['responseData']['translatedText'])\nprint(decode[\"responseStatus\"])\n'''\n'''\njsondata = '{\"responseData\": {\"translatedText\":\"蟒蛇石頭!\"}, \"responseDetails\": null, \"responseStatus\": 200}'\ntext = json.loads(jsondata)\nprint('翻譯結果:',text['responseData']['translatedText'])\n'''\n\n'''\nnum = 5\ndef change_dict(in_dict):\n global num\n in_dict[str(num)] = 'E'\n num += 1\n print(\"in: \", in_dict)\n\ndict1 = {'1':'A', '2':'B', '3':'C', '4':'D'}\ndict2 = dict()\nfor num2 in range(0,3):\n\n change_dict(dict2)\n dict1.update(dict2)\n\n print(dict2)\n #dict2.clear()\n\nprint(dict1)\n\ndict 就算不傳進 function 也可以做修改\n只是好像不太建議\n'''\n\n'''\ndict1 = {'1':'A', '2':'B', '3':'C', '4':'D'}\nprint(len(dict1))\ndict1['5'] = dict1.pop('1')\nprint(dict1)\nprint(len(dict1))\n'''\n\n'''\nimport os\nimport re\n\nurl = 
\"/smw/index.php?title=Special:RecentChangesLinked\"\nstr1 = os.path.splitext(url)[-1]\nstr2 = str1.find(\"?\")\nprint(str1[:str2])\nprint(str2)\n'''\n\n'''\ndict1 = {'1':'A', '2':'B', '3':'C'}\ntest = dict1.get('4')\nprint(test)\nprint(type(test))\n'''\n\n'''\nfile1 = open('test.txt', 'r', encoding='utf-8')\nfor content in file1.readlines():\n str1 = content\n\nprint(str1)\nprint(len(str1))\n\nwrite_f = open('test2.txt', 'wb')\nwrite_f.write(str1.encode('utf-8'))\n\nwrite_f.close()\nprint(\"=====\")\nfile2 = open('test2.txt', 'rb')\n\nbyte = file2.read(3)\nwhile byte != b'':\n print(byte)\n print(byte.decode('UTF-8'))\n byte = file2.read(3)\n'''\n\n'''\nstr1 = \"你給我試試看ㄚㄚㄚㄚ\"\nfile3 = open('test3.txt', 'wb')\nfile3.write(str1.encode('utf-8'))\n\nfile3.close()\nprint(\"=====\")\nfile4 = open('test3.txt', 'rb')\n\nbyte = file4.read(3)\nwhile byte != b'':\n print(byte)\n print(byte.decode('UTF-8'))\n byte = file4.read(3)\n'''\n\n'''\ntest = \"happy birthday\"\ntest2 = test.find(\"y\", 3)\nprint(test2)\n\nc = \"What is real? How do you define real? \"\nprint(c.count(\"real\", 10))\n'''\n\n'''\nimport threading, time\n\nclass Thread (threading.Thread): # 繼承 Thread 類別\n def __init__(self, no, interval):\n threading.Thread.__init__(self)\n self.no = no\n self.interval = interval\n\n def run(self):\n global test1\n test1()\n time.sleep(self.interval)\n print('Thread '+str(self.no))\n\ndef test1():\n print(\"hi\")\n\nsource = \"test\"\n\ndef test():\n thread1 = Thread(1,5)\n thread2 = Thread(2,3)\n thread1.start()\n thread2.start()\n\nif __name__ == '__main__':\n test()\n'''\n\n'''\nimport os\nfor f in os.listdir(\"data/\"):\n print(type(f))\n'''\n\nstr1 = '請問Note 4 跟 iphone 6 plus那個好呢?\\n_1.txt'\n\ndef special_handle(str1):\n str1 = ''.join(str1.split('?'))\n str1 = ''.join(str1.split('\\\\'))\n str1 = ''.join(str1.split('/'))\n str1 = ''.join(str1.split(':'))\n str1 = ''.join(str1.split('|'))\n str1 = ''.join(str1.split('<'))\n str1 = ''.join(str1.split('>'))\n str1 = ''.join(str1.split('\"'))\n str1 = ''.join(str1.split('*'))\n str1 = ''.join(str1.split('\\n'))\n return str1\n\nstr1 = special_handle(str1)\nprint(type(str1))\nprint(str1)\n","sub_path":"Project/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"641875390","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom utils.utils import *\nfrom torch.autograd import Variable\nimport numpy as np\n\nclass contrastive_loss(nn.Module):\n \"\"\"\n no-change,0\n change,1\n \"\"\"\n def __init__(self, margin1=0.1, margin2=2.0, eps=1e-6):\n super(contrastive_loss, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n\n def forward(self, x1, x2, label):\n diff = torch.abs(x1 - x2)\n dist_sq = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist = torch.sqrt(dist_sq)\n\n total = np.prod(label.size())\n refer = 1-label\n neg_dis = torch.clamp(dist - self.margin1, min=0.0)\n loss1 = refer * (neg_dis.pow(2))\n loss_1 = torch.sum(loss1)\n\n pos_dis = torch.clamp(self.margin2 - dist, min=0.0)\n loss2 = label * (pos_dis.pow(2)) * 10.0\n loss_2 = torch.sum(loss2)\n loss = (loss_1 + loss_2) / total\n return loss\n\nclass BCL_v2(nn.Module):\n \"\"\"\n batch-balanced contrastive loss\n no-change,0\n change,1\n \"\"\"\n def __init__(self, margin1=0.1, margin2=2.0, eps=1e-6):\n super(BCL_v2, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n\n def forward(self, x1, x2, label):\n diff = torch.abs(x1 - x2)\n dist_sq = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist = torch.sqrt(dist_sq)\n\n pos_num = torch.sum((label==1).float())+0.0001\n neg_num = torch.sum((label==0).float())+0.0001\n\n refer = 1-label\n neg_dis = torch.clamp(dist - self.margin1, min=0.0)\n loss1 = refer * (neg_dis.pow(2))\n loss_1 = torch.sum(loss1) /neg_num\n\n pos_dis = torch.clamp(self.margin2 - dist, min=0.0)\n loss2 = label * (pos_dis.pow(2))\n loss_2 = torch.sum(loss2) / pos_num\n loss = loss_1 + loss_2\n return loss\n\nclass BCLwithUncertainty_v1(nn.Module):\n def __init__(self, margin1=0.1, margin2=2.0, eps=1e-6, gamma=2):\n super(BCLwithUncertainty_v1, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n self.gamma = gamma\n self.avgpool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)\n\n def forward(self, x1, x2, label):\n diff = x1 - x2\n dist = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist_sq = torch.sqrt(dist)\n pos_num = torch.sum((label==1).float()) + 0.001\n neg_num = torch.sum((label==0).float()) + 0.001\n\n smooth_label = torch.pow(self.avgpool(label), self.gamma)\n smooth_refer = torch.pow((1-self.avgpool(label)), self.gamma)\n refer = 1 - label\n\n neg_dis = torch.clamp(dist_sq - self.margin1, min=0.0)\n loss_neg = (refer + smooth_refer) * neg_dis\n loss_1 = torch.sum(loss_neg) / neg_num\n\n pos_dis = torch.clamp(self.margin2 - dist_sq, min=0.0)\n loss_pos = (label + smooth_label) * pos_dis\n loss_2 = torch.sum(loss_pos) / pos_num\n loss_dis = loss_1 + loss_2\n return loss_dis\n\nclass BCLwithUncertainty_v2(nn.Module):\n def __init__(self, margin1=0.1, margin2=1.8, eps=1e-6):\n super(BCLwithUncertainty_v2, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n self.avgpool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)\n\n def forward(self, x1, x2, label):\n diff = x1 - x2\n dist = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist_sq = torch.sqrt(dist)\n pos_num = torch.sum((label==1).float()) + 0.001\n neg_num = torch.sum((label==0).float()) + 0.001\n\n x = self.avgpool(label)\n weight = 0.8 - 4 * torch.pow(x, 2) + 4 * x\n\n refer = 1 - label\n neg_dis = torch.clamp(dist_sq - self.margin1, min=0.0)\n loss_neg = refer * neg_dis * weight\n loss_1 = torch.sum(loss_neg) / neg_num\n\n pos_dis = 
torch.clamp(self.margin2 - dist_sq, min=0.0)\n        loss_pos = label * pos_dis * weight\n        loss_2 = torch.sum(loss_pos) / pos_num\n        loss_dis = loss_1 + loss_2\n        return loss_dis\n\ndef cross_entropy_2d(predict, target):\n    \"\"\"\n    Args:\n        predict:(n, c, h, w)\n        target:(n, h, w)\n    \"\"\"\n    assert predict.dim() == 4\n    assert target.dim() == 3\n    assert predict.size(0) == target.size(0)\n    assert predict.size(2) == target.size(1)\n    assert predict.size(3) == target.size(2)\n    n, c, h, w = predict.size()\n    target_mask = (target >= 0) & (target != 255)  # 255 is the ignore label\n    target = target[target_mask]\n    if target.numel() == 0:  # no valid pixels left after masking\n        return torch.zeros(1)\n    predict = predict.transpose(1, 2).transpose(2, 3).contiguous()\n    predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)\n    # size_average was removed in modern PyTorch; reduction='mean' is the equivalent\n    loss = F.cross_entropy(predict, target, reduction='mean')\n    return loss\n\nclass IOUloss_v1(nn.Module):\n    def __init__(self, margin1=0.1, margin2=2.0, eps=1e-6):\n        super(IOUloss_v1, self).__init__()\n        self.margin1 = margin1\n        self.margin2 = margin2\n        self.eps = eps\n\n    def forward(self, x1, x2, label):\n        diff = x1 - x2\n        dist = torch.pow(diff + self.eps, 2).sum(dim=1)\n        dist_sq = torch.sqrt(dist)\n\n        predict = dist_sq[:,:,:] > 1.0\n        gt = label[:,:,:] == 1.0\n\n        intersection = (predict & gt).float()\n        union = (predict | gt).float()\n        iou_loss = 1 - (torch.sum(intersection) / torch.sum(union))\n\n        return iou_loss\n\nclass IOUloss_v2(nn.Module):\n    def __init__(self, eps=1e-6):\n        super(IOUloss_v2, self).__init__()\n        self.eps = eps\n\n    def forward(self, output, label):\n        _, predicted = torch.max(output.data, dim=1)\n\n        predict = predicted[:,:,:] == 1\n        gt = label[:,:,:] == 1.0\n\n        intersection = (predict & gt).float()\n        union = (predict | gt).float()\n        iou_loss = 1 - (torch.sum(intersection) / torch.sum(union))\n        return iou_loss\n
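\n# Quick smoke test (illustrative only, not from the original file; assumes N,C,H,W\n# embeddings and an N,H,W binary change mask):\n#   x1, x2 = torch.randn(2, 64, 32, 32), torch.randn(2, 64, 32, 32)\n#   label = torch.randint(0, 2, (2, 32, 32)).float()\n#   print(BCL_v2()(x1, x2, label))\n","sub_path":"utils/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}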
+{"seq_id":"482336502","text":"#!/usr/bin/env python\n# coding:utf-8\nimport base64\nfrom io import BytesIO\n\nimport qrcode as qrc\n\n\ndef qrcode(data, version=None, error_correction='M', box_size=4.5, border=2.5, fit=False):\n \"\"\"\n makes qr image using qrcode as qrc See documentation for qrcode package for info\n taken from: https://github.com/agnerio/Flask-QRcode/blob/master/flask_qrcode/__init__.py\n \"\"\"\n correction_levels = {\n 'L': qrc.constants.ERROR_CORRECT_L,\n 'M': qrc.constants.ERROR_CORRECT_M,\n 'Q': qrc.constants.ERROR_CORRECT_Q,\n 'H': qrc.constants.ERROR_CORRECT_H\n }\n\n qr = qrc.QRCode(\n version=version,\n error_correction=correction_levels[error_correction],\n box_size=box_size,\n border=border\n )\n qr.add_data(data)\n qr.make(fit=fit)\n\n # creates qrcode base64\n out = BytesIO()\n qr_img = qr.make_image()\n qr_img.save(out, 'PNG')\n\n return u\"data:image/png;base64,\" + base64.b64encode(out.getvalue()).decode('ascii')\n","sub_path":"10-two-factor-authentication/app/qr.py","file_name":"qr.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"579099399","text":"from optparse import OptionParser\n\n\nclass CLIOptions(OptionParser):\n \"\"\"\n Command line interface options parser.\n \"\"\"\n def __init__(self, *args, **kwargs):\n OptionParser.__init__(self, *args, **kwargs)\n self.add_option(\n \"-c\", \"--config\", dest=\"configfile\",\n help=\"Configuration file.\", default=\"/etc/temboard/temboard.conf\")\n\n\nclass temboarduiOptions(CLIOptions):\n \"\"\"\n temboard options parser.\n \"\"\"\n def __init__(self, *args, **kwargs):\n CLIOptions.__init__(self, *args, **kwargs)\n self.add_option(\n \"-d\", \"--daemon\", dest=\"daemon\", action=\"store_true\",\n help=\"Run in background.\", default=False)\n self.add_option(\n \"-p\", \"--pid-file\", dest=\"pidfile\",\n help=\"PID file.\", default=\"/run/temboard.pid\")\n self.add_option(\n \"--debug\",\n action=\"store_true\", dest=\"debug\", default=False,\n help=\"Debug mode for development.\")\n","sub_path":"temboardui/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"437361378","text":"from random import randint\nfrom random import sample\nfrom Individual import Individual\n\n\n\"\"\"\nClasse de operadores do saga\n\"\"\"\nclass Operator():\n \"\"\"\n Construtor da classe operador, armazena os operadores em um dicionario, a fim de serem selecionados\n aleatoriamente pelos seus indices.\n \"\"\"\n def __init__(self):\n self.function_dict = {\n 1: self.crossover_one_point, # 2 parents\n 2: self.crossover_uniform, # 2 parents\n 3: self.block_shuffling_left, # 1 parents\n 4: self.block_shuffling_vertically, # 1 parents\n }\n self.selected_op = 0 # guarda a key do operador selecionado\n\n\n \"\"\"\n Seleciona de por meio da roleta um operador e retorna o numero de pais a serem selecionados\n \"\"\"\n def select_operator(self):\n dict_len = len(self.function_dict)\n\n # seleciona o operador aleatoriamente\n self.selected_op = randint(1,dict_len)\n # self.selected_op = 1\n\n # Verifica se o operator escolhido necessita de 1 parent\n if self.selected_op == 3 or self.selected_op == 4:\n return 1\n else:\n return 2\n\n\n \"\"\"\n Executa o operador selecionado, retornando os filhos gerados pelo operador\n \"\"\"\n def run_operator(self, parent1, parent2 = None):\n if parent2 is not None:\n self.function_dict[self.selected_op](parent1, parent2)\n else:\n self.function_dict[self.selected_op](parent1)\n\n\n \"\"\"\n Operador de crossover de um ponto, retorna os filhos do crossover\n \"\"\"\n def crossover_one_point(self, individual1, individual2):\n # print(\"Individual1 = %s\" % individual1.toString())\n # print(\"Individual2 = %s\\n\" % individual2.toString())\n chromosome1 = individual1.getChromosome()\n chromosome2 = individual2.getChromosome()\n \n num_seq = len(chromosome1)\n point = randint(1, len(chromosome1[0])-1) # ponto de corte\n\n # primeiro cromossomo\n part1_a = []\n part1_b = []\n\n # segundo cromossomo\n part2_a = []\n part2_b = []\n\n # divide o primeiro cromossomo\n for row in chromosome1:\n part1_a.append(row[0:point])\n part1_b.append(row[point::])\n\n num_char_a = []\n for row in part1_a:\n num_char_a.append(len(row) - row.count('-'))\n\n # Divide o segundo cromossomo\n for i, row in enumerate(chromosome2):\n cont = 0\n # print(\"num_char_a[i] = %d\\nrow %s\\n\" % (num_char_a[i], row))\n for j, char in enumerate(row):\n if char != '-':\n cont += 1\n if not num_char_a[i]:\n part2_a.append('')\n part2_b.append(row)\n elif cont == num_char_a[i]:\n part2_a.append(row[0:j+1])\n part2_b.append(row[j+1::])\n break\n \n # Encontra o tamnho da segunda sequencia lado A\n size_seq_A = 0\n for i in part2_a:\n size_seq_A = max(size_seq_A, len(i))\n\n # Encontra o tamnho da segunda sequencia lado B\n size_seq_B = 0\n for i in part2_b:\n size_seq_B = max(size_seq_B, len(i))\n\n # Ajusta o tamanho da segunda sequencia lado A\n for i in range(0, len(part2_a)):\n while len(part2_a[i]) < size_seq_A:\n part2_a[i] = part2_a[i] + '-'\n\n # Ajusta o tamanho da segunda sequencia lado B\n for i in range(0, len(part2_b)):\n while len(part2_b[i]) < size_seq_B:\n part2_b[i] = '-' + part2_b[i]\n\n # filhos a serem gerados\n child1 = []\n child2 = []\n\n # realiza a juncao dos cromossomos\n # print(\"part1_a %s\\npart1_b %s\" % (part1_a, part1_b))\n # print(\"part2_a %s\\npart2_b %s\\n\" % (part2_a, part2_b))\n for i in range(0, num_seq):\n child1.append(part1_a[i]+part2_b[i])\n child2.append(part2_a[i]+part1_b[i])\n\n # atribui os novos cromossomos aos individuos\n individual1.setChromosome(child1)\n individual2.setChromosome(child2)\n\n \"\"\"\n Operador de 
crossover uniforme, realiza o crossover uniforme entre dois cromossomos\n \"\"\"\n def crossover_uniform(self, individual1, individual2):\n # print(\"---------------OPERADOR CROSSOVER UNIFORME---------------\")\n chromosome1 = individual1.getChromosome()\n chromosome2 = individual2.getChromosome()\n position = []\n\n num_seq1 = len(chromosome1)\n # num_seq2 = len(chromosome2) (não esta sendo usada)\n\n # cromossomo dos filhos\n child1_chromosome = []\n child2_chromosome = []\n\n for x in range(0, num_seq1):\n tam_min = min(len(chromosome1[x]), len(chromosome2[x]))\n chrom1_size = len(chromosome1[x])\n chrom2_size = len(chromosome2[x])\n\n sequencia_chromosome1 = chromosome1[x]\n sequencia_chromosome2 = chromosome2[x]\n \n for y in range(0, tam_min):\n if sequencia_chromosome1[y] == sequencia_chromosome2[y]: #Condição para o mapeamento\n position.append(y) # Armazenar as posições que corresponde tanto ao pai1 como ao pai2\n # print(\"PAI 1: %s\" % sequencia_chromosome1)\n # print(\"PAI 2: %s\" % sequencia_chromosome2)\n # print('MAPEAMENTO: %s' % position)\n\n if len(position) < 2:\n return None\n x = sample(position, 2) \n\n # print (\"ESCOLHIDOS: %s\" % x)\n del position[:]\n child1_part1 = []\n child1_part2 = []\n child1_part3 = []\n child1_part4 = []\n child1_part5 = []\n \n child2_part1 = []\n child2_part2 = []\n child2_part3 = []\n child2_part4 = []\n child2_part5 = []\n\n menor_posicao = min(x[0], x[1])\n maior_posicao = max(x[0], x[1])\n\n # Para fazer a permutação dos pontos\n for y in range(0, chrom1_size):\n if y < menor_posicao:\n child1_part1.append(sequencia_chromosome1[y])\n if y == menor_posicao:\n child1_part2.append(sequencia_chromosome1[y])\n if y > menor_posicao and y < maior_posicao:\n child1_part3.append(sequencia_chromosome1[y])\n if y == maior_posicao:\n child1_part4.append(sequencia_chromosome1[y])\n if y > maior_posicao:\n child1_part5.append(sequencia_chromosome1[y])\n \n for y in range(0, chrom2_size):\n if y < menor_posicao:\n child2_part1.append(sequencia_chromosome2[y])\n if y == menor_posicao:\n child2_part2.append(sequencia_chromosome2[y])\n if y > menor_posicao and y < maior_posicao:\n child2_part3.append(sequencia_chromosome2[y])\n if y == maior_posicao:\n child2_part4.append(sequencia_chromosome2[y])\n if y > maior_posicao:\n child2_part5.append(sequencia_chromosome2[y])\n \n child1 = child1_part1+child1_part2+child2_part3+child1_part4+child1_part5\n child2 = child2_part1+child2_part2+child1_part3+child2_part4+child2_part5\n\n for y in range(0, len(child1)):\n if y == 0:\n teste = child1[y]\n else:\n teste += child1[y]\n # print(\"CHILD 1: %s\" % teste)\n child1_chromosome.append(teste)\n\n for y in range(0, len(child2)):\n if y == 0:\n teste = child2[y]\n else:\n teste += child2[y]\n # print(\"CHILD 2: %s\" % teste)\n # print(\"\\n\")\n child2_chromosome.append(teste)\n\n individual1.setChromosome(child1_chromosome)\n individual2.setChromosome(child2_chromosome)\n\n\n \"\"\"\n Implementação do operador block shuffling 1\n \"\"\"\n def block_shuffling_left(self, individual1):\n # Para o operador block shuffling mover um bloco cheio de lacunas uma posição para esquerda\n # print(\"-----------MOVER BLOCO DE LACUNAS PARA UMA POSIÇÃO NA ESQUERDA-----------\")\n sequence = individual1.getChromosome()\n amount_sequence = len(sequence)\n child_chromosome = []\n for x in range(0, amount_sequence):\n size_sequence = len(sequence[x]) \n child = []\n # var = \"\"\n # contador = 0\n for y in range(0, size_sequence): # Para identificar os gap e os mover\n if 
sequence[x][y] == \"-\":\n if y != 0:\n temp = child[len(child)-1] \n child.pop()\n child.append(sequence[x][y])\n child.append(temp)\n else:\n # contador = 1\n var = sequence[x][size_sequence-1]\n child.append(sequence[x][y])\n else:\n child.append(sequence[x][y])\n for y in range(0, len(child)):\n if y == 0:\n child_new = child[y]\n else:\n child_new += child[y]\n\n child_chromosome.append(child_new)\n # print(\"PAI: %s\" % sequence[x])\n # print(\"FILHO: %s\" % child_new)\n # print(\"\\n\")\n\n individual1.setChromosome(child_chromosome)\n\n\n \"\"\"\n Implementação do operador block shuffling 2\n \"\"\"\n def block_shuffling_vertically(self, individual1):\n # Para o operador block shuffling para dividir a metade um bloco de gaps e mover para esquerda\n # print(\"-----------VERTICAL GAPS-----------\")\n sequence = individual1.getChromosome()\n amount_sequence = len(sequence)\n contador1 = 0\n contador2 = 0\n child_chromosome = []\n for x in range(0, amount_sequence):\n \n size_sequence = len(sequence[x])\n child = []\n position = []\n for y in range(0, size_sequence):\n if sequence[x][y] == \"-\":\n position.append(y)\n contador1 += 1\n contador2 = 0\n else:\n contador2 = 1\n if contador1 > 1 and contador2 == 1:\n contador1 = 0\n contador2 = 0\n break\n \n if len(position) % 2 == 0:\n teste = len(position) / 2\n else:\n teste = len(position) / 2\n child = []\n temp = \"\"\n for y in range(0, size_sequence):\n if position == []:\n child.append(sequence[x][y])\n else:\n recebe = int(position[0]) + teste\n # print(\"CHILD: %s\" % child)\n if y == position[0]:\n if child != []:\n temp = child[len(child)-1]\n child.pop()\n \n if y == recebe:\n child.append(temp)\n child.append(sequence[x][y])\n else:\n child.append(sequence[x][y])\n \n for y in range(0, len(child)):\n if y == 0:\n child_new = child[y]\n else:\n child_new += child[y] \n del child\n #print(\"PAI:\\t%s\" % sequence[x]) \n #print(\"FILHO:\\t%s\" % child_new) \n #print(\"\\n\")\n child_chromosome.append(child_new)\n\n individual1.setChromosome(child_chromosome)","sub_path":"Operator.py","file_name":"Operator.py","file_ext":"py","file_size_in_byte":11963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"396323856","text":"from System.IO import *\n\nclass Feed:\n def __init__(self, Dir, NewsSourceName, RSSName, Module):\n self.Dir = Dir\n self.NewsSourceName = NewsSourceName\n self.RSSName = RSSName\n self.Module = Module\n\n def __str__(self):\n return self.Dir.FullName\n\ndef GetFeedsList():\n FeedsList = []\n for d in DirectoryInfo('Feeds').GetDirectories():\n NewsSourceName = d.Name\n for RSSdir in d.GetDirectories():\n for f in RSSdir.GetFiles():\n if f.Name.endswith('.rb') or f.Name.endswith('.py'):\n FeedsList.append(Feed(RSSdir, NewsSourceName, f.FullName.split('.')[0], f.FullName))\n return FeedsList\n\n##def GetFeedsList():\n## FeedsList = []\n## for d in DirectoryInfo('Feeds').GetDirectories():\n## NewsSourceName = ''\n## for f in d.GetFiles():\n## if f.FullName.endswith('.xml'):\n## NewsSourceName = GetNewsSourceName(f.FullName)\n## for RSSdir in d.GetDirectories():\n## RSSName = ''\n## for f in RSSdir.GetFiles():\n## if f.FullName.endswith('.xml'):\n## RSSName = GetRSSName(f.FullName)\n## FeedsList.append(Feed(NewsSourceName, RSSName, GetRSSModule(RSSdir.FullName)))\n## \n## return FeedsList\n##\n##def GetNewsSourceName(f):\n## with open(f) as f:\n## pass\n##\n##def GetRSSName(f):\n## pass\n##\n##def GetRSSModule(f):\n## pass\n","sub_path":"Feed.py","file_name":"Feed.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"37195516","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float32\n\ndef callback(data):\n rospy.loginfo(rospy.get_caller_id() + \" I heard %s\", data.data)\n\ndef reader():\n\n\trospy.init_node('reader', anonymous=True)\n\n\trospy.Subscriber(\"convo\",Float32, callback)\n\t\n\trospy.spin()\n\t\nif __name__== '__main__':\n\treader()\n","sub_path":"scripts/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"643882083","text":"'''\r\n连续变量进行对数转换\r\n'''\r\nfrom util import dataset\r\nfrom scipy import stats\r\n\r\nprint('Loading data......')\r\ntrain = dataset.load('numeric', 'train').astype(float)\r\ntest = dataset.load('numeric', 'test').astype(float)\r\nnum_col = dataset.load('numeric', 'feature')\r\n\r\nfor col in num_col:\r\n if stats.skew(train[col]) > 0.25:\r\n values, lam = stats.boxcox(train[col].values+1)\r\n train[col] = values\r\n print(col)\r\n\r\n if stats.skew(test[col]) > 0.25:\r\n values, lam = stats.boxcox(test[col].values+1)\r\n test[col] = values\r\n\r\nprint(train.head())\r\nprint('='*20)\r\nprint(test.head())\r\nprint('='*20)\r\n\r\nprint('Saving data......')\r\ndataset(numeric_boxcox=train).save('train')\r\ndataset(numeric_boxcox=test).save('test')\r\n\r\nprint('Done!')\r\n","sub_path":"02Loan_Prediction/preprocessing/numeric-boxcox.py","file_name":"numeric-boxcox.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"516668564","text":"import asyncio\nimport random\n\n\nasync def add(start, end, wait):\n sum = 0\n for n in range(start, end):\n sum += n\n await asyncio.sleep(wait)\n print(f'Sum from {start} to {end} is {sum}')\n\n\nasync def main():\n task1 = asyncio.create_task(add(1, 10000001, 0))\n task2 = asyncio.create_task(add(2, 102, 2))\n task3 = asyncio.create_task(add(3, 10, 1))\n await asyncio.wait([task1, task2, task3])\n\n\nasync def slow_func():\n await asyncio.sleep(1)\n return 'answer'\n\n\nasync def failed_func():\n print('failed')\n await asyncio.sleep(2)\n raise Exception\n\n\nasync def test():\n response = slow_func()\n print(response)\n try:\n await failed_func()\n except Exception as e:\n print(e, await response)\n\n\nasync def say(what, when):\n await asyncio.sleep(when)\n print(what)\n\n\nasync def stop_after(loop, when):\n await asyncio.sleep(when)\n loop.stop()\n\n\nasync def long_say():\n index = 0\n while True:\n await say(f'{index}: hello', when=0.1)\n index += 1\n\n\nasync def print_every_second():\n while True:\n for i in range(60):\n print(f'{i}s')\n await asyncio.sleep(1)\n\n\nasync def print_every_minute():\n for i in range(1, 10):\n await asyncio.sleep(60)\n print(f'{i}min')\n\n\ndef time_runner():\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n asyncio.gather(print_every_second(), print_every_minute())\n )\n loop.close()\n\n\nasync def read():\n index = -1\n while True:\n index += 1\n print(f'reading: {index}')\n await asyncio.sleep(0.5)\n print(f'finished reading {index}')\n yield index\n\n\nasync def db_write(index):\n print(f'saving {index}')\n await asyncio.sleep(3)\n print(f'finished saving {index}')\n\n\nasync def rw_runner():\n async for item in read():\n await db_write(item)\n\n\ndef say_runner():\n loop = asyncio.get_event_loop()\n loop.run_until_complete(say('hello_world', 1))\n loop.close()\n\n\ndef say_runner_multiple():\n loop = asyncio.get_event_loop()\n loop.create_task(say('first hello', 2))\n loop.create_task(say('second hello', 1))\n loop.run_forever()\n loop.close()\n\n\ndef say_runner_long():\n loop = asyncio.get_event_loop()\n # loop.run_until_complete(long_say())\n loop.create_task(say('2 hello_world', 2))\n loop.create_task(long_say())\n loop.create_task(say('1 hello_world', 1))\n loop.run_forever()\n loop.close()\n\n\ndef say_stop_runner():\n loop = asyncio.get_event_loop()\n\n loop.create_task(say('first_hello', 2))\n loop.create_task(say('second_hello', 1))\n loop.create_task(say('third_hello', 4))\n loop.create_task(stop_after(loop, 3))\n\n loop.run_forever()\n loop.close()\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"reader/async_test.py","file_name":"async_test.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"190867339","text":"# -*- coding: utf-8 -*-\n# author:xls\n\"\"\"\n 成一个包含大写字母A-Z和数字0-9的随机4位验证码\n\"\"\"\nimport random\ncheck_code = ''\nfor i in range(4):\n current = random.randrange(4)\n if current == i:\n #生成一个随机整数\n temp = random.randrange(10)\n else:\n #生成一个随机的大写字母\n temp = chr(random.randrange(65, 91))\n check_code += str(temp)\nprint(check_code)","sub_path":"grammar/random_demo.py","file_name":"random_demo.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"328355305","text":"__all__ = ['add', 'dump', 'get', 'import', 'join', 'list', 'refresh', 'remove', 'removeall', 'setdate']\n\nfrom importlib import import_module\nfrom discord import Client, Message\n\nSHORT_HELP_TEXT = '$$$rss [...] - Gere feeds RSS (inclui subcomandos)'\n\ndef get_subcommand(name: str):\n \"\"\"\n Select subcommand\n \"\"\"\n return import_module('.' + name, 'hooks.commands.rss')\n\ndef get_subcommand_short_help(name: str) -> str:\n \"\"\"\n Show subcommand help\n \"\"\"\n return get_subcommand(name).SHORT_HELP_TEXT\n\ndef get_subcommand_long_help(name: str, **kwargs) -> str:\n \"\"\"\n Show subcommand long help\n \"\"\"\n return get_subcommand(name).help(**kwargs)\n\ndef help(**kwargs):\n \"\"\"\n Show help\n \"\"\"\n if not kwargs['args']:\n return str.join('\\n', map(get_subcommand_short_help, __all__))\n else:\n name = kwargs['args'][0]\n kwargs['args'] = kwargs['args'][1:]\n return get_subcommand_long_help(name, **kwargs)\n\nasync def run(client: Client, message: Message = None, **kwargs):\n \"\"\"\n Run command\n \"\"\"\n args = kwargs.get('args', [])\n if not args or args[0] == '':\n command = 'refresh'\n else:\n command = args[0]\n kwargs['args'] = args[1:]\n\n if command in __all__:\n command_module = get_subcommand(command)\n return await command_module.run(client, message, **kwargs)\n else:\n raise NotImplementedError('$$$rss {}'.format(command))\n","sub_path":"hooks/commands/rss/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"21688844","text":"from django.shortcuts import render\n\nfrom .models import Section, Navigation\n\n# Create your views here.\ndef index(request):\n section_list = Section.objects.all()\n navigation_list = Navigation.objects.all()\n\n context = {\n 'section_list': section_list,\n 'navigation_list': navigation_list,\n }\n\n return render(request, 'home/index.html', context)\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"171668668","text":"from datetime import timedelta\n# 第三方类\n# 自己的类\nfrom . import MetaClass\nfrom framework.database import DbOperation\nfrom utils.time import Datetime\nfrom utils.decorators import promise_do_once\nfrom settings import log\n\n\nclass IllegalDot1xUserJob(metaclass=MetaClass):\n next_time = Datetime.localtime()\n\n @classmethod\n def start(cls, force=False):\n now = Datetime.localtime()\n if not force and now < cls.next_time:\n return\n return\n # 下次运行: 每天早上7点\n cls.next_time = (now + timedelta(days=1)).replace(hour=7, minute=0, second=0, microsecond=0)\n #\n start_time = Datetime.localtime().replace(hour=0, minute=0, second=0, microsecond=0)\n end_time = start_time - timedelta(days=1)\n cls.doing(start_time=start_time, end_time=end_time)\n\n @classmethod\n @promise_do_once(file_name=__file__, func_name='doing')\n def doing(cls, start_time, end_time):\n # 所有public的AP\n public_ap = set()\n owner_ap = dict()\n sql = f\"\"\"\n SELECT * FROM ap_owner\n \"\"\"\n for row in DbOperation.select(sql):\n username = row['username']\n ap_mac = row['ap_mac']\n is_public = row['is_public']\n if is_public:\n public_ap.add(ap_mac)\n else:\n if username not in owner_ap:\n owner_ap[username] = set()\n owner_ap[username].add(ap_mac)\n\n # 按username统计连接最多的AP, 作为用户绑定的常用AP. 需排除is_public的AP\n username_ap = dict()\n # TODO 加上时间筛选, 30天内\n sql = f\"\"\"\n SELECT username, ap_mac, count(*) AS accept_count FROM stat_user GROUP BY username, ap_mac ORDER BY accept_count DESC\n \"\"\"\n log.info(f'sql: {sql}')\n for row in DbOperation.select(sql):\n username = row['username']\n ap_mac = row['ap_mac']\n accept_count = row['accept_count']\n if ap_mac in public_ap:\n continue\n if username in owner_ap:\n # 跳过已绑定用户的AP\n continue\n if username in username_ap:\n # 绑定关系已处理\n continue\n else:\n username_ap[username] = f'{ap_mac}:{accept_count}'\n\n # 按 username 统计, 告警: 不等于该ap_owner的username\n username_ap = dict()\n sql = f\"\"\"\n SELECT username, ap_mac, count(*) AS accept_count FROM stat_user GROUP BY username, ap_mac ORDER BY accept_count DESC\n \"\"\"\n log.info(f'sql: {sql}')\n for row in DbOperation.select(sql):\n username = row['username']\n ap_mac = row['ap_mac']\n accept_count = row['accept_count']\n #\n if username in owner_ap:\n # 跳过已绑定用户的AP\n continue\n if ap_mac in public_ap:\n # 公用AP跳过\n continue\n if f'{username}' in username_ap:\n # 绑定关系已处理\n continue\n else:\n username_ap[f'{username}'] = ap_mac\n\n log.info(f'public_ap: {public_ap}')\n log.info(f'owner_ap: {owner_ap}')\n log.info(f'username_ap: {username_ap}')\n log.info(f'username_ap: {username_ap}')\n for key, value in username_ap.items():\n username = key\n ap_mac = value\n correct_ap_mac, correct_accept_count = username_ap[username].split(':')\n if ap_mac == correct_ap_mac:\n continue\n log.error(f'username: {username} 应绑定AP: {correct_ap_mac}, 次数: {correct_accept_count}. 但现连接: {ap_mac}')\n # 发送slack统计消息\n # text = f'昨天充值金额: {today_sum/100} 元, 历史累计充值金额: {total_sum/100} 元'\n # Feishu.send_groud_msg(receiver_id=Feishu.FEISHU_CHARGE_CHAT_ID, text=text)\n","sub_path":"src/timer_processor/jobs/illegal_dot1x_user.py","file_name":"illegal_dot1x_user.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"240599282","text":"import argparse\nimport pandas as pd\nimport numpy as np\n\nimport argparse\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport os\nimport pickle\n# from data_loader import get_loader\nfrom build_vocab import Vocabulary\nfrom model_fastText import EncoderCNN, DecoderRNN\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torchvision import transforms\nfrom collections import OrderedDict\nimport spacy, string, nltk\nfrom spacy.lang.en.stop_words import STOP_WORDS\nimport gensim\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nnlp = spacy.load('en_core_web_sm')\nstopwords = set(stopwords.words('english'))\npunctuation = set(string.punctuation)\nlemmatize = WordNetLemmatizer()\nwords = set(nltk.corpus.words.words())\n\nclass TextRank4Keyword():\n \"\"\"Extract keywords from text\"\"\"\n\n def __init__(self):\n self.d = 0.85 # damping coefficient, usually is .85\n self.min_diff = 1e-5 # convergence threshold\n self.steps = 10 # iteration steps\n self.node_weight = None # save keywords and its weight\n\n\n def set_stopwords(self, stopwords):\n \"\"\"Set stop words\"\"\"\n for word in STOP_WORDS.union(set(stopwords)):\n lexeme = nlp.vocab[word]\n lexeme.is_stop = True\n\n def sentence_segment(self, doc, candidate_pos, lower):\n \"\"\"Store those words only in cadidate_pos\"\"\"\n sentences = []\n for sent in doc.sents:\n selected_words = []\n for token in sent:\n # Store words only with cadidate POS tag\n if token.pos_ in candidate_pos and token.is_stop is False:\n if lower is True:\n selected_words.append(token.text.lower())\n else:\n selected_words.append(token.text)\n sentences.append(selected_words)\n return sentences\n\n def get_vocab(self, sentences):\n \"\"\"Get all tokens\"\"\"\n vocab = OrderedDict()\n i = 0\n for sentence in sentences:\n for word in sentence:\n if word not in vocab:\n vocab[word] = i\n i += 1\n return vocab\n\n def get_token_pairs(self, window_size, sentences):\n \"\"\"Build token_pairs from windows in sentences\"\"\"\n token_pairs = list()\n for sentence in sentences:\n for i, word in enumerate(sentence):\n for j in range(i+1, i+window_size):\n if j >= len(sentence):\n break\n pair = (word, sentence[j])\n if pair not in token_pairs:\n token_pairs.append(pair)\n return token_pairs\n\n def symmetrize(self, a):\n return a + a.T - np.diag(a.diagonal())\n\n def get_matrix(self, vocab, token_pairs):\n \"\"\"Get normalized matrix\"\"\"\n # Build matrix\n vocab_size = len(vocab)\n g = np.zeros((vocab_size, vocab_size), dtype='float')\n for word1, word2 in token_pairs:\n i, j = vocab[word1], vocab[word2]\n g[i][j] = 1\n\n # Get Symmeric matrix\n g = self.symmetrize(g)\n\n # Normalize matrix by column\n norm = np.sum(g, axis=0)\n g_norm = np.divide(g, norm, where=norm!=0) # this is ignore the 0 element in norm\n\n return g_norm\n\n\n def get_keywords(self, number=10):\n \"\"\"Print top number keywords\"\"\"\n keywords = []\n node_weight = OrderedDict(sorted(self.node_weight.items(), key=lambda t: t[1], reverse=True))\n for i, (key, value) in enumerate(node_weight.items()):\n keywords.append(key)\n # print(key + ' - ' + str(value))\n if i > number:\n break\n return keywords\n\n def analyze(self, text,\n candidate_pos=['NOUN', 'PROPN'],\n window_size=4, lower=False, stopwords=list()):\n \"\"\"Main function to analyze text\"\"\"\n\n # Set stop words\n self.set_stopwords(stopwords)\n\n # Pare text 
by spaCy\n doc = nlp(text)\n\n # Filter sentences\n sentences = self.sentence_segment(doc, candidate_pos, lower) # list of list of words\n\n # Build vocabulary\n vocab = self.get_vocab(sentences)\n\n # Get token_pairs from windows\n token_pairs = self.get_token_pairs(window_size, sentences)\n\n # Get normalized matrix\n g = self.get_matrix(vocab, token_pairs)\n\n # Initionlization for weight(pagerank value)\n pr = np.array([1] * len(vocab))\n\n # Iteration\n previous_pr = 0\n for epoch in range(self.steps):\n pr = (1-self.d) + self.d * np.dot(g, pr)\n if abs(previous_pr - sum(pr)) < self.min_diff:\n break\n else:\n previous_pr = sum(pr)\n\n # Get weight for each node\n node_weight = dict()\n for word, index in vocab.items():\n node_weight[word] = pr[index]\n\n self.node_weight = node_weight\n\n\ndef unique_keywords(df):\n all_keywords = []\n values = []\n for item in df:\n # gensim.utils.simple_preprocess(item, deacc=True)\n doc = nlp(item)\n b = []\n for tok in doc:\n\t if tok.is_stop != True and tok.pos_ != 'SYM' and \\\n \t tok.tag_ != 'PRP' and tok.tag_ != 'PRP$' and \\\n tok.tag_ != '_SP' and tok.pos_ != 'NUM' and \\\n tok.dep_ != 'aux' and tok.dep_ != 'prep' and \\\n tok.dep_ != 'det' and tok.dep_ != 'cc' and \\\n tok.lemma_ != 'frac' and len(tok) != 1 and \\\n tok.lemma_.lower() in words and \\\n tok.lemma_.lower() not in stopwords and \\\n tok.lemma_.lower() not in punctuation:\n b.append(lemmatize.lemmatize(tok.lemma_.lower()))\n\n # print(b)\n # print(\" \".join(b))\n tr4w = TextRank4Keyword()\n tr4w.analyze(\" \".join(b), candidate_pos = ['NOUN', 'PROPN'], window_size=4, lower=False)\n keyword = tr4w.get_keywords(5)\n all_keywords.append(keyword)\n values = values + keyword\n return all_keywords, values\n\ndef genereate(args,key_words):\n # Create model directory\n if not os.path.exists(args.model_path):\n os.makedirs(args.model_path)\n\n # Load vocabulary wrapper\n with open(args.emb_model, 'rb') as f:\n emb_model = pickle.load(f)\n\n emb_model_weights = emb_model.wv.syn0\n\n # Build data loader\n # data_loader = get_loader(args.image_dir, args.caption_path, vocab,\n # args.dictionary, args.batch_size,\n # shuffle=True, num_workers=args.num_workers)\n # data = input(\"Enter Topic: \")\n # Build the models\n #encoder = EncoderCNN(args.embed_size).to(device)\n # dictionary = pd.read_csv(args.dictionary, header=0,encoding = 'unicode_escape',error_bad_lines=False)\n # dictionary = list(dictionary['keys'])\n\n decoder = DecoderRNN(args.embed_size, args.hidden_size, len(emb_model.wv.vocab), args.num_layers, emb_model_weights).to(device)\n decoder.load_state_dict(torch.load(args.model_path, map_location=device))\n decoder.eval()\n\n\n # Train the models\n # total_step = len(data_loader)\n # for epoch in range(args.num_epochs):\n # for i, (array, captions, lengths) in enumerate(data_loader):\n array = torch.zeros((256))\n count = 0\n for val in key_words:\n array = torch.add(array, torch.from_numpy(emb_model.wv[val]))\n count += 1\n array = torch.div(array, count)\n # print(\"In sample\", array)\n array = (array, )\n array = torch.stack(array, 0)\n array = array.to(device)\n # print(\"After\", array)\n #captions = captions.to(device)\n # targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]\n\n # Forward, backward and optimize\n #features = encoder(images)\n outputs = decoder.sample(array)\n\n count = 0\n sentence = ''\n for i in range(len(outputs)):\n sampled_ids = outputs[i].cpu().numpy() # (1, max_seq_length) -> (max_seq_length)\n\n # Convert word_ids to words\n 
sampled_caption = []\n for word_id in sampled_ids:\n count += 1\n word = emb_model.wv.index2word[word_id]\n sampled_caption.append(word)\n if word == '':\n break\n sentence = sentence.join(' ')\n sentence = sentence.join(sampled_caption)\n\n # Print out the image and the generated caption\n print (sentence)\n return sentence\n # print(count)\n\ndef Distance(x1,y1,x2,y2):\n\treturn ((x1-x2)**2 + (y1-y2)**2 ) ** 0.5\n\n\ndef generateKeywords(camp,resource_map,X,Y):\n\tcamp[\"distance\"] = 0\n\n\tfor idx,row in cmap.iterrows():\n\t\tcmap.loc[idx,\"distance\"] = Distance(row[\"X\"],row[\"Y\"],X,Y)\n\n\tcmap.sort_values(by=['distance'],inplace = True)\n\tresource_ids = cmap.resource_id.values[0:6]\n\tdes = []\n\tfor i in resource_ids:\n\t\tif(len(resource_map[resource_map.resource_id == i].Summarization.values)!=0):\n\t\t\tdes.append(resource_map[resource_map.resource_id == i].Summarization.values[0])\n\n\tall_keywords, values = unique_keywords(des)\n\tkeys = list(pd.unique(values))\n\tprint(keys)\n\treturn keys\n\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--computency_map\", type=str, required=True, help=\"Path to the computency map\")\n\tparser.add_argument(\"--resource\", type=str, required=True, help=\"Path to the resource csv\")\n\n\tparser.add_argument(\"--input_X\", type=float, required=True, help=\"X_Cooridnate\")\n\tparser.add_argument(\"--input_Y\", type=float, required=True, help=\"Y_Cooridnate\")\n\tparser.add_argument('--model_path', type=str, default='collections_all_science_out/allcontent_required_NotNull_sum_outFastText.ckpt' , help='path of saved models')\n# \tparser.add_argument('--vocab_path', type=str, default='allcontent_required_NotNull_sum_outCaptions.pkl', help='path for vocabulary wrapper')\n# \tparser.add_argument('--dictionary', type=str, default='allcontent_required_NotNull_sum_out.dict', help='path to dictionary file')\n # parser.add_argument('--caption_path', type=str, default='data/testdata.csv', help='path for train annotation json file')\n\tparser.add_argument('--log_step', type=int , default=10, help='step size for prining log info')\n\tparser.add_argument('--image_dir', type=str, default='png/' , help='tmp')\n\tparser.add_argument('--emb_model', type=str, default='collections_all_science_out/fasttext.model', help='path for embedding model')\n\n # Model parameters\n\tparser.add_argument('--embed_size', type=int , default=256, help='dimension of word embedding vectors')\n\tparser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states')\n\tparser.add_argument('--num_layers', type=int , default=2, help='number of layers in lstm')\n\n\tparser.add_argument('--num_epochs', type=int, default=5)\n\tparser.add_argument('--batch_size', type=int, default=128)\n\tparser.add_argument('--num_workers', type=int, default=2)\n\tparser.add_argument('--learning_rate', type=float, default=0.001)\n\n\n\targs = parser.parse_args()\n\n\tcmap = pd.read_csv(args.computency_map)\n\tresource_map = pd.read_csv(args.resource)\n\n\tcmap.sort_values(by = ['topic_volume', 'doc_volume'],inplace = True)\n\n\tnear_topics = generateKeywords(cmap,resource_map,args.input_X,args.input_Y)\n\n\tsen = genereate(args,near_topics)\n","sub_path":"gen_new_sumFastText.py","file_name":"gen_new_sumFastText.py","file_ext":"py","file_size_in_byte":11458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"136311786","text":"import numpy as np\nfrom copy import deepcopy\n\nclass SGD:\n\n\tdef __init__(self, lr, momentum):\n\t\tself.lr = lr\n\t\tself.momentum = momentum\n\t\tself.prev_deltas = None\n\n\tdef step(self, layers):\n\t\tif not self.prev_deltas:\n\t\t\tself.prev_deltas = [np.zeros_like(layer.weights) for layer in layers]\n\n\t\tfor layer, prev_deltas in zip(layers, self.prev_deltas):\n\t\t\tfor j, in_node in enumerate(layer.in_nodes):\n\t\t\t\tfor k, out_node in enumerate(layer.out_nodes):\n\t\t\t\t\tdelta = self.lr*in_node*out_node\n\t\t\t\t\tlayer.weights[k, j] += delta + self.momentum*prev_deltas[k, j]\n","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"160897198","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE','first_project.settings')\n\nimport django\ndjango.setup()\n\n\nimport random\nfrom first_app.models import Topic,AccessRecord,Webpage\nfrom faker import Faker\n\nfakegen = Faker()\ntopic = ['Movies','Games','Electronics & Media','Cartoons','Footwear']\n\ndef add_topic():\n t = Topic.objects.get_or_create(top_name=random.choice(topic))[0]\n t.save()\n return t\n\n\ndef populate(N=5):\n\n for entry in range(N):\n\n top = add_topic()\n\n\n fake_name = fakegen.company()\n fake_url = fakegen.url()\n fake_date = fakegen.date()\n\n\n webpg = Webpage.objects.get_or_create(topic=top,name=fake_name,url=fake_url)[0]\n\n acc_rec = AccessRecord.objects.get_or_create(name=webpg,date=fake_date)[0]\n\n\nif __name__ == '__main__':\n print(\"populating script\")\n populate(10)\n print(\"populating complete/done\")\n","sub_path":"Django Level Two/first_project/populate_first_app.py","file_name":"populate_first_app.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"91746180","text":"import pandas as pd\r\nfrom matplotlib.pyplot import *\r\nimport numpy as np\r\n\r\nN = 1000\r\nname = \"korona.csv\"\r\n\r\n\r\nclass trade:\r\n __name = \"\"\r\n __money = 0\r\n\r\n def __init__(self, name, money):\r\n self.__name = name\r\n self.__money = money\r\n self.trading()\r\n\r\n def trading(self):\r\n bought = 0\r\n\r\n transactions = 0\r\n starting_money = self.__money\r\n macd = Macd(self.__name)\r\n exchange = data(self.__name)\r\n exchange, days = exchange.getData()\r\n signal = macd.getBuySell()\r\n print(exchange)\r\n print(signal)\r\n for i in range(1, len(signal)):\r\n if signal[i] == int(1) and bought == 0:\r\n bought = (self.__money) / exchange[i] # count how many actions are bought\r\n self.__money = self.__money - (self.__money)\r\n transactions = transactions + 1\r\n print(\"bought for: \" + str(exchange[i]))\r\n elif signal[i] == int(-1) and bought > 0:\r\n self.__money = self.__money + (bought * exchange[i]) # sell previously bought actions\r\n transactions = transactions + 1\r\n print(\"sold for: \" + str(exchange[i]))\r\n bought = 0\r\n\r\n print(\"To sold: \" + str(bought) + \" now for: \" + str(exchange[len(signal)]) + \" equals: \" + str(\r\n bought * exchange[len(signal)]))\r\n print(\"After trading for 1000 days and completing \" + str(transactions) + \" transactions from: \" + str(\r\n starting_money) + \" PLN, we have: \" + str(self.__money) + \" PLN.\")\r\n\r\n\r\nclass data:\r\n __name = \"\"\r\n\r\n def __init__(self, name):\r\n self.__name = name\r\n\r\n def getData(self):\r\n data = pd.read_csv(self.__name) # load csv file\r\n close_data = []\r\n days_reversed = []\r\n for i in range(0, N):\r\n close_data.append(float(data.iloc[i, 2])) # 2 miałem\r\n days_reversed.append(data.iloc[i, 1])\r\n\r\n days = []\r\n data_reversed = []\r\n for i in reversed(close_data):\r\n data_reversed.append(i)\r\n for i in reversed(days_reversed):\r\n days.append(i)\r\n return data_reversed, days\r\n\r\n\r\nclass Macd:\r\n __macd = []\r\n __signal = []\r\n __date = []\r\n __buy_and__sell_signals = []\r\n\r\n def __init__(self, name):\r\n numbers = data(name)\r\n numbers, days = numbers.getData()\r\n self.macd(numbers)\r\n self.signal()\r\n self.buySell()\r\n self.__date = days\r\n\r\n def eman(self, n, data, day):\r\n alpha = float(2 / (n + 1))\r\n p_reversed = data[day - n: day:]\r\n p_base = []\r\n\r\n for i in reversed(p_reversed):\r\n p_base.append(i)\r\n\r\n counter = float(0.0)\r\n denominator = float(0.0)\r\n\r\n for i in range(n):\r\n number = pow(float((1 - alpha)), i)\r\n counter += p_base[i] * number\r\n denominator += number\r\n\r\n return counter / denominator\r\n\r\n def signal(self):\r\n self.__signal = []\r\n\r\n for i in range(1, len(self.__macd)):\r\n if i < 9:\r\n ema9 = self.eman(i, self.__macd, i)\r\n self.__signal.append(0)\r\n else:\r\n ema9 = self.eman(9, self.__macd, i)\r\n self.__signal.append(ema9)\r\n\r\n def macd(self, exchange):\r\n self.__macd = []\r\n for i in range(1, len(exchange)):\r\n if i <= 12:\r\n ema12 = self.eman(i, exchange, i)\r\n ema26 = self.eman(i, exchange, i)\r\n self.__macd.append(ema12 - ema26)\r\n elif i < 26:\r\n ema12 = self.eman(12, exchange, i)\r\n ema26 = self.eman(i, exchange, i)\r\n self.__macd.append(ema12 - ema26)\r\n else:\r\n ema12 = self.eman(12, exchange, i)\r\n ema26 = self.eman(26, exchange, i)\r\n self.__macd.append(ema12 - ema26)\r\n\r\n def buySell(self):\r\n self.__buy_and__sell_signals.clear()\r\n self.__buy_and__sell_signals.append(int(0))\r\n minus = 0\r\n plus = 0\r\n 
for var in range(1, len(self.__macd) - 1):\r\n if self.__macd[var - 1] > self.__signal[var - 1] and self.__macd[var] <= self.__signal[var] and self.__macd[var] > 0:\r\n minus = minus + 1\r\n self.__buy_and__sell_signals.append(int(-1))\r\n elif self.__macd[var - 1] < self.__signal[var - 1] and self.__macd[var] >= self.__signal[var] and self.__macd[var] < 0:\r\n plus = plus + 1\r\n self.__buy_and__sell_signals.append(int(1))\r\n else:\r\n self.__buy_and__sell_signals.append(int(0))\r\n print(plus)\r\n print(minus)\r\n\r\n def getMacd(self):\r\n return self.__macd\r\n\r\n def getSignal(self):\r\n return self.__signal\r\n\r\n def getDate(self):\r\n return self.__date\r\n\r\n def getBuySell(self):\r\n return self.__buy_and__sell_signals\r\n\r\n\r\nclass Plot:\r\n def __init__(self, name):\r\n self.create_macd_plot(name)\r\n self.create_data_plot(name)\r\n self.create_macd_and_dara_plot(name)\r\n\r\n def create_macd_plot(self, name):\r\n numbers = Macd(name)\r\n titleName = ''\r\n for i in name:\r\n if i == '.':\r\n break\r\n else:\r\n titleName += i\r\n\r\n titleName += \" MACD Pointer\"\r\n p1, = plot(numbers.getMacd(), label=\"macd\") # macd blue\r\n p2, = plot(numbers.getSignal(), label=\"signal\")\r\n l1 = legend([p1], [\"macd\"], loc=1)\r\n l2 = legend([p2], [\"signal\"], loc=2)\r\n grid(True)\r\n days = numbers.getDate()\r\n xlabel('Days, from: ' + days[0] + ' to ' + days[N - 1])\r\n ylabel('Values')\r\n xticks(np.arange(0, N + 1, step=N / 5), (\r\n days[0], days[int(N / 5)], days[int(2 * N / 5)], days[int(3 * N / 5)], days[int(4 * N / 5)],\r\n days[int(N - 1)]))\r\n title(titleName)\r\n gca().add_artist(l1)\r\n show()\r\n\r\n def create_data_plot(self, name):\r\n numbers = data(name)\r\n titleName = \"\"\r\n for i in name:\r\n if i == '.':\r\n break\r\n else:\r\n titleName += i\r\n titleName += \" exchange\"\r\n numbers, days = numbers.getData()\r\n plot(numbers)\r\n grid(True)\r\n xlabel('Days, from: ' + days[0] + ' to ' + days[N - 1])\r\n ylabel('Cost in PLN')\r\n xticks(np.arange(0, N + 1, step=N / 5), (\r\n days[0], days[int(N / 5)], days[int(2 * N / 5)], days[int(3 * N / 5)], days[int(4 * N / 5)],\r\n days[int(N - 1)]))\r\n title(titleName)\r\n show()\r\n\r\n def create_macd_and_dara_plot(self, name):\r\n numbers = Macd(name)\r\n imputData = data(name)\r\n imputData, days = imputData.getData()\r\n titleName = ''\r\n for i in name:\r\n if i == '.':\r\n break\r\n else:\r\n titleName += i\r\n\r\n p1, = plot(numbers.getMacd(), label=\"macd\") # macd blue\r\n p2, = plot(imputData, label=\"input data\")\r\n l1 = legend([p1], [\"macd\"], loc=1)\r\n l2 = legend([p2], [\"input data\"], loc=2)\r\n grid(True)\r\n xlabel('Days, from: ' + days[0] + ' to ' + days[N - 1])\r\n ylabel('Values')\r\n xticks(np.arange(0, N + 1, step=N / 5), (\r\n days[0], days[int(N / 5)], days[int(2 * N / 5)], days[int(3 * N / 5)], days[int(4 * N / 5)],\r\n days[int(N - 1)]))\r\n title(titleName)\r\n gca().add_artist(l1)\r\n show()\r\n\r\n\r\ndef main():\r\n # b = Plot(name)\r\n c = trade(name, 1000)\r\n\r\n\r\nmain()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"269283610","text":"import wx\r\n\r\nfrom Kernel import Module\r\nfrom icons import icons8_comments_50\r\n\r\n_ = wx.GetTranslation\r\n\r\n\r\nclass BufferView(wx.Frame, Module):\r\n def __init__(self, parent, *args, **kwds):\r\n # begin wxGlade: BufferView.__init__\r\n wx.Frame.__init__(self, parent, -1, \"\",\r\n style=wx.DEFAULT_FRAME_STYLE | wx.FRAME_FLOAT_ON_PARENT | wx.TAB_TRAVERSAL)\r\n Module.__init__(self)\r\n self.SetSize((697, 584))\r\n self.text_buffer_length = wx.TextCtrl(self, wx.ID_ANY, \"\")\r\n self.text_buffer_info = wx.TextCtrl(self, wx.ID_ANY, \"\", style=wx.TE_CHARWRAP | wx.TE_MULTILINE)\r\n\r\n # Menu Bar\r\n self.BufferView_menubar = wx.MenuBar()\r\n wxglade_tmp_menu = wx.Menu()\r\n item = wxglade_tmp_menu.Append(wx.ID_ANY, \"Export EGV\", \"Export Engrave Data\")\r\n self.Bind(wx.EVT_MENU, self.on_menu_export, id=item.GetId())\r\n item = wxglade_tmp_menu.Append(wx.ID_ANY, \"Import EGV\", \"Import Engrave Data\")\r\n self.Bind(wx.EVT_MENU, self.on_menu_import, id=item.GetId())\r\n self.BufferView_menubar.Append(wxglade_tmp_menu, \"File\")\r\n self.SetMenuBar(self.BufferView_menubar)\r\n # Menu Bar end\r\n\r\n self.__set_properties()\r\n self.__do_layout()\r\n # end wxGlade\r\n self.Bind(wx.EVT_CLOSE, self.on_close, self)\r\n\r\n def on_close(self, event):\r\n if self.state == 5:\r\n event.Veto()\r\n return\r\n else:\r\n self.state = 5\r\n self.device.close('window', self.name)\r\n event.Skip() # Call destroy as regular.\r\n\r\n def initialize(self, channel=None):\r\n self.device.close('window', self.name)\r\n self.Show()\r\n\r\n pipe = self.device.interpreter.pipe\r\n buffer = None\r\n if pipe is not None:\r\n try:\r\n buffer = pipe._buffer + pipe._queue\r\n except AttributeError:\r\n buffer = None\r\n if buffer is None:\r\n buffer = _(\"Could not find buffer.\\n\")\r\n\r\n try:\r\n buffer_str = buffer.decode()\r\n except ValueError:\r\n buffer_str = buffer.decode(\"ascii\")\r\n except AttributeError:\r\n buffer_str = buffer\r\n\r\n self.text_buffer_length = self.text_buffer_length.SetValue(str(len(buffer_str)))\r\n self.text_buffer_info = self.text_buffer_info.SetValue(buffer_str)\r\n\r\n def finalize(self, channel=None):\r\n try:\r\n self.Close()\r\n except RuntimeError:\r\n pass\r\n\r\n def shutdown(self, channel=None):\r\n try:\r\n self.Close()\r\n except RuntimeError:\r\n pass\r\n\r\n def __set_properties(self):\r\n _icon = wx.NullIcon\r\n _icon.CopyFromBitmap(icons8_comments_50.GetBitmap())\r\n self.SetIcon(_icon)\r\n # begin wxGlade: BufferView.__set_properties\r\n self.SetTitle(_(\"BufferView\"))\r\n self.text_buffer_length.SetMinSize((165, 23))\r\n # end wxGlade\r\n\r\n def __do_layout(self):\r\n # begin wxGlade: BufferView.__do_layout\r\n sizer_1 = wx.BoxSizer(wx.VERTICAL)\r\n sizer_5 = wx.BoxSizer(wx.HORIZONTAL)\r\n label_8 = wx.StaticText(self, wx.ID_ANY, _(\"Buffer\"))\r\n sizer_5.Add(label_8, 0, 0, 0)\r\n sizer_5.Add(self.text_buffer_length, 10, 0, 0)\r\n sizer_1.Add(sizer_5, 0, wx.EXPAND, 0)\r\n sizer_1.Add(self.text_buffer_info, 1, wx.EXPAND, 0)\r\n self.SetSizer(sizer_1)\r\n self.Layout()\r\n # end wxGlade\r\n\r\n# end of class BufferView\r\n\r\n def on_menu_export(self, event): # wxGlade: BufferView.\r\n self.device.execute(\"egv export\")\r\n\r\n def on_menu_import(self, event): # wxGlade: BufferView.\r\n self.device.execute(\"egv import\")\r\n\r\n","sub_path":"BufferView.py","file_name":"BufferView.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"437086136","text":"from copy import copy\nfrom time import sleep\n\nimport numpy as np\nfrom p02_simulator import AstroObject, Simulator\n\n\nfrom vpython import *\n\n\n#################################################################################\n# Main program\n#################################################################################\n\n# set up parameters\nG = 6.67*10**(-11) # Newton's gravitational constant in m**3 kg**(-1) s**(-2)\nM = 1.99*10**30 # mass of the Sun in kg\nm = 5.97*10**24 # mass of the Earth in kg\nRmin = 147.1*10**9 # perihelion distance (initial point) in m\n\n# express everything in natural units - use years for time\nm0=5.97*10**24 # express all masses in terms of Earth's mass\nR0=149.6*10**8 # 1/10 AU (experiment with this)\nt0=24*3600*365.24\n\nG=G/(R0**3) *m0 * t0**2 # G in R0^3 m_E**(-1) years**(-2) \nM = M/m0\nm = m/m0\nRmin = Rmin/R0\n\n\n# set up the VPython scene\nscene = canvas(title='Solar System',\n width=600, height=400,\n center=vector(0,0,0), background=color.black)\n\n# For some reason, the creators thought it would be a good idea to have y be the\n# upward direction. We'll change that to the z direction.\nscene.forward = vector(1,0,0)\nscene.up = vector(0,0,1)\n\n# Define and initiate the simulated objects - remember to translate to natural units!\nsun = AstroObject(G, \n mass = 1.99*10**30/m0, \n pos=vector(0,0,0),\n velocity=vector(0,0,0), \n color=color.orange, radius=1)\n\nearth = AstroObject(G, \n mass = 5.97*10**24/m0, \n pos=vector(147.1*10**9/R0,0,0), \n velocity=vector(0,29800*t0/R0,0), \n color=color.blue, radius=0.2)\n\n# Create the list of objects and initiate the simulator.\nobjects=[sun, earth]\nsim = Simulator(objects, G, 0.001)\n\n\n# Choose the time span of the simulation (in years).\ntmax = 2\n\n\n# Create a VPython graph object for the potential energy.\nvgraph = graph(x=800, y=0,width=600,height=600,\\\n title = 'Potential Energy', \\\n xtitle = 't [yr]', ytitle = 'V [m_E R0^2 yr^-2]', \\\n foreground = color.black, background =color.white, \\\n xmax = tmax, xmin = 0)\n\n# All subsequently defined VPython curve objects are children of the\n# same graph until the next graph object is created.\nvcurves=[ ]\nfor obj in objects:\n\tvcurves.append(gcurve(color=obj.color))\n\n# Same graph for the kinetic energy...\ntgraph = graph(x=800, y=0,width=600,height=600,\\\n title = 'Kinetic Energy (radial + angular)', \\\n xtitle = 't [yr]', ytitle = 'T [m_E R0^2 yr^-2]', \\\n foreground = color.black, background =color.white, \\\n xmax = tmax, xmin = 0)\n\ntcurves=[ ]\nfor obj in objects:\n\ttcurves.append(gcurve(color=obj.color))\n\n\n# ... and the total energy.\negraph = graph(x=800, y=0,width=600,height=600,\\\n title = 'Total Energy', \\\n xtitle = 't [yr]', ytitle = 'E [m_E R0^2 yr^-2]', \\\n foreground = color.black, background =color.white, \\\n xmax = tmax, xmin = 0)\n\necurves=[ ]\nfor obj in objects:\n\tecurves.append(gcurve(color=obj.color))\n\n# We add one curve that contains the total energy of the entire system.\necurves.append(gcurve(color=vector(31,158,137)/255.))\n\n\n# Initialize step counter...\nsteps = 0\n\n# ... 
and start the simulation.\nwhile steps * sim.dt < tmax:\n\n\t# VPython animation rate.\n\trate(100)\n\t\n\t# Take a time step.\n\tsim.update_euler()\n\n\t# Update energy graphs.\n\ttotE = 0\n\tfor i, obj in enumerate(objects):\n\t\t# if obj == earth:\n\t\ttcurves[i].plot(steps*sim.dt, obj.T)\n\t\tvcurves[i].plot(steps*sim.dt, obj.V)\n\t\tecurves[i].plot(steps*sim.dt, obj.V + obj.T)\n\t\ttotE += obj.T + 0.5*obj.V\n\n\tecurves[-1].plot(steps*sim.dt, totE)\n \n\tsteps+=1\n\n\n","sub_path":"Projects/p02/p02_solar_system.py","file_name":"p02_solar_system.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"65610352","text":"# -*- coding: utf-8 -*-\nimport os\nfrom config import *\nif os.getenv(\"DEV\") is not None:\n from dotenv import load_dotenv\n \n load_dotenv(dotenv_path='./.env')\n\nimport sys\nimport json\nimport time\nfrom hospital import *\n\nfrom flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import *\n\napp = Flask(__name__)\n\n# getting channel secret\n# This would be the preferred approach but it just doesn't work\n# CHANNEL_SECRET = os.getenv('LINE_CHANNEL_SECRET')\n# CHANNEL_TOKEN = os.getenv('LINE_CHANNEL_TOKEN')\n\nif CHANNEL_SECRET is None:\n print(\"LINE_CHANNEL_SECRET may be undefined.\")\n sys.exit(1)\nif CHANNEL_TOKEN is None:\n print(\"LINE_CHANNEL_TOKEN may be undefined\")\n sys.exit(1)\n\nline_bot_api = LineBotApi(CHANNEL_TOKEN)\nhandler = WebhookHandler(CHANNEL_SECRET)\n\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n print(\"Invalid signature. Please check your channel access token/channel secret.\")\n abort(400)\n\n return 'OK'\n\n\nSTATE = {}\nDEPARTMENT = {}\n# state: 0(init), 1(diagnosis), 2(hospital), 3(covid-19), 4(knowledge), 5(knowledge_disease)\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n global STATE\n user = event.source.user_id\n if user not in STATE:\n STATE[user] = 0\n message = event.message.text\n if message == \"back\":\n STATE[user] = 0\n ret_message = TextSendMessage(text=\"請問要提供您什麼服務呢\")\n elif message == \"初步診斷\" and STATE[user] == 0:\n msg = \"請簡述您的症狀\"\n STATE[user] = 1\n ret_message = TextSendMessage(text=msg)\n\n elif STATE[user] == 1 and (\"胸悶\" in message) and (\"疲累\" in message):\n msg = \"初步分析結果:\\n心臟、肺臟、其他\\n\\n建議掛科:\\n心臟科、胸腔內科\\n\\n可能病因:\\n感染\\n\\n建議:\\n若為心臟方面疾病,需盡快就醫檢查\"\n STATE[user] = 0\n ret_message = TextSendMessage(\n text=msg,\n quick_reply=QuickReply(\n items=[\n QuickReplyButton(\n action=MessageAction(label=\"相關疾病查詢\", text=\"相關疾病查詢\")\n ),\n QuickReplyButton(\n action=MessageAction(label=\"查詢附近的內科醫院\", text=\"查詢附近的內科醫院\")\n ),\n QuickReplyButton(\n action=MessageAction(label=\"其他服務\", text=\"其他服務\")\n )\n ]))\n elif STATE[user] == 1 and (\"呼吸困難\" in message) or (\"嘨喘\" in message):\n STATE[user] = 0\n msg = \"初步分析結果:\\n氣管阻塞、氣喘、慢性阻塞性肺病(COPD)、肺栓塞\\n近期covid19疫情嚴重,若仍有發燒、咳嗽等症狀同時出現,可能為新冠肺炎之感染!\\n\\n建議掛科:\\n胸腔科、感染科\\nCOVID-19患者請前往急診篩檢\\n\\n可能病因:\\n肺部感染、心衰竭\"\n ret_message = TextSendMessage(\n text=msg,\n quick_reply=QuickReply(\n items=[\n QuickReplyButton(\n action=URIAction(label='Covid19篩檢站', uri='https://antiflu.cdc.gov.tw/ExaminationCounter')\n )\n ]))\n elif STATE[user] == 1:\n msg = \"Kompanion 暫時還診斷不出來您的病因,請尋找專業醫生協助,保重身體喔!!\"\n ret_message = TextSendMessage(text=msg)\n STATE[user] = 0\n\n elif STATE[user] == 0 and message == \"醫療小知識\":\n STATE[user] = 4\n time.sleep(2)\n msg = \"請問要詢問那一科呢?\"\n qr = [QuickReplyButton(action=MessageAction(label=department, text=department)) for department in DEPARTMENTS]\n ret_message = TextSendMessage(\n text=msg,\n quick_reply=QuickReply(items=qr)\n )\n \n elif STATE[user] == 4:\n STATE[user] = 5\n msg = f\"請問想了解{message}的什麼疾病呢?\"\n if message == \"心臟科\":\n qr = 
[QuickReplyButton(action=MessageAction(label=department, text=department)) for department in [\"心肌炎\",\"高血壓心臟病\",\"風濕性心臟病\",\"缺血性心臟病\",\"瓣膜性心臟病\",\"感染性心內膜炎\",\"心包膜疾病\",\"心律不整\",\"心臟腫瘤\",\"冠心病\",\"主動脈瘤破裂\",\"心肌梗塞\"]]\n ret_message = TextSendMessage(text=msg,quick_reply=QuickReply(items=qr))\n elif message == \"胸腔內科\":\n qr = [QuickReplyButton(action=MessageAction(label=department, text=department)) for department in [\"肺炎\",\"肺栓塞\",\"心因性肺水腫\",\"氣胸\",\"氣喘\",\"肺癌\",\"慢性阻塞性肺病(COPD)\",\"慢性支氣管炎\",\"急性支氣管炎\",\"支氣管擴張症\",\"支氣管癌\"]]\n ret_message = TextSendMessage(text=msg,quick_reply=QuickReply(items=qr))\n else:\n ret_message = TextSendMessage(text=msg)\n time.sleep(2)\n\n elif STATE[user] == 5 and message == \"心肌炎\":\n STATE[user] = 0\n msg = \"提供以下資訊給您參考:\\nhttps://wwwv.tsgh.ndmctsgh.edu.tw/unit/10012/12856\"\n ret_message = TextSendMessage(text=msg)\n\n elif STATE[user] == 5:\n STATE[user] = 0\n\n msg = \"提供以下資訊給您參考:\\nhttps://wwwv.tsgh.ndmctsgh.edu.tw/unit/10012/12856\"\n ret_message = TextSendMessage(text=msg)\n\n elif STATE[user] == 0 and message == \"查詢附近的採檢站\":\n STATE[user] = 3\n msg = \"請提供您的位置\"\n ret_message = TextSendMessage(\n text=msg,\n quick_reply=QuickReply(\n items=[\n QuickReplyButton( \n action=LocationAction(label=\"查詢附近的採檢站\")\n ),\n QuickReplyButton(\n action=MessageAction(label=\"其他服務\", text=\"其他服務\")\n )\n ]))\n\n elif STATE[user] == 0 and message == \"查詢附近的醫院\":\n STATE[user] = 2\n time.sleep(2)\n qr = [QuickReplyButton(action=MessageAction(label=department, text=department)) for department in DEPARTMENTS]\n \n ret_message = TextSendMessage(\n text=\"要看哪一科呢?\",\n quick_reply=QuickReply(items=qr))\n\n elif STATE[user] == 2 or message == \"查詢附近的內科醫院\":\n msg = \"請提供您的位置\"\n DEPARTMENT[user] = message\n ret_message = TextSendMessage(\n text=msg,\n quick_reply=QuickReply(\n items=[\n QuickReplyButton( \n action=LocationAction(label=\"查詢附近的醫院\")\n ),\n QuickReplyButton(\n action=MessageAction(label=\"其他服務\", text=\"其他服務\")\n )\n ]))\n\n else:\n STATE[user] = 0\n ret_message = TextSendMessage(text='你好!!我是 Kompanion,您的智慧醫療小助手!請問我能夠幫您什麼呢?')\n\n line_bot_api.reply_message(event.reply_token, ret_message)\n\n@handler.add(MessageEvent, message=LocationMessage)\ndef handle_location_message(event):\n\n global STATE\n global DEPARTMENT\n user = event.source.user_id\n LATITUDE = event.message.latitude\n LONGITUDE = event.message.longitude\n if STATE[user] == 3:\n pcr_name = get_nearby_PCR((LATITUDE, LONGITUDE))\n msg = f\"離您最近的採檢站為:\\n{pcr_name}\\n\\n打開google map以查詢位置:\\nhttps://www.google.com.tw/maps/search/{pcr_name}\"\n ret_message = TextSendMessage(text=msg)\n elif STATE[user] == 2:\n # get_hospital_by_department DEPARTMENT[user]\n test_flex = json.load(open(\"./flex/hospital.json\", \"r\"))\n ret_message = FlexSendMessage(alt_text='hospital', contents=test_flex)\n else:\n ret_message = TextSendMessage(text=str(STATE[user]))\n \n STATE[user] = 0\n line_bot_api.reply_message(event.reply_token, ret_message)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000, debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"244137461","text":"import sys\nimport os\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5.QtCore import QUrl\n\nclass TabletFrame(QMainWindow):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.title = 'Figaro Tablet Control'\n\t\tself.setWindowTitle(self.title)\n\t\tself.setGeometry(200,200,800,600)\n\t\thtml_view = HTMLVis(self)\n\t\tself.show()\n\nclass HTMLVis(QWebEngineView):\n\n\tdef __init__(self, parent):\n\t\tsuper().__init__(parent)\n\t\tself.cwd = os.getcwd()\n\t\tself.setGeometry(0,0,800,600)\n\t\turl = QUrl.fromLocalFile(\"{}/index.html\".format(self.cwd))\n\t\tself.load(url)","sub_path":"server/server/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"387438305","text":"class Frequency:\n def __init__(self, arr, item):\n self.arr = arr\n self.item = item\n\n def get_frequency(self):\n count = 0\n\n if item not in arr:\n return count\n else:\n for i in self.arr:\n if i == self.item:\n count += 1\n\n return count\n\nif __name__ == \"__main__\":\n arr = [x for x in input(\"Enter array elements space seperated\").split()]\n item = input(\"Enter the item whose frequency you want to know\")\n\n f1 = Frequency(arr, item)\n print(f1.get_frequency())\n","sub_path":"phase1/freqArrayEle.py","file_name":"freqArrayEle.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"562268404","text":"import mechanicalsoup\nimport sys\nimport re\nfrom bs4 import BeautifulSoup\nfrom test_form import setup_mock_browser\nimport pytest\n\ndef test_submit_online():\n \"\"\"Complete and submit the pizza form at http://httpbin.org/forms/post \"\"\"\n browser = mechanicalsoup.StatefulBrowser()\n browser.set_user_agent('testing https://github.com/hickford/MechanicalSoup')\n browser.open(\"http://httpbin.org/\")\n for link in browser.links():\n if link[\"href\"] == \"/\":\n browser.follow_link(link)\n break\n browser.follow_link(\"forms/post\")\n assert browser.get_url() == \"http://httpbin.org/forms/post\"\n browser.select_form(\"form\")\n browser[\"custname\"] = \"Customer Name Here\"\n browser[\"size\"] = \"medium\"\n browser[\"topping\"] = (\"cheese\")\n browser[\"comments\"] = \"Some comment here\"\n browser.get_current_form().set(\"nosuchfield\", \"new value\", True)\n response = browser.submit_selected()\n json = response.json()\n data = json[\"form\"]\n assert data[\"custname\"] == \"Customer Name Here\"\n assert data[\"custtel\"] == \"\" # web browser submits \"\" for input left blank\n assert data[\"size\"] == \"medium\"\n assert data[\"topping\"] == \"cheese\"\n assert data[\"comments\"] == \"Some comment here\"\n assert data[\"nosuchfield\"] == \"new value\"\n\n assert (json[\"headers\"][\"User-Agent\"] ==\n 'testing https://github.com/hickford/MechanicalSoup')\n # Ensure we haven't blown away any regular headers\n assert set(('Content-Length', 'Host', 'Content-Type', 'Connection', 'Accept',\n 'User-Agent', 'Accept-Encoding')).issubset(json[\"headers\"].keys())\n\n\ndef test_no_404():\n browser = mechanicalsoup.StatefulBrowser()\n resp = browser.open(\"http://httpbin.org/nosuchpage\")\n assert resp.status_code == 404\n\ndef test_404():\n browser = mechanicalsoup.StatefulBrowser(raise_on_404=True)\n with pytest.raises(mechanicalsoup.LinkNotFoundError) as context:\n resp = browser.open(\"http://httpbin.org/nosuchpage\")\n resp = browser.open(\"http://httpbin.org/\")\n assert resp.status_code == 200\n\ndef test_user_agent():\n browser = mechanicalsoup.StatefulBrowser(user_agent='007')\n resp = browser.open(\"http://httpbin.org/user-agent\")\n assert resp.json() == {'user-agent': '007'}\n\ndef test_open_relative():\n # Open an arbitrary httpbin page to set the current URL\n browser = mechanicalsoup.StatefulBrowser()\n browser.open(\"http://httpbin.org/html\")\n\n # Open a relative page and make sure remote host and browser agree on URL\n resp = browser.open_relative(\"/get\")\n assert resp.json()['url'] == \"http://httpbin.org/get\"\n assert browser.get_url() == \"http://httpbin.org/get\"\n\n # Test passing additional kwargs to the session\n resp = browser.open_relative(\"/basic-auth/me/123\", auth=('me', '123'))\n assert browser.get_url() == \"http://httpbin.org/basic-auth/me/123\"\n assert resp.json() == {\"authenticated\": True, \"user\": \"me\"}\n\ndef test_links():\n browser = mechanicalsoup.StatefulBrowser()\n html = '''A Blue Link \n A Red Link '''\n expected = [BeautifulSoup(html).a]\n browser.open_fake_page(html)\n\n # Test StatefulBrowser.links url_regex argument\n assert browser.links(url_regex=\"bl\") == expected\n assert browser.links(url_regex=\"bluish\") == []\n\n # Test StatefulBrowser.links link_text argument\n assert browser.links(link_text=\"A Blue Link\") == expected\n assert browser.links(link_text=\"Blue\") == []\n\n # Test StatefulBrowser.links kwargs passed to BeautifulSoup.find_all\n assert browser.links(string=re.compile('Blue')) == 
expected\n assert browser.links(class_=\"bluelink\") == expected\n assert browser.links(id=\"blue_link\") == expected\n assert browser.links(id=\"blue\") == []\n\n # Test returning a non-singleton\n two_links = browser.links(id=re.compile('_link'))\n assert len(two_links) == 2\n assert two_links == BeautifulSoup(html).find_all('a')\n\n@pytest.mark.parametrize(\"expected_post\", [\n pytest.param(\n [\n ('comment', 'Selecting an input submit'),\n ('diff', 'Review Changes'),\n ('text', 'Setting some text!')\n ], id='input'),\n pytest.param(\n [\n ('comment', 'Selecting a button submit'),\n ('cancel', 'Cancel'),\n ('text', '= Heading =\\n\\nNew page here!\\n')\n ], id='button'),\n])\ndef test_submit_btnName(expected_post):\n '''Tests that the btnName argument chooses the submit button.'''\n browser, url = setup_mock_browser(expected_post=expected_post)\n browser.open(url)\n form = browser.select_form('#choose-submit-form')\n browser['text'] = expected_post[2][1]\n browser['comment'] = expected_post[0][1]\n res = browser.submit_selected(btnName = expected_post[1][0])\n assert(res.status_code == 200 and res.text == 'Success!')\n\ndef test_get_set_debug():\n browser = mechanicalsoup.StatefulBrowser()\n # Debug mode is off by default\n assert(not browser.get_debug())\n browser.set_debug(True)\n assert(browser.get_debug())\n\ndef test_list_links(capsys):\n # capsys is a pytest fixture that allows us to inspect the std{err,out}\n browser = mechanicalsoup.StatefulBrowser()\n links = '''\n Link #1 \n Link #2 \n'''\n browser.open_fake_page('{0}'.format(links))\n browser.list_links()\n out, err = capsys.readouterr()\n expected = 'Links in the current page:{0}'.format(links)\n assert out == expected\n\nif __name__ == '__main__':\n pytest.main(sys.argv)\n","sub_path":"tests/test_stateful_browser.py","file_name":"test_stateful_browser.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"564592001","text":"import torch\nfrom torchvision import models\nfrom torchvision import transforms\nfrom PIL import Image\n\n\nclass MyResnet():\n def __init__(self, model_state_path: str, classes_names_path: str):\n # load the model\n self._model = models.resnet101(pretrained=False)\n state_dict = torch.load(model_state_path)\n self._model.load_state_dict(state_dict)\n # put the network in eval mode\n self._model.eval()\n # create an image transform\n self.transform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225]\n )])\n # load names of all classes\n with open(classes_names_path) as f:\n self.classes = [line.strip() for line in f.readlines()]\n\n def predict(self, img: Image):\n img_t = self.transform(img)\n batch_t = torch.unsqueeze(img_t, 0)\n\n # carry out model inference\n out = self._model(batch_t)\n\n _, index = torch.max(out, 1)\n percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100\n return self.classes[index[0]], percentage[index[0]].item()\n\n# # Forth, print the top 5 classes predicted by the model\n# _, indices = torch.sort(out, descending=True)\n# percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100\n# print([(classes[idx], percentage[idx].item()) for idx in indices[0][:5]])\n","sub_path":"v4-docker-w-minio/model/myresnet.py","file_name":"myresnet.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"88763701","text":"from abc import ABC, abstractmethod\r\nfrom typing import List\r\nfrom typing import Optional\r\nfrom copy import deepcopy\r\nimport re\r\n\r\n\r\nclass Product:\r\n def __init__(self, name: str, price: float):\r\n self.name = name\r\n self.price = price\r\n\r\n def __hash__(self):\r\n return hash((self.name, self.price))\r\n\r\n def __eq__(self, other):\r\n return self.name == other.name and self.price == other.price\r\n\r\n\r\nclass Server(ABC):\r\n n_max_returned_entries = 3\r\n\r\n @abstractmethod\r\n def __init__(self):\r\n super().__init__()\r\n\r\n @abstractmethod\r\n def get_entries(self, n_letters: Optional[int]) -> List[Product]:\r\n pass\r\n\r\n\r\nclass ListServer(Server):\r\n\r\n def __init__(self, productlist: List[Product]):\r\n super().__init__()\r\n self.products = deepcopy(productlist)\r\n\r\n def get_entries(self, n_letters: Optional[int] = None) -> List[Product]:\r\n if n_letters is None:\r\n n_letters = 1\r\n\r\n new_list = []\r\n\r\n for el in self.products:\r\n if re.match('^[a-zA-Z]{{{n}}}\\\\d{{2,3}}$'.format(n=n_letters), el.name):\r\n new_list.append(el)\r\n\r\n if len(new_list) <= self.n_max_returned_entries:\r\n second_list = sorted(new_list, key=lambda el: el.price)\r\n return second_list\r\n else:\r\n raise TooManyProductsFoundError\r\n\r\n\r\nclass MapServer(Server):\r\n def __init__(self, productlist: List[Product]):\r\n super().__init__()\r\n productdict = dict()\r\n for el in productlist:\r\n productdict[el.name] = el\r\n self.products = deepcopy(productdict)\r\n\r\n def get_entries(self, n_letters: Optional[int] = None) -> List[Product]:\r\n if n_letters is None:\r\n n_letters = 1\r\n\r\n new_list = []\r\n\r\n for el in self.products.values():\r\n if re.match('^[a-zA-Z]{{{n}}}\\\\d{{2,3}}$'.format(n=n_letters), el.name):\r\n new_list.append(el)\r\n\r\n if len(new_list) <= self.n_max_returned_entries:\r\n second_list = sorted(new_list, key=lambda el: el.price)\r\n return second_list\r\n else:\r\n raise TooManyProductsFoundError\r\n\r\n\r\nclass Client:\r\n\r\n def __init__(self, city_centre: Server):\r\n self.city_server = city_centre\r\n\r\n def get_total_price(self, n_letters: Optional[int]) -> Optional[float]:\r\n try:\r\n sum = 0\r\n koszyk = self.city_server.get_entries(n_letters)\r\n if len(koszyk) == 0:\r\n return None\r\n for el in koszyk:\r\n sum = sum + el.price\r\n return sum\r\n except TooManyProductsFoundError:\r\n return None\r\n\r\n\r\nclass ServerError(Exception):\r\n pass\r\n\r\n\r\nclass TooManyProductsFoundError(ServerError):\r\n pass\r\n\r\n","sub_path":"servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"354689762","text":"\n\ndata = [ ('A', 'D', 24),\n ('B', 'D', 24),\n ('C', 'H', 192),\n ('D', 'H', 160),\n ('E', 'H', 94),]\n\nfor item in data:\n gubun = item[1]\n amount = item[2]\n salary = 0\n if gubun == 'D':\n salary = amount * 8 * 7560\n else:\n salary = amount * 7560\n\n print(item[0], salary)","sub_path":"calc_salary.py","file_name":"calc_salary.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"138184089","text":"# 引入记录日志的库\nimport logging\nimport voluptuous as vol\nimport homeassistant.helpers.config_validation as cv\nfrom datetime import timedelta\n# track_time_interval是监听时间变化事件的一个函数\nfrom homeassistant.helpers.event import track_time_interval\n \nDOMAIN = \"hello_world\"\nENTITYID = DOMAIN + \".hello_world\"\n\n\n# 预定义配置文件中的key值\nCONF_NAME_TOBE_DISPLAYED = \"name_tobe_displayed\"\nCONF_SLOGON = \"slogon\"\n\n# 预定义缺省的配置值\nDEFAULT_SLOGON = \"积木构建智慧空间!\"\n\nCONF_STEP = \"step\"\nDEFAULT_STEP = 3\n\n# 定义时间间隔为3秒钟\nTIME_BETWEEN_UPDATES = timedelta(seconds=3)\n \n# 在python中,__name__代表模块名字\n_LOGGER = logging.getLogger(__name__)\n\n# 配置文件的样式\nCONFIG_SCHEMA = vol.Schema(\n {\n DOMAIN: vol.Schema(\n {\n # “name_tobe_displayed”在配置文件中是必须存在的(Required),否则报错,它的类型是字符串\n vol.Required(CONF_NAME_TOBE_DISPLAYED): cv.string,\n # “slogon”在配置文件中可以没有(Optional),如果没有缺省值为“积木构建智慧空间!”,它的类型是字符串\n vol.Optional(CONF_SLOGON, default=DEFAULT_SLOGON): cv.string,\n vol.Optional(CONF_STEP,default=DEFAULT_STEP): cv.positive_int,\n }),\n },\n extra=vol.ALLOW_EXTRA)\n\n\n \ndef setup(hass, config):\n\n \"\"\"配置文件加载后,setup被系统调用.\"\"\"\n # config[DOMAIN]代表这个域下的配置信息\n conf = config[DOMAIN]\n # 获得具体配置项信息\n friendly_name = conf.get(CONF_NAME_TOBE_DISPLAYED)\n slogon = conf.get(CONF_SLOGON)\n step = conf.get(CONF_STEP)\n\n _LOGGER.info(\"Get the configuration %s=%s; %s=%s\",\n CONF_NAME_TOBE_DISPLAYED, friendly_name,\n CONF_SLOGON, slogon)\n\n # 根据配置内容设置属性值\n attr = {\"icon\": \"mdi:yin-yang\",\n \"friendly_name\": friendly_name,\n \"slogon\": slogon,\n \"unit_of_measurement\": \"steps\"\n }\n hass.states.set(ENTITYID, '太棒了', attributes=attr)\n\n \n def change_state(call):\n \"\"\"change_state函数切换改变实体的状态.\"\"\"\n # 记录info级别的日志\n _LOGGER.info(\"hachina's change_state service is called.\")\n \n # 切换改变状态值\n if hass.states.get(ENTITYID).state == '太棒了':\n hass.states.set(ENTITYID, '真好', attributes=attr)\n else:\n hass.states.set(ENTITYID, '太棒了', attributes=attr)\n \n # 注册服务hachina.change_state\n hass.services.register(DOMAIN, 'change_state', change_state)\n \n\n\n # 构建类GrowingState\n GrowingState(hass, step, attr)\n\n return True\n\n\nclass GrowingState(object):\n \"\"\"定义一个类,此类中存储了状态与属性值,并定时更新状���.\"\"\"\n \n def __init__(self, hass, step, attr):\n \"\"\"GrwoingState类的初始化函数,参数为hass、step和attr.\"\"\"\n # 定义类中的一些数据\n self._hass = hass\n self._step = step\n self._attr = attr\n self._state = 0\n \n # 在类初始化的时候,设置初始状态\n self._hass.states.set(ENTITYID, self._state, attributes=self._attr)\n \n # 每隔一段时间,更新一下实体的状态\n track_time_interval(self._hass, self.update, TIME_BETWEEN_UPDATES)\n \n def update(self, now):\n \"\"\"在GrowingState类中定义函数update,更新状态.\"\"\"\n _LOGGER.info(\"GrowingState is updating…\")\n \n # 状态值每次增加step\n self._state = self._state + self._step\n \n # 设置新的状态值\n self._hass.states.set(ENTITYID, self._state, attributes=self._attr)\n","sub_path":"custom_components/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"415059083","text":"from django.shortcuts import render\nfrom django.views.generic.base import TemplateView\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, Http404, HttpResponseRedirect\nfrom product.models import Category\nfrom blog.models import Post, Subscriber\n# Create your views here.\n\nclass HomePageView(TemplateView):\n\n template_name = \"home.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"categories\"] = Category.objects.all() \n context[\"posts\"] = Post.objects.filter(public=True).order_by('-created_at')[:3]\n return context\n\n\n@csrf_exempt\ndef subscribe_view(request):\n if request.method == \"POST\" and request.is_ajax():\n\n email = request.POST.get('email', None)\n if email:\n subscriber = Subscriber.objects.filter(email=email)\n \n if not subscriber.exists():\n Subscriber.objects.create(email=email)\n \n elif Subscriber.objects.get(email=email).active == False:\n Subscriber.objects.filter(email=email).update(active = True) \n\n data = { 'registered': True }\n \n return JsonResponse(data)\n raise Http404(\"Página não encontrada.\")","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"254247483","text":"from flask import Flask,jsonify,request\r\n\r\napp=Flask(__name__)\r\n\r\nstores=[\r\n{\r\n \"name\":\"ram\",\r\n \"items\":[{\r\n \"name\":\"item\",\r\n \"price\":12\r\n }]\r\n}\r\n]\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n\treturn \"hello\"\r\n\r\n@app.route(\"/store\")\r\ndef get_stores():\r\n\treturn jsonify({'stores': stores})\r\n\r\n@app.route('/store',methods=['POST'])\r\ndef create_store():\r\n request_data=request.get_json()\r\n new_store={\r\n \"name\":request_data[\"name\"],\r\n \"items\":[]\r\n }\r\n stores.append(new_store)\r\n return jsonify({'stores': stores})\r\n\r\n\r\n\r\n@app.route(\"/store/\", methods=['DELETE'])\r\ndef delete_store(name):\r\n\tfor store in stores:\r\n\t\tif store[\"name\"]==name:\r\n\t\t\tdel store\r\n\t\t\treturn \"done\"\r\n\treturn \"error\"\r\n\r\n@app.route(\"/store/\", methods=['PUT'])\r\ndef put_store(name):\r\n\trequest_data=request.get_json()\r\n\tfor store in stores:\r\n\t\tif store[\"name\"]==name:\r\n\t\t\tstore.update({\"name\":request_data[\"name\"]})\r\n\t\t\treturn \"done\"\r\n\treturn \"Error\"\r\n\r\n\r\n\t\r\n\r\n\r\napp.run(port=5000)","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"613840184","text":"#Alan Pittman & Dionysios Grigoriadis\r\n\r\n#Program to genotype gVCF files in batches to join later for cohort (exome-wide studies etc.)\r\n\r\n#Step_by_step:\r\n#1)Combine gVCF files into a joint gVCF file,\r\n#2)Genotype joint gVCF file\r\n\r\n#Version 1.1\r\n# - Written in Python instead of bash\r\n# - Log files will be written in the output directories\r\n#################################################################################################################\r\nimport os\r\nimport sys\r\nimport subprocess\r\nimport csv\r\nfrom optparse import OptionParser\r\n\r\n#OUR RESOURCES:\r\nfrom dependencies import *\r\nfrom utils import *\r\n\r\n#USER INPUT\r\nparser = OptionParser()\r\nparser.add_option(\"-p\", \"--Project_name\", dest=\"projectname\",\r\n\t\t\t\t help=\"The suffix of the joint vcf you want to filter\")\r\n#parser.add_option(\"-o\", \"--output\", dest=\"output\",\r\n#\t\t\t\t help=\"suffix of the result files (e.g. comb for ./comb.g.vcf.gz\")\r\n\r\n(options, args) = parser.parse_args()\r\n\r\npname = options.projectname\r\n#gname = options.glocname\r\n#output = options.output\r\n\r\n\r\n#pname='postergaard_athos_and_25032019'\r\npname = pname.strip()\r\n\r\n##############################################################\r\n#Set-up working and output directories\r\ndirpath = os.getcwd()\r\ntemp_dir = dirpath+\"/tmp/\"+pname+\"/\"\r\ncohort_dir = dirpath+\"/Filtered_Joint_called_VCFs/\"\r\nout_dir = cohort_dir + pname + \"/\"\r\nvfilt_output = temp_dir+pname+\"_HF.vcf\"\r\nvrecal1_output = temp_dir+pname+\"_SNP.recal\"\r\nvsqr_output = temp_dir+pname+\"_HF_SNP.recal.snps.vcf\"\r\nvrecal2_output = temp_dir+pname+\"_INDEL.recal\"\r\nvsqr_final_output = out_dir+pname+\"_HF4_SNP.recal.snps.indel.vcf\"\r\nvsqr_final_annotated_output = out_dir+pname+\"_HF4_SNP.recal.snps.indel.dbSNP.vcf\"\r\n\r\n\r\nif not os.path.exists(temp_dir):\r\n os.makedirs(temp_dir)\r\n\r\n\r\nif not os.path.exists(cohort_dir):\r\n os.makedirs(cohort_dir)\r\n\r\n\r\n\r\nif not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n##############################################################\r\n##############################################################\r\n##RUNNING THE COMMANDS\r\n#Variant Filtration\r\nprint(\"\\n\")\r\nprint(\"VariantFiltration\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' VariantFiltration -R '+BWAindex+\" -V \"+temp_dir+pname+\".vcf \"+'--genotype-filter-expression \"DP < 6\" ' \\\r\n '--genotype-filter-name \"LowDepth\" --genotype-filter-expression \"GQ < 20.0 && GQ > 0.0\" --genotype-filter-name \"LowGQ\" -O '+vfilt_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'VariantFiltration.log') #n=2\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#os.remove(temp_dir+'temp_vars.txt')\r\n\r\n#VariantRecalibrator\r\nprint(\"\\n\")\r\nprint(\"VariantRecalibrator\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' VariantRecalibrator -R '+BWAindex+\" -V \"+vfilt_output+\" -tranche 100.0 -tranche 99.9 -tranche 99.5 \" \\\r\n \"-tranche 99.0 -tranche 90.0 -mode SNP --tranches-file \"+temp_dir+pname+\"_SNP.tranches --rscript-file \"+temp_dir+pname+\".plots.R \" \\\r\n \"--resource hapmap,known=false,training=true,truth=true,prior=15.0:\"+hapmap+\" --resource omni,known=false,training=true,truth=true,\" \\\r\n \"prior=12.0:\"+omni+\" --resource 1000G,known=false,training=true,truth=false,prior=10.0:\"+G1000+\" --resource dbsnp,known=true,training=\" \\\r\n \"false,truth=false,prior=2.0:\"+dbsnp+\" -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR --output 
\"+vrecal1_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'VariantRecalibrator.log') #n=2\r\n\r\n#os.remove(gvcf_output)\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#ApplyVQSR\r\nprint(\"\\n\")\r\nprint(\"ApplyVQSR\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' ApplyVQSR -R '+BWAindex+\" -mode SNP --truth-sensitivity-filter-level 99.5 -V \"+vfilt_output+\" \" \\\r\n \"--tranches-file \"+temp_dir+pname+\"_SNP.tranches --recal-file \"+vrecal1_output+\" -O \"+vsqr_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'ApplyVQSR.log') #n=2\r\n\r\n#os.remove(gvcf_output)\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#VariantRecalibrator INDELS\r\nprint(\"\\n\")\r\nprint(\"VariantRecalibrator INDELS\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' VariantRecalibrator -R '+BWAindex+\" -V \"+vfilt_output+\" -tranche 100.0 -tranche 99.9 -tranche 99.5 \" \\\r\n \"-tranche 99.0 -tranche 90.0 -mode INDEL --tranches-file \"+temp_dir+pname+\"_INDEL.tranches --rscript-file \"+temp_dir+pname+\"_INDEL.plots.R \" \\\r\n \"--resource mills,known=false,training=true,truth=true,prior=12.0:\"+mills+\" --resource dbsnp,known=true,training=\" \\\r\n \"false,truth=false,prior=2.0:\"+dbsnp+\" -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR --output \"+vrecal2_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'VariantRecalibrator_INDEL.log') #n=2\r\n\r\n#os.remove(gvcf_output)\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#ApplyVQSR INDELS\r\nprint(\"\\n\")\r\nprint(\"ApplyVQSR INDELS\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' ApplyVQSR -R '+BWAindex+\" -mode INDEL --truth-sensitivity-filter-level 98.0 -V \"+vsqr_output+\" \" \\\r\n \"--tranches-file \"+temp_dir+pname+\"_INDEL.tranches --recal-file \"+vrecal2_output+\" -O \"+vsqr_final_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'ApplyVQSR_INDEL.log') #n=2\r\n\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#VariantAnnotator\r\nprint(\"\\n\")\r\nprint(\"Variant Annotator\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' VariantAnnotator -R '+BWAindex+\" -V \"+vsqr_final_output+\" \" \\\r\n \"--dbsnp \"+refknownsitesSNPS+\" -O \"+vsqr_final_annotated_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'variantannotator.log') #n=2\r\n\r\nprint(comm)\r\nprint(\"\\n\")","sub_path":"pipeline_filter_vqsr_jointvcf_v1.1.py","file_name":"pipeline_filter_vqsr_jointvcf_v1.1.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"456298576","text":"import pymongo\nfrom pymongo import MongoClient\nimport re\nimport requests\nimport json\nfrom const import *\n\nmongo_client = MongoClient()\nmongo_db = mongo_client.darth\ncollection_users = mongo_db.users\ncollection_users_twin = mongo_db.users_twin\ncollection_stats = mongo_db.users_stats\n\n# mongo_db.users\n# {'user': telega_username, 'ally_code': ally_code, 'swgoh_name': swgoh_name}\n\n# mongo_db.users_twin\n# {'user': telega_username, 'ally_code': ally_code, 'swgoh_name': swgoh_name}\n\ndef handler_reg(bot,message,my_logger):\n\n\ttry:\n\n\t\tRREG = False\n\t\tif (message.text.startswith('!rreg') or message.text.startswith('!ррег')) and message.from_user.id in ADMINS:\n\t\t\tRREG = True # админский бэкдор для форсированной регистрации !rreg tgusername allycode nick name name name in game\n\n\t\tmsg = \"\"\n\t\tally_code = False\n\t\ttelega_username = False\n\t\ttele_id = None\n\n\t\ts = message.text.split()\n\n\t\tif len(s) == 2:\n\t\t\t# передано только 1 параметр, предполагаем что это игрок регистрирует сам себя и указал ally code\n\t\t\tif message.from_user.username is not None: # у игрока установлен username в телеге\n\t\t\t\tif re.match(r\"(\\D*\\d){9}\", s[1]) and len(s[1]) == 9: # проверим что код = девятизначное число\n\t\t\t\t\tally_code = s[1]\n\t\t\t\t\ttelega_username = message.from_user.username\n\t\t\t\t\ttele_id = message.from_user.id\n\t\t\t\telse:\n\t\t\t\t\tbot.reply_to(message, f'Регистрация невозможна, код союзника указан неверно (правильный формат - 123456789)')\n\t\t\t\t\tmy_logger.info(\"Ally code in wrong format\")\n\t\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tbot.reply_to(message,f\"Регистрация невозможна, у вас не задан username в телеграме! Задать можно в настройках приложения\")\n\t\t\t\tmy_logger.info(\"Username is None\")\n\t\t\t\treturn\n\n\t\t\t# на выходе имеем установленный ally_code и telega_username\n\n\t\telif len(s)==3:\n\t\t\t# в !reg передано два параметра\n\t\t\tif re.match(r\"(\\D*\\d){9}\", s[2]) and len(s[2]) == 9: # проверим что код = девятизначное число\n\t\t\t\tally_code = s[2]\n\t\t\t\ttelega_username = s[1].replace(\"@\",\"\")\n\t\t\telse:\n\t\t\t\tbot.reply_to(message, f'Регистрация невозможна, код союзника указан неверно (правильный формат - 123456789)')\n\t\t\t\tmy_logger.info(\"Ally code in wrong format\")\n\t\t\t\treturn\n\n\t\t\t# на выходе имеем установленный ally_code и telega_username\n\n\t\telif len(s)>3 and RREG: # админ передал 4 параметра - !rreg tgusername allycode nick name name name in game\n\n\t\t\tif re.match(r\"(\\D*\\d){9}\", s[2]) and len(s[2]) == 9: # проверим что код = девятизначное число\n\t\t\t\tally_code = s[2]\n\t\t\t\ttelega_username = s[1].replace(\"@\",\"\")\n\t\t\t\tgame_nick = \"\"\n\t\t\t\tfor i in range(3,len(s)):\n\t\t\t\t\tgame_nick += f\"{s[i]} \"\n\t\t\t\tgame_nick = game_nick.rstrip()\n\t\t\t\tbot.reply_to(message, f'Попытка зарегистрировать {telega_username} с кодом {ally_code} и ником в игре \"{game_nick}\"')\n\n\t\telse:\n\t\t\tbot.reply_to(message, 'Регистрация невозможна! 
Корректное использование:\\n`!reg имяВТелеге кодСоюзника` или `!reg кодСоюзника` для регистрации себя', parse_mode=\"Markdown\")\n\t\t\tmy_logger.info(\"Registration not possible, wrong command format\")\n\n\n\t\t# далее исполняется только если код союзника = девятизначное число в ally_code, а также установлен telega_username\n\t\tif ally_code and telega_username:\n\t\t\t\n\t\t\tif not RREG: # получим данные из swgoh.gg\n\t\t\t\tr = requests.get(f'{SWGOH_URL}/{ally_code}')\n\t\t\t\tjdata = r.json()\n\t\t\t\tif jdata:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tbot.reply_to(message, f'Регистрация невозможна, указанный код союзника не найден на https://swgoh.gg/p/{ally_code}/')\n\t\t\t\t\tmy_logger.info(\"Ally code not found\")\n\t\t\t\t\treturn\n\t\t\t\tplayer_name = jdata['data']['name']\n\t\t\telif game_nick is not None: # это запрос на регистрацию от админа с указанным game_nick, который уже должен быть заполнен\n\t\t\t\tplayer_name = game_nick\n\t\t\telse:\n\t\t\t\tbot.reply_to(message, f'Регистрация невозможна, указанный код союзника не найден на https://swgoh.gg/p/{ally_code}/')\n\t\t\t\tmy_logger.info(\"Ally code not found\")\n\t\t\t\treturn\n\n\t\t\t# теперь так же имеем player_name\n\n\t\t\t# проверим, может игрок уже зарегистрирован\n\t\t\tfound_user = collection_users.find_one({'user': telega_username})\n\t\t\tif found_user: # действительно, уже \n\t\t\t\tmsg = f'Пользователь {telega_username} уже зарегистрирован! '\n\t\t\t\tmsg += f'Если нужно изменить информацию - сначала надо удалить пользователя через команду `!forget {telega_username}`'\n\t\t\t\tbot.send_message(message.chat.id, msg, parse_mode=\"Markdown\")\n\t\t\t\tmy_logger.info(\"User already exists, forget first\")\n\t\t\telse: # регистрируем нового пользователя\n\t\t\t\tnew_user = {'user': telega_username, 'ally_code': ally_code, 'swgoh_name': player_name}\n\t\t\t\tif tele_id is not None:\n\t\t\t\t\tnew_user['tele_id'] = tele_id\n\t\t\t\tcollection_users.insert_one(new_user)\n\t\t\t\tbot.send_message(message.chat.id, f'Пользователь {telega_username} успешно зарегистрирован! Найденное имя в SWGOH: {player_name}')\n\t\t\t\tmy_logger.info(f\"Registration successful! Found SWGOH name {player_name}\")\n\n\texcept Exception as e:\n\n\t\tbot.reply_to(message, \"Произошла ошибка, попробуйте позже!\")\n\t\tmy_logger.info(f\"Something went wrong during !reg: {e}\")\n\n\n\n\ndef handler_twin_reg(bot,message,my_logger):\n\n\t# бэкдор для регистрации двойника, доступен только админам\n\n\ttry:\n\n\t\tally_code = False\n\t\ttelega_username = False\n\n\t\ts = message.text.split()\n\n\t\tif len(s)>3: # должно быть передано не менее 4 параметров, последний из которых - имя в игре: !twinreg tgusername allycode nick name name name in game\n\n\t\t\tif re.match(r\"(\\D*\\d){9}\", s[2]) and len(s[2]) == 9: # проверим что код = девятизначное число\n\t\t\t\tally_code = s[2]\n\t\t\t\ttelega_username = s[1].replace(\"@\",\"\")\n\t\t\t\tgame_nick = \"\"\n\t\t\t\tfor i in range(3,len(s)):\n\t\t\t\t\tgame_nick += f\"{s[i]} \"\n\t\t\t\tgame_nick = game_nick.rstrip()\n\t\t\t\tbot.reply_to(message, f'Попытка зарегистрировать {telega_username} с кодом {ally_code} и ником в игре \"{game_nick}\"')\n\t\t\telse:\n\t\t\t\tbot.reply_to(message, 'Регистрация невозможна! Корректное использование:\\n`!twinreg username code nick_in_game', parse_mode=\"Markdown\")\n\t\t\t\tmy_logger.info(\"Registration not possible, wrong command format\")\n\n\t\telse:\n\t\t\tbot.reply_to(message, 'Регистрация невозможна! 
Корректное использование:\\n`!twinreg username code nick_in_game', parse_mode=\"Markdown\")\n\t\t\tmy_logger.info(\"Registration not possible, wrong command format\")\n\n\n\t\t# далее исполняется только если код союзника = девятизначное число в ally_code, а также установлен telega_username + game_nick\n\t\tif ally_code and telega_username and game_nick:\n\t\t\t\n\t\t\tnew_user = {'user': telega_username, 'ally_code': ally_code, 'swgoh_name': game_nick}\n\t\t\tcollection_users_twin.insert_one(new_user)\n\t\t\tbot.send_message(message.chat.id, f'Пользователь {telega_username} успешно зарегистрирован в качестве дубля! Имя в SWGOH: {game_nick}')\n\t\t\tmy_logger.info(f\"Twin registration successful! SWGOH name {game_nick}\")\n\n\n\texcept Exception as e:\n\n\t\tbot.reply_to(message, \"Произошла ошибка, попробуйте позже!\")\n\t\tmy_logger.info(f\"Something went wrong during !reg: {e}\")","sub_path":"handler_reg.py","file_name":"handler_reg.py","file_ext":"py","file_size_in_byte":8403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"318942602","text":"from socket import *\n\n# Parameters\nTCP_IP = 'localhost'\nTCP_PORT = 12003\nBUFFER_SIZE = 1024\n\n# Prepare a client socket\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((TCP_IP, TCP_PORT))\n\n# Send message to GET HTML file\nMESSAGE = b'GET HelloWorld.html'\nclientSocket.send(MESSAGE)\n\n# GET the full content from the HTML file\nfull_content = ''\n\nwhile True:\n data = clientSocket.recv(BUFFER_SIZE)\n if not data:\n break\n data = data.decode('utf-8')\n full_content += data\n \nwith open('files_from_server/HelloWorld.html', 'w') as f:\n f.write(full_content)\n \nprint(\"received data:\", full_content)\n\n# Close Client\nclientSocket.close()\nprint(\"\\n\\nClient close successfully!\")\n\n","sub_path":"P1/WebServer/web_client.py","file_name":"web_client.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"417590443","text":"from setuptools import setup, find_packages\n\n__author__ = 'Matt Ryan '\n\n\nwith open(\"README.md\", \"r\") as fh:\n long_description = fh.read()\n\n\ndef parse_reqs(file_path):\n with open(file_path, 'rt') as fobj:\n lines = map(str.strip, fobj)\n lines = filter(None, lines)\n lines = filter(lambda x: x.startswith(\"#\"), lines)\n return tuple(lines)\n\n\nsetup(\n name=\"napalm-edgeswitch\",\n version=\"0.0.2\",\n packages=find_packages(),\n author=\"Juan Gomez\",\n author_email=\"jgomez@phicus.es\",\n description=\"Network Automation and Programmability Abstraction Layer driver for Ubiquti Edgeswitch using SSH\",\n long_description_content_type=\"text/markdown\",\n long_description=long_description,\n classifiers=[\n 'Topic :: Utilities',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n url=\"https://github.com/johnbarneta/napalm-edgeswitch\",\n include_package_data=True,\n install_requires=(\n 'napalm==2.*',\n 'netmiko==2.*',\n ),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"382642182","text":"import keras\nfrom keras import layers\nfrom keras.models import Sequential, load_model\nfrom keras.preprocessing.image import load_img, img_to_array\nimport numpy as np\nbatch_size = 32\nimg_height = 180\nimg_width = 180\ntrain_dg = keras.preprocessing.image.ImageDataGenerator(rescale=1./255, validation_split=0.2)\ntrain_dg = keras.preprocessing.image.ImageDataGenerator(rescale=1./255)\ntrain_generator = train_dg.flow_from_directory(\n 'Large',\n class_mode='binary', seed=123)\nvalidation_generator = train_dg.flow_from_directory(\n 'Large',\n class_mode='binary', seed=123)\nclass_names = train_dg.class_names\nnormalization_layer = layers.experimental.preprocessing.Rescaling(1./255)\nnum_classes = len(class_names)\n\nmodel = Sequential([\n layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, img_width, 3)),\n layers.Conv2D(16, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(32, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Conv2D(64, 3, padding='same', activation='relu'),\n layers.MaxPooling2D(),\n layers.Flatten(),\n layers.Dense(128, activation='relu'),\n layers.Dense(num_classes)\n])\nmodel.compile(optimizer='adam',\n loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n metrics=['accuracy'])\nepochs=10\nhistory = model.fit(\n train_ds,\n validation_data=val_ds,\n epochs=epochs\n)\nwith open('model.h5', 'w') as f:\n f.close()\nmodel.save('model.h5')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"107448057","text":"from numpy.testing import assert_allclose, run_module_suite\nimport numpy as np\nfrom pyins import dcm\n\n\ndef test_from_basic():\n A1 = dcm.from_basic(1, 30)\n A1_true = np.array([\n [1, 0, 0],\n [0, 0.5 * 3**0.5, -0.5],\n [0, 0.5, 0.5 * 3**0.5]\n ])\n assert_allclose(A1, A1_true, rtol=1e-10)\n\n A2 = dcm.from_basic(2, 30)\n A2_true = np.array([\n [0.5 * 3 ** 0.5, 0, 0.5],\n [0, 1, 0],\n [-0.5, 0, 0.5 * 3**0.5]\n ])\n assert_allclose(A2, A2_true, rtol=1e-10)\n\n A3 = dcm.from_basic(3, 30)\n A3_true = np.array([\n [0.5 * 3**0.5, -0.5, 0],\n [0.5, 0.5 * 3**0.5, 0],\n [0, 0, 1]\n ])\n assert_allclose(A3, A3_true, rtol=1e-10)\n\n A4 = dcm.from_basic(3, [30, 60])\n A4_true = np.array([\n [[0.5 * 3**0.5, -0.5, 0],\n [0.5, 0.5 * 3**0.5, 0],\n [0, 0, 1]],\n [[0.5, -0.5 * 3**0.5, 0],\n [0.5 * 3**0.5, 0.5, 0],\n [0, 0, 1]]\n ])\n assert_allclose(A4, A4_true, rtol=1e-10)\n\n\ndef test_from_rv():\n rv1 = np.array([1, 0, 0]) * np.pi / 3\n A1 = dcm.from_rv(rv1)\n A1_true = np.array([[1, 0, 0],\n [0, 0.5, -0.5 * np.sqrt(3)],\n [0, 0.5 * np.sqrt(3), 0.5]])\n assert_allclose(A1, A1_true, rtol=1e-10)\n\n rv2 = np.array([1, 1, 1]) * 1e-10\n A2 = dcm.from_rv(rv2)\n A2_true = np.array([[1, -1e-10, 1e-10],\n [1e-10, 1, -1e-10],\n [-1e-10, 1e-10, 1]])\n assert_allclose(A2, A2_true, rtol=1e-10)\n\n n = np.array([-0.5, 1/np.sqrt(2), 0.5])\n theta = np.pi / 6\n rv3 = n * theta\n s = np.sin(theta)\n c = np.cos(theta)\n\n A3 = dcm.from_rv(rv3)\n A3_true = np.array([\n [(1-c)*n[0]*n[0] + c, (1-c)*n[0]*n[1] - n[2]*s,\n (1-c)*n[0]*n[2] + s*n[1]],\n [(1-c)*n[1]*n[0] + s*n[2], (1-c)*n[1]*n[1] + c,\n (1-c)*n[1]*n[2] - s*n[0]],\n [(1-c)*n[2]*n[0] - s*n[1], (1-c)*n[2]*n[1] + s*n[0],\n (1-c)*n[2]*n[2] + c]\n ])\n assert_allclose(A3, A3_true, rtol=1e-10)\n\n rv = np.empty((30, 3))\n rv[:10] = rv1\n rv[10:20] = rv2\n rv[20:] = rv3\n A_true = np.empty((30, 3, 3))\n A_true[:10] = A1_true\n A_true[10:20] = A2_true\n A_true[20:] = A3_true\n A = dcm.from_rv(rv)\n assert_allclose(A, A_true, rtol=1e-8)\n\n rv = rv[::4]\n A_true = A_true[::4]\n A = dcm.from_rv(rv)\n assert_allclose(A, A_true, rtol=1e-10)\n\n\ndef test_to_rv():\n A1 = np.identity(3)\n rv1 = dcm.to_rv(A1)\n assert_allclose(rv1, 0, atol=1e-10)\n\n rv2 = 1e-10 * np.ones(3)\n A2 = np.array([\n [1, -rv2[2], rv2[1]],\n [rv2[2], 1, -rv2[0]],\n [-rv2[1], rv2[0], 1]\n ])\n assert_allclose(dcm.to_rv(A2), rv2, rtol=1e-10)\n\n A3 = np.array([\n [1/2**0.5, 1/2**0.5, 0],\n [-1/2**0.5, 1/2**0.5, 0],\n [0, 0, 1]\n ])\n rv3 = np.array([0, 0, -np.pi / 4])\n assert_allclose(dcm.to_rv(A3), rv3, rtol=1e-10)\n\n A = np.empty((30, 3, 3))\n A[:10] = A1\n A[10:20] = A2\n A[20:30] = A3\n rv = np.empty((30, 3))\n rv[:10] = rv1\n rv[10:20] = rv2\n rv[20:] = rv3\n assert_allclose(dcm.to_rv(A), rv, rtol=1e-10)\n\n A = A[::4]\n rv = rv[::4]\n assert_allclose(dcm.to_rv(A), rv, rtol=1e-10)\n\n\ndef test_dcm_rv_conversion():\n # Test conversions on random inputs.\n rng = np.random.RandomState(0)\n\n axis = rng.randn(20, 3)\n axis /= np.linalg.norm(axis, axis=1)[:, np.newaxis]\n angle = rng.uniform(-np.pi, np.pi, size=axis.shape[0])\n rv = axis * angle[:, np.newaxis]\n rv[::5] *= 1e-8\n\n A = dcm.from_rv(rv)\n rv_from_A = dcm.to_rv(A)\n assert_allclose(rv, rv_from_A, rtol=1e-10)\n\n rv = rv[:5]\n A = A[:5]\n rv_from_A = dcm.to_rv(A)\n assert_allclose(rv, rv_from_A, rtol=1e-10)\n\n\ndef test_dcm_quat_conversion():\n np.random.seed(0)\n h = np.random.uniform(0, 360, 20)\n p = np.random.uniform(-90, 90, 20)\n r = np.random.uniform(-180, 180, 20)\n\n As = dcm.from_hpr(h, p, 
r)\n for A in As:\n q = dcm.to_quat(A)\n Ac = dcm.from_quat(q)\n assert_allclose(Ac, A, rtol=1e-14, atol=1e-16)\n\n q = dcm.to_quat(As)\n Asc = dcm.from_quat(q)\n assert_allclose(Asc, As, rtol=1e-14, atol=1e-16)\n\n\ndef test_dcm_mrp_conversion():\n np.random.seed(1)\n h = np.random.uniform(0, 360, 100)\n p = np.random.uniform(-90, 90, 100)\n r = np.random.uniform(-180, 180, 100)\n\n As = dcm.from_hpr(h, p, r)\n for A in As:\n grp = dcm.to_mrp(A)\n Ac = dcm.from_mrp(grp)\n assert_allclose(Ac, A, rtol=1e-14, atol=1e-15)\n\n grp = dcm.to_mrp(As)\n Asc = dcm.from_mrp(grp)\n assert_allclose(Asc, As, rtol=1e-14, atol=1e-15)\n\n\ndef test_dcm_gibbs_conversion():\n np.random.seed(1)\n h = np.random.uniform(0, 360, 100)\n p = np.random.uniform(-90, 90, 100)\n r = np.random.uniform(-180, 180, 100)\n\n As = dcm.from_hpr(h, p, r)\n for A in As:\n grp = dcm.to_gibbs(A)\n Ac = dcm.from_gibbs(grp)\n assert_allclose(Ac, A, rtol=1e-14, atol=1e-15)\n\n grp = dcm.to_gibbs(As)\n Asc = dcm.from_gibbs(grp)\n assert_allclose(Asc, As, rtol=1e-14, atol=1e-15)\n\n\ndef test_from_hpr():\n hpr1 = [30, 0, 0]\n A_true1 = np.array([[np.sqrt(3)/2, 0.5, 0],\n [-0.5, np.sqrt(3)/2, 0],\n [0, 0, 1]])\n assert_allclose(dcm.from_hpr(*hpr1), A_true1, rtol=1e-10)\n\n hpr2 = np.rad2deg([1e-10, 3e-10, -1e-10])\n A_true2 = np.array([[1, 1e-10, -1e-10],\n [-1e-10, 1, -3e-10],\n [1e-10, 3e-10, 1]])\n assert_allclose(dcm.from_hpr(*hpr2), A_true2, rtol=1e-8)\n\n hpr3 = [45, -30, 60]\n A_true3 = np.array([\n [-np.sqrt(6)/8 + np.sqrt(2)/4, np.sqrt(6)/4,\n np.sqrt(2)/8 + np.sqrt(6)/4],\n [-np.sqrt(2)/4 - np.sqrt(6)/8, np.sqrt(6)/4,\n -np.sqrt(6)/4 + np.sqrt(2)/8],\n [-0.75, -0.5, np.sqrt(3)/4]\n ])\n assert_allclose(dcm.from_hpr(*hpr3), A_true3, rtol=1e-8)\n\n hpr = np.vstack((hpr1, hpr2, hpr3)).T\n A = np.array((A_true1, A_true2, A_true3))\n assert_allclose(dcm.from_hpr(*hpr), A, rtol=1e-8)\n\n\ndef test_to_hpr():\n A1 = np.identity(3)\n hpr1 = np.zeros(3)\n assert_allclose(dcm.to_hpr(A1), hpr1, atol=1e-10)\n\n A2 = np.array([[1, 1e-10, -2e-10],\n [-1e-10, 1, 3e-10],\n [2e-10, -3e-10, 1]])\n hpr2 = np.rad2deg([1e-10, -3e-10, -2e-10])\n assert_allclose(dcm.to_hpr(A2), hpr2, atol=1e-10)\n\n A3 = np.array([\n [1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [0, 1, 0],\n [-np.sqrt(2), 0, np.sqrt(2)]\n ])\n hpr3 = np.array([0, 0, 45])\n assert_allclose(dcm.to_hpr(A3), hpr3, rtol=1e-10)\n\n A4 = np.array([[-1, 0, 0], [0, 0, -1], [0, -1, 0]])\n hpr4 = np.array([180, -90, 0])\n assert_allclose(dcm.to_hpr(A4), hpr4, rtol=1e-10)\n\n A = np.empty((20, 3, 3))\n A[:5] = A1\n A[5:10] = A2\n A[10:15] = A3\n A[15:] = A4\n hpr = np.empty((20, 3))\n hpr[:5] = hpr1\n hpr[5:10] = hpr2\n hpr[10:15] = hpr3\n hpr[15:20] = hpr4\n\n ret = dcm.to_hpr(A)\n for i in range(3):\n assert_allclose(ret[i], hpr[:, i], rtol=1e-10)\n\n\ndef test_dcm_hpr_conversion():\n rng = np.random.RandomState(0)\n\n h = rng.uniform(0, 360, 20)\n p = rng.uniform(-90, 90, 20)\n r = rng.uniform(-180, 180, 20)\n\n A = dcm.from_hpr(h, p, r)\n h_r, p_r, r_r = dcm.to_hpr(A)\n\n assert_allclose(h, h_r, rtol=1e-10)\n assert_allclose(p, p_r, rtol=1e-10)\n assert_allclose(r, r_r, rtol=1e-10)\n\n\ndef test_from_llw():\n llw1 = np.array([90, -90, 0])\n A1 = np.identity(3)\n assert_allclose(dcm.from_llw(*llw1), A1, rtol=1e-10, atol=1e-10)\n assert_allclose(dcm.from_llw(*llw1[:2]), A1, rtol=1e-10, atol=1e-10)\n\n llw2 = np.array([90, -90, np.rad2deg(1e-9)])\n A2 = np.array([[1, -1e-9, 0], [1e-9, 1, 0], [0, 0, 1]])\n assert_allclose(dcm.from_llw(*llw2), A2, rtol=1e-10, atol=1e-10)\n\n llw3 = np.array([-30, -45, 
90])\n A3 = np.array([[np.sqrt(2)/4, -np.sqrt(2)/2, np.sqrt(6)/4],\n [-np.sqrt(2)/4, -np.sqrt(2)/2, -np.sqrt(6)/4],\n [np.sqrt(3)/2, 0, -0.5]])\n\n assert_allclose(dcm.from_llw(*llw3), A3, rtol=1e-10, atol=1e-10)\n\n A4 = np.array([[2**0.5/2, 2**0.5/4, 6**0.5/4],\n [2**0.5/2, -2**0.5/4, -6**0.5/4],\n [0, 3**0.5/2, -0.5]])\n assert_allclose(dcm.from_llw(*llw3[:2]), A4, rtol=1e-10, atol=1e-10)\n\n llw = np.empty((15, 3))\n llw[:5] = llw1\n llw[5:10] = llw2\n llw[10:] = llw3\n A = np.empty((15, 3, 3))\n A[:5] = A1\n A[5:10] = A2\n A[10:] = A3\n assert_allclose(dcm.from_llw(*llw.T), A, rtol=1e-10, atol=1e-10)\n\n\ndef test_to_llw():\n A1 = np.identity(3)\n llw1 = np.array([90, 0, -90])\n assert_allclose(dcm.to_llw(A1), llw1, rtol=1e-10)\n\n A2 = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n llw2 = np.array([0, 0, 0])\n assert_allclose(dcm.to_llw(A2), llw2, atol=1e-10)\n\n A = np.empty((10, 3, 3))\n A[:5] = A1\n A[5:] = A2\n llw = np.empty((10, 3))\n llw[:5] = llw1\n llw[5:] = llw2\n\n ret = dcm.to_llw(A)\n for i in range(3):\n assert_allclose(ret[i], llw[:, i], rtol=1e-10, atol=1e-10)\n\n\ndef test_dcm_llw_conversion():\n rng = np.random.RandomState(0)\n\n lat = rng.uniform(-90, 90, 20)\n lon = rng.uniform(-180, 180, 20)\n wan = rng.uniform(-180, 180, 20)\n\n A = dcm.from_llw(lat, lon, wan)\n lat_r, lon_r, wan_r = dcm.to_llw(A)\n\n assert_allclose(lon_r, lon, rtol=1e-10)\n assert_allclose(lat_r, lat, rtol=1e-10)\n assert_allclose(wan_r, wan, rtol=1e-10)\n\n\ndef test_dcm_Spline():\n ht = [0, 45, 90]\n C = dcm.from_hpr(ht, 0, 0)\n t = [0, 45, 90]\n s = dcm.Spline(t, C)\n\n t_test = [0, 30, 60, 90]\n C_test = s(t_test)\n h, p, r = dcm.to_hpr(C_test)\n assert_allclose(h, [0, 30, 60, 90], rtol=1e-14, atol=1e-16)\n assert_allclose(p, 0, atol=1e-16)\n assert_allclose(r, 0, atol=1e-16)\n\n omega = np.rad2deg(s(t_test, 1))\n assert_allclose(omega[:, 0], 0, atol=1e-16)\n assert_allclose(omega[:, 1], 0, atol=1e-6)\n assert_allclose(omega[:, 2], -1)\n\n beta = np.rad2deg(s(t_test, 2))\n assert_allclose(beta, 0, atol=1e-16)\n\n t = np.linspace(0, 100, 101)\n ht = 10 * t + 5 * np.sin(2 * np.pi * t / 10)\n pt = 7 * t + 3 * np.sin(2 * np.pi * t / 10 + 2)\n rt = -3 * t + 3 * np.sin(2 * np.pi * t / 10 - 2)\n C = dcm.from_hpr(ht, pt, rt)\n s = dcm.Spline(t, C)\n\n Cs = s(t[::-1])\n assert_allclose(Cs[::-1], C)\n\n\ndef test_match_vectors():\n Cab_true = dcm.from_hpr(20, -10, 5)\n vb = np.array([\n [0, 1, 0],\n [0, 0, 1]\n ])\n va = vb.dot(Cab_true.T)\n\n Cab = dcm.match_vectors(va, vb)\n assert_allclose(Cab, Cab_true, atol=1e-16)\n\n Cab = dcm.match_vectors(va, vb, [200, 1])\n assert_allclose(Cab, Cab_true, atol=1e-16)\n\n rng = np.random.RandomState(0)\n vb = rng.rand(100, 3)\n vb /= np.linalg.norm(vb, axis=1)[:, None]\n va = vb.dot(Cab_true.T)\n Cab = dcm.match_vectors(va, vb)\n assert_allclose(Cab, Cab_true, atol=1e-16)\n\n\nif __name__ == '__main__':\n run_module_suite()\n","sub_path":"pyins/tests/test_dcm.py","file_name":"test_dcm.py","file_ext":"py","file_size_in_byte":10934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"261066023","text":"\nfrom functools import cmp_to_key\n\ndef compare(A, B):\n ab = A + B\n ba = B + A \n if int(ab) - int(ba) > 0:\n return -1\n else:\n return 1\n\n\ndef solution(numbers):\n numbers = list(map(str, numbers))\n numbers.sort(key = cmp_to_key(compare))\n ret = ''.join(numbers)\n while len(ret) > 1 and ret[0] == '0':\n ret = ret[1:]\n \n return ret\n","sub_path":"programmers/42746.py","file_name":"42746.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"621325418","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/8/26 9:12\n# @Author : Tony\n\"\"\"神经网络测试用例\"\"\"\nimport sys\n\nimport numpy as np\nfrom datetime import datetime\n\nfrom app.cta_strategy.neural_network import NeuralNetwork\nfrom trader.constant import Exchange, Interval\nfrom vnpy.trader.database import database_manager\n\n\ndef make_input_data(raw_data):\n input_max_high = max([x.high_price for x in raw_data])\n input_min_low = min([x.low_price for x in raw_data])\n input_price_range = input_max_high - input_min_low\n input_max_volume = max([x.volume for x in raw_data])\n\n def parse_price(price):\n return ((price - input_min_low) / input_price_range) + 0.001\n\n return np.array([[parse_price(x.high_price), parse_price(x.open_price), parse_price(x.low_price)\n , parse_price(x.close_price), x.volume / input_max_volume] for x in raw_data])\n\n\ndef make_target_data(raw_data, last_close):\n max_high = max([x.high_price for x in raw_data])\n target_percent = (max_high - last_close) / last_close\n\n target_data_label = np.zeros(output_nodes) + 0.01\n if target_percent > long_profit_percent: # 多开信号\n target_data_label[1] = 0.99\n elif target_percent < - short_profit_percent: # 空开信号\n target_data_label[2] = 0.99\n else:\n target_data_label[0] = 0.99\n\n return target_data_label, target_percent, max_high\n\n\nif __name__ == '__main__':\n input_data_len = 100 # 输入X根分钟K线的数据(高开低收量)= x * 5 个数据点\n target_data_len = 15 # 预测10分钟后的高点\n long_profit_percent = 0.002 # 1万20 盈利点 - 1万1.5 * 2 手续费 = 17 元 单笔\n short_profit_percent = 0.002 # 1万20 盈利点 - 1万1.5 * 2 手续费 = 17 元 单笔\n\n input_nodes = input_data_len * 5\n hidden_nodes = input_data_len * 7\n output_nodes = 3\n learning_rate = 0.1\n\n symbol = 'rb1910'\n exchange = Exchange.SHFE\n interval = Interval.MINUTE\n start = datetime(2019, 6, 10)\n end = datetime.now()\n\n neuralNetwork = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n print(\"run nerual network\")\n\n bar_data = database_manager.load_bar_data(symbol, exchange, interval, start, end)\n bar_data_len = len(bar_data)\n print(f\"load bar data size {bar_data_len}, {interval}\")\n\n if bar_data_len < 1000:\n print(\"need more data >= 50000\")\n sys.exit()\n\n test_data_len = 200 # 测试集\n train_data_len = bar_data_len - test_data_len # 训练集\n print(f\"train data size: {train_data_len}, test data size: {test_data_len}\")\n\n epochs = 1\n for e in range(epochs):\n print(\"Start epoch: \", e)\n train_range = train_data_len - input_data_len - target_data_len + 1\n for train_index in range(train_range):\n input_bar_data = bar_data[train_index:train_index + input_data_len]\n target_bar_data = bar_data[train_index + input_data_len:train_index + input_data_len + target_data_len]\n\n input_data = make_input_data(input_bar_data).reshape(1, input_nodes)\n target_data, _, _ = make_target_data(target_bar_data, input_bar_data[-1].close_price)\n\n neuralNetwork.train(input_data, target_data)\n pass\n\n test_range = test_data_len - input_data_len - target_data_len + 1\n print(f\"Start test range: {test_range}, date: {bar_data[train_data_len].datetime}\")\n\n scordcard = []\n prediction_result = []\n actual_result = []\n for test_index in range(test_range):\n test_target_index = train_data_len + test_index + input_data_len\n\n input_bar_data = bar_data[train_data_len + test_index: test_target_index]\n target_bar_data = bar_data[test_target_index: test_target_index + target_data_len]\n\n input_data = make_input_data(input_bar_data).reshape(1, input_nodes)\n input_last_close = 
input_bar_data[-1].close_price\n target_data, target_profit_percent, target_max_high = make_target_data(target_bar_data, input_last_close)\n\n output = neuralNetwork.query(input_data)\n output_label = int(np.argmax(output))\n\n current_label = 0\n if target_profit_percent > long_profit_percent: # 多开信号\n current_label = 1\n print(bar_data[test_target_index].datetime, input_last_close, target_max_high, target_profit_percent)\n elif target_profit_percent < - short_profit_percent: # 空开信号\n current_label = 2\n else:\n current_label = 0\n\n prediction_result.append(output_label)\n actual_result.append(current_label)\n\n if current_label == output_label:\n scordcard.append(1)\n else:\n scordcard.append(0)\n\n scordcard_array = np.asarray(scordcard)\n print(\"Performance = \", scordcard_array.sum() / scordcard_array.size)\n print(f\"Predict Long {prediction_result.count(1)} , Short {prediction_result.count(2)}, Sleep {prediction_result.count(0)}\")\n print(f\"Actual Long {actual_result.count(1)} , Short {actual_result.count(2)}, Sleep {actual_result.count(0)}\")\n","sub_path":"tests/neural_network/test_neural_network.py","file_name":"test_neural_network.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"17790138","text":"import yagmail\n\n\nuser = 'astro.coffee.sheffield@gmail.com' # address to download and send email from\npwd = 'crack_astro'\nrecipient = []\n\ncontents = '🔭☕ Astro Coffee at 10:00 in the Austin Room ☕🔭 \\n\\n Sent by CRACKbot'\n\nwith open('emails.txt') as f:\n recipient.extend(f.read().split())\nyag = yagmail.SMTP(user, pwd)\nyag.send(recipient, \"🔭☕ Astro COFFEE at 10:00 ☕🔭\", contents) ","sub_path":"astro_coffee.py","file_name":"astro_coffee.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"261617265","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n@File : test_2 \n@Time : 2021/3/21 8:59 下午\n@Author : Xuesong Chen\n@Description : \n\"\"\"\nn, u, v, s, t, m = map(int,input().split(' '))\n\nmin_time = None\nfor y in range(n//2+1, 0, -1):\n x = n-2*y\n tili = s*x + t*pow(y, 2)\n if tili > m:\n continue\n else:\n time = u*x + v*y\n print(time)\n break\n # if min_time == None:\n # min_time = time\n # if time < min_time:\n # min_time = time\n # print(x, y, time, tili)\n\n# print(min_time)","sub_path":"tecent/test_2_2.py","file_name":"test_2_2.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"323317123","text":"\"\"\"Access to the sounddevice library. This library allows to\nuse sound devices for recording and playback. The library\nis based on the PortAudio library [1].\n\n\n[1] http://www.portaudio.com/\n\"\"\"\n# FIXME[bug]: I am experiences frequent crashes on my office computer\n# (Ubuntu 16.04):\n# src/hostapi/alsa/pa_linux_alsa.c:3636:\n# PaAlsaStreamComponent_BeginPolling:\n# Assertion `ret == self->nfds' failed.\n#\n#\n# cat /proc/asound/version\n# Advanced Linux Sound Architecture Driver Version k4.4.0-179-generic.\n#\n# aplay --version\n# aplay: version 1.1.0 by Jaroslav Kysela \n#\n# pulseaudio --version\n# pulseaudio 8.0\n#\n# python -c \"import sounddevice; print(sounddevice.__version__)\"\n# 0.4.0\n\n\n# standard imports\nfrom typing import Union\nimport logging\nimport threading\n\n# third party imports\nimport numpy as np\nimport sounddevice as sd\n\n# toolbox imports\nfrom ..base.sound import (SoundPlayer as SoundPlayerBase,\n SoundRecorder as SoundRecorderBase)\nfrom ..base import get_default_run\n\n# logging\nLOG = logging.getLogger(__name__)\n\n\nclass SoundPlayer(SoundPlayerBase):\n \"\"\"An implementation of a :py:class:`SoundPlayerBase` based on\n the `sounddevice` library.\n \"\"\"\n\n def __init__(self, samplerate: float = None, channels: int = None,\n **kwargs) -> None:\n super().__init__(**kwargs)\n self._lock = threading.Lock()\n self._event = threading.Event()\n\n if channels is None:\n channels = 2 if self._sound is None else self._sound.channels\n if samplerate is None:\n samplerate = (44100 if self._sound is None else\n self._sound.samplerate)\n\n # _finishing: this is a hack - we need it to mark a stream that\n # finishes, but that has not yet been stopped (see method _finished).\n self._blocking = False\n self._finishing = False\n self._stream = None\n self._check_stream(samplerate=samplerate, channels=channels)\n\n def _check_stream(self, samplerate: float = None,\n channels: int = None) -> None:\n \"\"\"This function is a hack to fix a problem with an sounddevice\n streams in an unsane state: these streams have both, `active`\n and `stopped` flag (and also the `closed` flag) set to `False`.\n Such a state seems to occur when the stream is stopped\n (or aborted) from within the stream Thread (while stopping\n or aborting from another Thread seems to be ok).\n Such unsane streams can not be restarted by calling stream.start(),\n they seem to be dead (at least I did not find a way to revive\n them). 
As a workaround, we simply create a new stream here to\n replace the original one.\n \"\"\"\n # Check the state of the current stream\n if self._stream is not None and not self._stream.closed:\n if self._stream.active or self._stream.stopped:\n return # Stream seems to be ok\n\n LOG.warning(\"SoundDevicePlayer: \"\n \"discovered unsane stream - creating a new one ...\")\n # Stream seems to be dead - copy stream parameters\n samplerate = samplerate or self._stream.samplerate\n channels = channels or self._stream.channels\n self._stream.close()\n\n # create a new stream\n self._stream = sd.OutputStream(samplerate=samplerate,\n channels=channels,\n callback=self._play_block,\n finished_callback=self._finished)\n\n def _set_position(self, position: float) -> None:\n \"\"\"Set the current playback position.\n \"\"\"\n # as we set the position from within the playback loop,\n # we lock the operation to avoid interferences.\n with self._lock:\n super()._set_position(position)\n\n @property\n def playing(self) -> bool:\n return self._stream.active and not self._finishing\n\n @property\n def samplerate(self) -> float:\n \"\"\"Samplerate to be used for playback.\n \"\"\"\n return self._stream.samplerate\n\n @property\n def channels(self) -> int:\n \"\"\"Number of channels to be used for playback.\n \"\"\"\n return self._stream.channels\n\n def play(self, *args, run: bool = None, **kwargs):\n # we have to overwrite the super method to care for the 'run'\n # parameter (which would usually be done by the @run decorator):\n # as the stream playback is done in its own thread (and there\n # is no way to prevent this from happening), we will realize\n # a blocking call (run=False), explicitly waiting for the\n # playback to finish.\n self._blocking = not get_default_run(run)\n super().play(self, *args, run=False, **kwargs)\n\n def _play(self) -> None:\n \"\"\"Start the actual playback in a background thread.\n \"\"\"\n self._check_stream()\n # another hack:\n self._finishing = False\n self._event.clear()\n\n # this will start the background thread, periodically invoking\n # _play_block\n self._stream.start()\n\n print(\"Soundplayer: blocking:\", self._blocking)\n if self._blocking:\n try:\n self._event.wait()\n finally:\n # Playback/recording may have been stopped with\n # a `KeyboardInterrupt` - make sure the stream\n # is closed\n self._stream.close(ignore_errors=True)\n\n def _play_block(self, outdata: np.ndarray, frames: int,\n time, status: sd.CallbackFlags) -> None:\n \"\"\"Callback to be called by the output stream.\n\n Arguments\n ---------\n outdata: np.ndarray\n An array of shape (frames, channels) and dtype float32.\n This is a buffer provided by the OutputStream in which\n the next block of output data should be stored.\n frames: int\n The number of frames to be stored. 
This should be the\n sames as len(outdata)\n \"\"\"\n if status:\n LOG.debug(\"SoundDevicePlayer: status = %s\", status)\n\n position = self._position\n reverse = self.reverse\n\n if position is None:\n LOG.debug(\"play block: no position\")\n wave_frames = 0\n else:\n # obtain the relevant sound data\n samplerate = self.samplerate\n duration = frames / samplerate\n if not reverse:\n start = position\n end = min(position+duration, self.end)\n else:\n start = max(self.start, position-duration)\n end = position\n wave = self._sound[start:end:samplerate]\n wave_frames = len(wave)\n\n # provide the wave to the OutputStream via the outdata array.\n valid_frames = min(wave_frames, frames)\n if not reverse:\n outdata[:valid_frames, :] = wave[:valid_frames]\n else:\n outdata[:valid_frames, :] = wave[valid_frames-1::-1]\n LOG.debug(\"block, position=%f:.2, reverse=%s; \"\n \"start=%f:.2, end=%f:.2, duration=%f:.4/%f:.4, \"\n \"frames=%d/%d\", position, reverse,\n start, end, duration, end-start,\n wave_frames, valid_frames)\n\n # pad missing data with zeros\n if wave_frames < frames:\n outdata[wave_frames:, :].fill(0)\n\n # If we have not obtained any data (wave_frames == 0) we will stop\n # playback here.\n if not reverse:\n new_position = end if wave_frames > 0 else None\n if new_position is not None and new_position >= self.end:\n new_position = self.start if self.loop else None\n else:\n new_position = start if wave_frames > 0 else None\n if new_position is not None and new_position <= self.start:\n new_position = self.end if self.loop else None\n # We have to avoid overwriting a change of position\n # that may have occured in the meantime (by some other thread)\n with self._lock:\n if self._position == position:\n super()._set_position(new_position)\n\n if new_position is None:\n # We cannot call _stream.stop() (or _stream.abort()) from\n # within the sub-thread (also not from finished_callback)\n # this will cause some error in the underlying C library).\n # The official way to stop the thread from within is to\n # raise an exception:\n raise sd.CallbackStop()\n\n def _finished(self) -> None:\n \"\"\"The finished_callback is called once the playback thread\n finishes (either due to an exception in the inner loop or by\n an explicit call to stream.stop() from the outside).\n \"\"\"\n # When the finihed_callback is called, the stream may not have\n # stopped yet - so when informing the observers, the playing\n # property may still report playing - to avoid this, we have\n # introduced the _finishing flag, that indicates that playback\n # has finished.\n self._event.set()\n if self.playing:\n self._finishing = True\n self.change('state_changed')\n\n def _stop(self) -> None:\n \"\"\"Stop an ungoing playback.\n \"\"\"\n # Here we could either call stream.stop() or stream.abort().\n # The first would stop acquiring new data, but finish processing\n # buffered data, while the second would abort immediately.\n # For the sake of a responsive interface, we choose abort here.\n if self._stream.active:\n self._stream.abort(ignore_errors=True)\n\n\nclass SoundRecorder(SoundRecorderBase):\n \"\"\"A :py:class:`SoundRecorder` based on the Python sounddevice\n library.\n \"\"\"\n\n def __init__(self, channels: int = None, samplerate: float = None,\n device: Union[int, str] = None, **kwargs):\n super().__init__(**kwargs)\n\n if channels is None:\n channels = 2 if self._sound is None else self._sound.channels\n if samplerate is None:\n samplerate = (44100 if self._sound is None else\n self._sound.samplerate)\n 
# device: input device (numeric ID or substring)\n        # device_info = sd.query_devices(device, 'input')\n        # samplerate = device_info['default_samplerate']\n\n        self._stream = sd.InputStream(device=device, channels=channels,\n                                      samplerate=samplerate,\n                                      callback=self._record_block,\n                                      finished_callback=self._finished)\n\n    @property\n    def samplerate(self) -> float:\n        \"\"\"Samplerate used for recording.\n        \"\"\"\n        return self._stream.samplerate\n\n    @property\n    def channels(self) -> int:\n        \"\"\"Number of channels to be recorded.\n        \"\"\"\n        return self._stream.channels\n\n    @property\n    def recording(self) -> bool:\n        return self._stream.active\n\n    def _record(self) -> None:\n        \"\"\"Start the audio input stream for recording.\n        \"\"\"\n        LOG.info(\"Recorder: samplerate=%f\", self.samplerate)\n        LOG.info(\"Recorder: sound=%s\", self.sound)\n\n        LOG.info(\"Recorder: starting stream\")\n        self._stream.start()\n        LOG.info(\"Recorder: stream started\")\n\n    def _FIXME_old_record(self) -> None:\n        # This implementation assumes a plotter (like the\n        # MatplotlibSoundPlotter), that has to start its own Thread\n        # (as the matplotlib.animation.FuncAnimation class does).\n        # The context manager (with self._stream) will start\n        # the sounddevice.InputStream in its own Thread, and then\n        # execute the inner block.\n        #\n        # # the context manager will start the stream task\n        # with self._stream:\n        #     # this will start the plotter and block until the\n        #     # plotter has finished - hence we have to explicitly\n        #     # stop the plotter, once the stream has finished.\n        #     self._plotter.start_plot()\n\n        # stream = sd.InputStream(device=device, channels=channels,\n        #                         samplerate=samplerate, callback=audio_callback)\n        # ani = FuncAnimation(fig, update_plot, interval=interval, blit=True)\n        # with stream:\n        #     plt.show()\n        pass\n\n    def _record_block(self, indata, _frames, _time, status):\n        \"\"\"This is called (from a separate thread) for each audio block.\"\"\"\n        if status:\n            LOG.debug(\"SoundDeviceRecorder: %s\", status)\n\n        # append new data to the sound object\n        self._sound += indata\n\n    def _finished(self) -> None:\n        LOG.info(\"SoundDeviceRecorder: finished\")\n\n    def _stop(self) -> None:\n        \"\"\"Stop ongoing sound recording.\n        \"\"\"\n        # Here we could either call stream.stop() or stream.abort().\n        # The first would stop acquiring new data, but finish processing\n        # buffered data, while the second would abort immediately.\n        # In order to not lose any data, we choose stop here.\n        LOG.info(\"SoundDeviceRecorder: stopping stream\")\n        # self._stream.abort()\n        if self._stream.active:\n            self._stream.stop()\n        LOG.info(\"SoundDeviceRecorder: stream stopped\")\n","sub_path":"dltb/thirdparty/sounddevice.py","file_name":"sounddevice.py","file_ext":"py","file_size_in_byte":13443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"483998586","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/max/Workspaces/gsfc/photometry_pipeline/code/reduction/astrom/astrometrystats.py\n# Compiled at: 2016-11-15 15:22:29\nimport numpy\n\ndef median(l):\n a = numpy.array(l)\n return numpy.median(a)\n\n\ndef stdev(l):\n a = numpy.array(l)\n return numpy.std(a)\n\n\ndef most(list, vmin=1, vmax=1):\n counter = numpy.zeros(len(list))\n for i in range(0, len(list)):\n counter[i] = ((list[i] + vmax >= list) & (list[i] - vmin <= list)).sum()\n\n if len(set(counter)) == 1:\n return numpy.median(list)\n else:\n return list[counter.argmax()]\n\n\ndef rasex2deg(rastr):\n rastr = str(rastr).strip()\n ra = rastr.split(':')\n if len(ra) == 1:\n return float(rastr)\n return 15 * (float(ra[0]) + float(ra[1]) / 60.0 + float(ra[2]) / 3600.0)\n\n\ndef decsex2deg(decstr):\n decstr = str(decstr).strip()\n dec = decstr.split(':')\n if len(dec) == 1:\n return float(decstr)\n sign = 1\n if decstr[0] == '-':\n sign = -1\n return sign * (abs(float(dec[0])) + float(dec[1]) / 60.0 + float(dec[2]) / 3600.0)\n\n\ndef magcomp(obj1, obj2):\n return (obj1.mag > obj2.mag) - (obj1.mag < obj2.mag)\n\n\ndef unique(inlist):\n lis = inlist[:]\n lis.sort()\n llen = len(lis)\n i = 0\n while i < llen - 1:\n if lis[(i + 1)] == lis[i]:\n del lis[i + 1]\n llen = llen - 1\n else:\n i = i + 1\n\n return lis","sub_path":"pycfiles/photopipe-0.1.0b4.tar/astrometrystats.py","file_name":"astrometrystats.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"344651012","text":"##\n## Seven layer fully connected CNN\n##\n\nimport tensorflow as tf\n\n\nclass Model(object):\n\tdef __init__(self, X, y):\n\t\t'''\n\t\tSetup graph using class. \n\t\tX = input tensor\n\t\ty = output tensor\n\t\t'''\n\n\t\t## Some hyper paramters\n\t\tself.learning_rate = 1e-2\n\t\tself.minibatch_size = 128\n\t\tself.num_epochs = 200\n\t\tself.evaluation_frequency = 1000\n\n\t\tfc1 = tf.layers.dense(inputs = X, units = 128, activation = 'sigmoid', name = 'fc_1')\n\t\tfc2 = tf.layers.dense(inputs = fc1, units = 64, activation = 'sigmoid', name = 'fc_2')\n\t\tfc3 = tf.layers.dense(inputs = fc2, units = 64, activation = 'sigmoid', name = 'fc_3')\n\t\tfc4 = tf.layers.dense(inputs = fc3, units = 32, activation = 'sigmoid', name = 'fc_4')\n\t\tfc5 = tf.layers.dense(inputs = fc4, units = 32, activation = 'sigmoid', name = 'fc_5')\n\t\tfc6 = tf.layers.dense(inputs = fc5, units = 16, activation = 'sigmoid', name = 'fc_6')\n\t\tlogits = tf.layers.dense(inputs = fc6, units = 10, activation = None, name = 'fc_7')\n\t\tself.yhat = tf.nn.softmax(logits)\n\n\t\t# Cost Function\n\t\tself.cost = tf.losses.softmax_cross_entropy(y, logits)\n\n\t\t# As we train, it will also be nice to keep track of the accuracy of our classifier\n\t\tcorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(self.yhat, 1)) # Check if predictions are equal to labels\n\t\tself.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Compute average accuracy\n\n\t\t# Add Optimizer to Graph:\n\t\toptimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)\n\t\tself.train_op = optimizer.minimize(self.cost)\n\n\t\t#Setup Summary Writing for Tensorboard:\n\t\ttf.summary.scalar(name = 'cost', tensor = self.cost)\n\t\ttf.summary.scalar(name = 'accuracy', tensor = self.accuracy)\n\t\tself.merged_summary_op = tf.summary.merge_all() #Merges all summaries, in this case we only have one!\n\n\n\n","sub_path":"models/seven_layer_cross_entropy/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"24679157","text":"from bst import BST\nimport random\n\nclass BSTmaker():\n def make_random_bst(self, n=10, low_key=1, high_key=100):\n \"\"\"\n Create a BST with n distinct nodes. Each node has a random number wiht range\n [low_key, high_key)\n\n return nums, bst\n \"\"\"\n t = BST()\n nums = random.sample(range(low_key, high_key), n)\n for v in nums:\n t.insert(v)\n return nums, t\n\n def make_bst(self, nums):\n \"\"\"Create a bst with a given list of numbers\"\"\"\n t = BST()\n for v in nums:\n t.insert(v)\n return nums, t\n\n\nif __name__ == \"__main__\":\n nums, bst = BSTmaker().make_random_bst()\n print(nums)\n print(bst)\n\n# [76, 2, 47, 20, 10, 69, 29, 68, 15, 12]\n# ...76...\n# / \\\n# ..2..\n# / \\\n# ..47.\n# / \\\n# 20. 69\n# / \\ /\\\n# 10 29 68\n# / \\ /\\ /\\\n# 15\n# /\\\n# 12\n# /\\\n","sub_path":"chp4_tree_and_graph/bst_maker.py","file_name":"bst_maker.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"374994442","text":"#!/usr/bin/env python3.4\n\nfrom tkinter import *\nfrom tkinter import messagebox as msg\nfrom databaseManager import *\nfrom FeedReader import *\nimport webbrowser\n\nclass addRemove(Tk):\n\tdef __init__(self):\n\t\tTk.__init__(self)\n\t\tself.title(\"Manage Feeds\")\n\t\tself.font = (\"Arial\", \"16\")\n\t\tself.mainscreen = Mainscreen(self)\n\t\tself.mainscreen.grid()\n\t\tself.db = DatabaseManager()\n\n\tdef addFeedScreen(self):\n\t\tself.mainscreen.destroy()\n\t\tself.addScreen = AddFeedScreen(self)\n\t\tself.addScreen.grid()\n\n\tdef removeFeedScreen(self):\n\t\tself.mainscreen.destroy()\n\t\tself.removeScreen = RemoveFeedScreen(self, self.db)\n\t\tself.removeScreen.grid()\n\n\tdef addFeedToDatabase(self):\n\t\tfeedTitle = str(self.addScreen.titleEntry.get())\n\t\tfeedUrl = str(self.addScreen.urlEntry.get())\n\t\tself.db.addFeed(feedTitle, feedUrl)\n\t\tmsg.showinfo(\"Success!\", \"Feed added!\")\n\t\tself.addScreen.destroy()\n\t\tself.mainscreen = Mainscreen(self)\n\t\tself.mainscreen.grid()\n\n\tdef removeFeedFromDatabase(self):\n\t\tfeedTitle = str(self.removeScreen.feedName.get())\n\t\tself.db.removeFeed(feedTitle)\n\t\tmsg.showinfo(\"Success!\", \"Feed removed!\")\n\t\tself.removeScreen.destroy()\n\t\tself.mainscreen = Mainscreen(self)\n\t\tself.mainscreen.grid()\n\n\tdef iterateFeeds(self):\n\t\tself.db.closeConnection()\n\t\tfeedReader = FeedReader()\n\t\tfeedReader.iterateFeeds()\n\n\tdef openFeeds(self):\n\t\tself.iterateFeeds()\n\t\twebbrowser.open_new_tab(\"feeds.html\")\n\t\tself.destroy()\n\n\tdef quitApp(self):\n\t\tself.iterateFeeds()\n\t\tself.destroy()\n\nclass Mainscreen(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\n\t\taddFeed = Button(self, text = \"Add Feed\", command = parent.addFeedScreen, width = \"10\")\n\t\taddFeed.grid(row = 0, column = 0)\n\n\t\tremoveFeed = Button(self, text = \"Remove Feed\", command = parent.removeFeedScreen, width = \"10\")\n\t\tremoveFeed.grid(row = 1, column = 0)\n\n\t\topenFeeds = Button(self, text = \"Open Feeds\", command = parent.openFeeds, width = \"10\")\n\t\topenFeeds.grid(row = 2, column = 0)\n\n\t\tquitApp = Button(self, text = \"Quit\", command = parent.quitApp, width = \"10\")\n\t\tquitApp.grid(row = 3, column = 0)\n\nclass AddFeedScreen(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\n\t\tLabel(self, text = \"Feed title:\").grid(row = 0, column = 0, pady = 5, padx = 5)\n\n\t\tself.titleEntry = Entry(self)\n\t\tself.titleEntry.grid(row = 0, column = 1)\n\n\t\tLabel(self, text = \"Feed URL:\").grid(row = 1, column = 0, pady = 5, padx = 5)\n\n\t\tself.urlEntry = Entry(self)\n\t\tself.urlEntry.grid(row = 1, column = 1, pady = 5, padx = 5)\n\n\t\tsubmit = Button(self, text = \"Submit\", command = parent.addFeedToDatabase)\n\t\tsubmit.grid(row = 2, column = 0, columnspan = 2)\n\nclass RemoveFeedScreen(Frame):\n\tdef __init__(self, parent, db):\n\t\tFrame.__init__(self, parent)\n\t\tself.db = parent.db\n\t\tself.db = db.returnFeedArray()\n\n\t\tLabel(self, text = \"Remove Feed\").grid(row = 0, column = 0, columnspan = 2, pady = 5, padx = 5)\n\n\t\tsubmit = Button(self, text = \"Submit\", command = parent.removeFeedFromDatabase)\n\t\tsubmit.grid(row = 1, column = 0, columnspan = 2)\n\n\t\tself.feedName = StringVar()\n\t\tfeedlist = []\n\t\tfor index, feedEntry in enumerate(self.db):\n\t\t\tfeedlist.append(Radiobutton(self, text = feedEntry[0], variable = self.feedName, value = feedEntry[0]))\n\t\t\tfeedlist[index].grid(row = index + 2)\n\ndef 
main():\n gui = addRemove()\n gui.mainloop()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"RSS Reader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"152705395","text":"import pandas_datareader as pdr\nimport pandas as pd\nfrom datetime import datetime\n\ndef pull_data(symbol):\n try:\n data = pdr.get_data_yahoo(symbols=symbol, start=datetime(2015, 1, 1), end=datetime(2019, 1, 1))\n if len(data) >= 800:\n data.to_csv(\"{}.csv\".format(symbol))\n else:\n print(\"Incomplete data for {}\".format(symbol))\n except: print(\"Could not load {}\".format(symbol))\n\ndef loadSP500():\n data = pd.read_excel(\"S&P500.xlsx\")\n return data['Ticker']\n\nstocks = loadSP500()\nfor i in stocks:\n pull_data(i)","sub_path":"data/yahoo-finance-script.py","file_name":"yahoo-finance-script.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"156537539","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom inspect import isfunction\nfrom operator import mul\nfrom functools import partial, reduce, wraps\n\nfrom axial_positional_embedding import AxialPositionalEmbedding\nfrom product_key_memory import PKM\nfrom routing_transformer.reversible import ReversibleSequence, SequentialSequence\n\n# constants\n\nTOKEN_SELF_ATTN_VALUE = -5e4\nKMEAN_INIT_ITERS = 10\n\n# helper functions\n\ndef identity(x, *args, **kwargs):\n return x\n\ndef default(x, d):\n if x is None:\n return d if not isfunction(d) else d()\n return x\n\ndef cast_tuple(x):\n return x if isinstance(x, tuple) else (x,)\n\ndef cache_fn(f):\n cache = None\n @wraps(f)\n def cached_fn(*args, **kwargs):\n nonlocal cache\n if cache is not None:\n return cache\n cache = f(*args, **kwargs)\n return cache\n return cached_fn\n\ndef to(t):\n return {'device': t.device, 'dtype': t.dtype}\n\ndef find_modules(nn_module, type):\n return [module for module in nn_module.modules() if isinstance(module, type)]\n\ndef is_empty(t):\n return t.nelement() == 0\n\ndef max_neg_value(tensor):\n return -torch.finfo(tensor.dtype).max\n\ndef batched_index_select(values, indices):\n last_dim = values.shape[-1]\n return values.gather(2, expand_dim(indices, -1, last_dim))\n\ndef merge_dims(ind_from, ind_to, tensor):\n shape = list(tensor.shape)\n arr_slice = slice(ind_from, ind_to + 1)\n shape[arr_slice] = [reduce(mul, shape[arr_slice])]\n return tensor.reshape(*shape)\n\ndef expand_dim(t, dim, k):\n t = t.unsqueeze(dim)\n expand_shape = [-1] * len(t.shape)\n expand_shape[dim] = k\n return t.expand(*expand_shape)\n\ndef scatter_mean(src, t, index, dim, eps = 1e-5):\n numer = src.scatter_add(dim, index, t)\n denom = src.scatter_add(dim, index, torch.ones_like(t))\n return numer / (denom + eps)\n\ndef look_around(x, backward = 1, forward = 0, pad_value = -1, dim = 2):\n t = x.shape[1]\n dims = (len(x.shape) - dim) * (0, 0)\n padded_x = F.pad(x, (*dims, backward, forward), value= pad_value)\n tensors = [padded_x[:, ind:(ind + t), ...] 
for ind in range(forward + backward + 1)]\n return torch.cat(tensors, dim=dim)\n\ndef split_at_index(dim, index, t):\n pre_slices = (slice(None),) * dim\n l = (*pre_slices, slice(None, index))\n r = (*pre_slices, slice(index, None))\n return t[l], t[r]\n\ndef ema(old, new, decay):\n if old is None:\n return new\n return old * decay + new * (1 - decay)\n\ndef ema_inplace(moving_avg, new, decay):\n if is_empty(moving_avg):\n moving_avg.data.copy_(new)\n return\n moving_avg.data.mul_(decay).add_(new, alpha= (1 - decay))\n\n# helper classes\n\nclass Chunk(nn.Module):\n def __init__(self, chunks, fn, along_dim = -1):\n super().__init__()\n self.dim = along_dim\n self.chunks = chunks\n self.fn = fn\n\n def forward(self, x, **kwargs):\n if self.chunks <= 1:\n return self.fn(x, **kwargs)\n chunks = x.chunk(self.chunks, dim = self.dim)\n return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)\n\nclass PreNorm(nn.ModuleList):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n def forward(self, x, **kwargs):\n x = self.norm(x)\n return self.fn(x, **kwargs)\n\nclass ProjectInOut(nn.Module):\n def __init__(self, fn, dim_in, dim_out, project_out = True):\n super().__init__()\n self.fn = fn\n self.project_in = nn.Linear(dim_in, dim_out)\n self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity\n\n def forward(self, x, **kwargs):\n x = self.project_in(x)\n x, loss = self.fn(x, **kwargs)\n x = self.project_out(x)\n return x, loss\n\n# positional embeddings\n\nclass AbsolutePositionalEmbedding(nn.Module):\n def __init__(self, dim, max_seq_len):\n super().__init__()\n self.emb = nn.Embedding(max_seq_len, dim)\n\n def forward(self, x):\n t = torch.arange(x.shape[1], device=x.device)\n return self.emb(t)\n\ndef shift(x):\n *_, i, j = x.shape\n zero_pad = torch.zeros((*_, i, i), **to(x))\n x = torch.cat([x, zero_pad], -1)\n l = i + j - 1\n x = x.view(*_, -1)\n zero_pad = torch.zeros(*_, -x.size(-1) % l, **to(x))\n shifted = torch.cat([x, zero_pad], -1).view(*_, -1, l)\n return shifted[..., :i, i - 1:]\n\nclass RelativePositionalEmbedding(nn.Module):\n def __init__(self, dim, heads, length):\n super().__init__()\n self.scale = dim ** -0.5\n self.weights = nn.Parameter(torch.zeros(length, heads, dim))\n\n def forward(self, q):\n emb = torch.einsum('bhnid,jhd->bhnij', q, self.weights.type(q.dtype)) * self.scale\n return shift(emb)\n\n# local attention\n\nclass LocalAttention(nn.Module):\n def __init__(self, bucket_size, heads, head_dim, causal = False, look_backward = 1, look_forward = None, dropout = 0., shared_qk = False, rel_pos_emb = True):\n super().__init__()\n self.look_forward = default(look_forward, 0 if causal else 1)\n assert not (causal and self.look_forward > 0), 'you cannot look forward if causal'\n\n self.bucket_size = bucket_size\n self.causal = causal\n self.look_backward = look_backward\n self.shared_qk = shared_qk\n\n self.heads = heads\n self.dropout = nn.Dropout(dropout)\n\n self.rel_pos = RelativePositionalEmbedding(head_dim, heads, bucket_size * 2) if rel_pos_emb else None\n\n def forward(self, q, k, v, input_mask = None):\n shape = q.shape\n\n merge_into_batch = lambda t: t.reshape(-1, *t.shape[-2:])\n q, k, v = map(merge_into_batch, (q, k, v))\n\n b, t, e, h, device, dtype = *q.shape, self.heads, q.device, q.dtype\n bucket_size, causal, look_backward, look_forward, shared_qk = self.bucket_size, self.causal, self.look_backward, self.look_forward, self.shared_qk\n\n buckets = t // bucket_size\n\n if 
shared_qk:\n k = F.normalize(k, 2, dim=-1).type(q.type())\n\n ticker = torch.arange(t, device=device, dtype=dtype)[None, :]\n b_t = ticker.reshape(1, buckets, bucket_size)\n\n bucket_fn = lambda t: t.reshape(b, buckets, bucket_size, -1)\n bq, bk, bv = map(bucket_fn, (q, k, v))\n\n look_around_kwargs = {'backward': look_backward, 'forward': look_forward}\n bk = look_around(bk, **look_around_kwargs)\n bv = look_around(bv, **look_around_kwargs)\n\n bq_t = b_t\n bq_k = look_around(b_t, **look_around_kwargs)\n\n dots = torch.einsum('bhie,bhje->bhij', bq, bk) * (e ** -0.5)\n\n if self.rel_pos is not None:\n rel_attn = self.rel_pos(bq.view(-1, h, *bq.shape[1:])).reshape_as(dots)\n dots = dots + rel_attn\n\n mask_value = max_neg_value(dots)\n\n if shared_qk:\n mask = bq_t[:, :, :, None] == bq_k[:, :, None, :]\n dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE)\n del mask\n\n if causal:\n mask = bq_t[:, :, :, None] < bq_k[:, :, None, :]\n dots.masked_fill_(mask, mask_value)\n del mask\n\n mask = bq_k[:, :, None, :] == -1\n dots.masked_fill_(mask, mask_value)\n del mask\n\n if input_mask is not None:\n h = b // input_mask.shape[0]\n input_mask = input_mask.reshape(-1, buckets, bucket_size)\n mq = mk = input_mask\n mk = look_around(mk, pad_value=False, **look_around_kwargs)\n mask = (mq[:, :, :, None] * mk[:, :, None, :])\n mask = merge_dims(0, 1, expand_dim(mask, 1, h))\n dots.masked_fill_(~mask, mask_value)\n del mask\n\n attn = dots.softmax(dim=-1)\n attn = self.dropout(attn)\n\n out = torch.einsum('bhij,bhje->bhie', attn, bv)\n out = out.reshape(*shape)\n return out\n\n# kmeans related function and class\n\ndef update_kmeans_on_backwards(module):\n module.kmean_modules = find_modules(module, Kmeans)\n def hook(_, grad_in, grad_out):\n for m in module.kmean_modules:\n m.update()\n\n return module.register_backward_hook(hook)\n\ndef similarity(x, means):\n return torch.einsum('bhld,hcd->bhlc', x, means)\n\ndef dists_and_buckets(x, means):\n dists = similarity(x, means)\n _, buckets = torch.max(dists, dim=-1)\n return dists, buckets\n\ndef batched_bincount(index, num_classes, dim=-1):\n shape = list(index.shape)\n shape[dim] = num_classes\n out = index.new_zeros(shape)\n out.scatter_add_(dim, index, torch.ones_like(index, dtype=index.dtype))\n return out\n\ndef kmeans_iter(x, means, buckets = None):\n b, h, l, d, dtype, num_clusters = *x.shape, x.dtype, means.shape[1]\n\n if buckets is None:\n _, buckets = dists_and_buckets(x, means)\n\n bins = batched_bincount(buckets, num_clusters).sum(0, keepdim=True)\n zero_mask = bins.long() == 0\n\n means_ = buckets.new_zeros(b, h, num_clusters, d, dtype=dtype)\n means_.scatter_add_(-2, expand_dim(buckets, -1, d), x)\n means_ = F.normalize(means_.sum(0, keepdim=True), dim=-1).type(dtype)\n\n means = torch.where(zero_mask.unsqueeze(-1), means, means_)\n means = means.squeeze(0)\n return means\n\ndef distribution(dists, window_size):\n _, topk_indices = dists.topk(k=window_size, dim=-2)\n indices = topk_indices.transpose(-2, -1)\n return indices.reshape(*indices.size()[:2], -1)\n\nclass Kmeans(nn.Module):\n def __init__(self, num_heads, head_dim, num_clusters, ema_decay = 0.999, commitment = 1e-4):\n super().__init__()\n self.commitment = commitment\n self.ema_decay = ema_decay\n\n self.register_buffer('means', torch.randn(num_heads, num_clusters, head_dim))\n self.register_buffer('initted', torch.tensor(False))\n self.num_new_means = 0\n self.new_means = None\n\n @torch.no_grad()\n def init(self, x):\n if self.initted:\n return\n _, h, _, d, device, dtype = 
*x.shape, x.device, x.dtype\n\n num_clusters = self.means.shape[1]\n\n means = x.transpose(0, 1).contiguous().view(h, -1, d)\n num_samples = means.shape[1]\n\n if num_samples >= num_clusters:\n indices = torch.randperm(num_samples, device=device)[:num_clusters]\n else:\n indices = torch.randint(0, num_samples, (num_clusters,), device=device)\n\n means = means[:, indices]\n\n for _ in range(KMEAN_INIT_ITERS):\n means = kmeans_iter(x, means)\n\n self.num_new_means = 0\n self.means.data.copy_(means)\n self.initted.data.copy_(torch.tensor(True))\n\n @torch.no_grad()\n def update(self, new_means = None):\n new_means = default(new_means, self.new_means)\n assert new_means is not None, 'new kmeans has not been supplied'\n ema_inplace(self.means, new_means, self.ema_decay)\n\n del self.new_means\n self.new_means = None\n self.num_new_means = 0\n\n def forward(self, x, update_means = False):\n self.init(x)\n\n b, dtype = x.shape[0], x.dtype\n means = self.means.type(dtype)\n x = F.normalize(x, 2, dim=-1).type(dtype)\n\n with torch.no_grad():\n dists, buckets = dists_and_buckets(x, means)\n\n routed_means = batched_index_select(expand_dim(means, 0, b), buckets)\n loss = F.mse_loss(x, routed_means) * self.commitment\n\n if update_means:\n with torch.no_grad():\n means = kmeans_iter(x, means, buckets)\n self.new_means = ema(self.new_means, means, self.num_new_means / (self.num_new_means + 1))\n self.num_new_means += 1\n\n return dists, loss\n\n# kmeans attention class\n\nclass KmeansAttention(nn.Module):\n def __init__(self, num_clusters, window_size, num_heads, head_dim, causal = False, dropout = 0., ema_decay = 0.999, commitment = 1e-4, context_window_size = None, receives_context = False, num_mem_kv = 0, shared_qk = False):\n super().__init__()\n self.num_heads = num_heads\n self.num_clusters = num_clusters\n self.head_dim = head_dim\n\n self.window_size = window_size\n self.context_window_size = default(context_window_size, window_size)\n self.causal = causal\n\n self.shared_qk = shared_qk\n self.receives_context = receives_context\n self.kmeans = Kmeans(num_heads, head_dim, num_clusters, ema_decay, commitment)\n self.dropout = nn.Dropout(dropout)\n\n self.num_mem_kv = max(num_mem_kv, 1 if causal and not shared_qk else 0)\n self.mem_key = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim))\n self.mem_value = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim))\n\n def forward(self, q, k, v, query_mask = None, key_mask = None, **kwargs):\n b, h, t, d, kv_t, wsz, c_wsz, nc, device, dtype = *q.shape, k.shape[2], self.window_size, self.context_window_size, self.num_clusters, q.device, q.dtype\n is_reverse = kwargs.pop('_reverse', False)\n\n out = torch.zeros_like(q, dtype=dtype)\n\n update_kmeans = self.training and not is_reverse\n \n key_mask = default(key_mask, query_mask) if not self.receives_context else key_mask\n kv_wsz = wsz if not self.receives_context else c_wsz\n\n wsz = min(wsz, t)\n kv_wsz = min(kv_wsz, kv_t)\n\n if not self.shared_qk or self.receives_context:\n dists, aux_loss = self.kmeans(torch.cat((q, k), dim=2), update_kmeans)\n q_dists, k_dists = split_at_index(2, t, dists)\n indices = distribution(q_dists, wsz)\n kv_indices = distribution(k_dists, kv_wsz)\n else:\n dists, aux_loss = self.kmeans(q, update_kmeans)\n k = F.normalize(k, dim=-1).to(q)\n indices = distribution(dists, wsz)\n kv_indices = indices\n\n q = batched_index_select(q, indices)\n k = batched_index_select(k, kv_indices)\n v = batched_index_select(v, kv_indices)\n\n 
reshape_with_window = lambda x: x.reshape(b, h, nc, -1, d)\n q, k, v = map(reshape_with_window, (q, k, v))\n\n m_k, m_v = map(lambda x: expand_dim(x, 0, b).to(q), (self.mem_key, self.mem_value))\n k, v = map(lambda x: torch.cat(x, dim=3), ((m_k, k), (m_v, v)))\n\n dots = torch.einsum('bhnid,bhnjd->bhnij', q, k) * (d ** -0.5)\n\n mask_value = max_neg_value(dots)\n\n if query_mask is not None or key_mask is not None:\n query_mask = default(query_mask, lambda: torch.ones((b, t), device=device).bool())\n key_mask = default(key_mask, lambda: torch.ones((b, kv_t), device=device).bool())\n\n q_mask = expand_dim(query_mask, 1, h).gather(2, indices)\n kv_mask = expand_dim(key_mask, 1, h).gather(2, kv_indices)\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (q_mask, kv_mask))\n mask = q_mask[:, :, :, :, None] * kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=True)\n dots.masked_fill_(~mask, mask_value)\n del mask\n\n if self.causal:\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices))\n mask = q_mask[:, :, :, :, None] >= kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=True)\n dots.masked_fill_(~mask, mask_value)\n del mask \n\n if self.shared_qk:\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices))\n mask = q_mask[:, :, :, :, None] == kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=False)\n dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE)\n del mask\n\n dots = dots.softmax(dim=-1)\n dots = self.dropout(dots)\n\n bo = torch.einsum('bhcij,bhcjd->bhcid', dots, v)\n so = torch.reshape(bo, (b, h, -1, bo.shape[-1])).type(dtype)\n out = scatter_mean(out, so, indices.unsqueeze(-1).expand_as(so), -2)\n return out, aux_loss\n\n# feedforward\n\nclass GELU_(nn.Module):\n def forward(self, x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\nGELU = nn.GELU if hasattr(nn, 'GELU') else GELU_\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):\n super().__init__()\n activation = default(activation, GELU)\n\n self.glu = glu\n self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))\n self.act = activation()\n self.dropout = nn.Dropout(dropout)\n self.w2 = nn.Linear(dim * mult, dim)\n\n def forward(self, x, **kwargs):\n if not self.glu:\n x = self.w1(x)\n x = self.act(x)\n else:\n x, v = self.w1(x).chunk(2, dim=-1)\n x = self.act(x) * v\n\n x = self.dropout(x)\n x = self.w2(x)\n return x\n\n# self attention\n\nclass SelfAttention(nn.Module):\n def __init__(self, dim, depth, max_seq_len, heads, local_attn_heads, window_size, local_attn_window_size = None, causal = False, attn_dropout = 0., dropout = 0., kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, rel_pos_emb = True, num_mem_kv = 0, shared_qk = False):\n super().__init__()\n assert (dim % heads) == 0, 'hidden dimension must be divisible by number of heads'\n assert (max_seq_len % window_size) == 0, 'maximum sequence length must be divisible by the target window size'\n assert local_attn_heads <= heads, 'number of local attention heads must be less than total heads'\n assert not (receives_context and local_attn_heads > 0), 'local attention cannot be used for self attention with context'\n assert not (receives_context and causal), 'contextual attention layer cannot be causal'\n\n local_attn_window_size = default(local_attn_window_size, window_size // 
2)\n context_window_size = default(context_window_size, window_size)\n\n self.shared_qk = shared_qk\n self.receives_context = receives_context\n self.heads = heads\n self.local_attn_heads = local_attn_heads\n self.global_attn_heads = heads - local_attn_heads\n\n self.window_size = window_size\n\n head_dim = dim // heads\n num_clusters = max_seq_len // window_size\n\n if self.local_attn_heads > 0:\n self.local_attn = LocalAttention(local_attn_window_size, local_attn_heads, head_dim, causal = True, dropout = attn_dropout, rel_pos_emb = rel_pos_emb, shared_qk = shared_qk)\n\n if self.global_attn_heads > 0:\n self.global_attn = KmeansAttention(num_clusters, window_size, self.global_attn_heads, head_dim, causal = causal, dropout = attn_dropout, ema_decay = kmeans_ema_decay, commitment = commitment_factor, receives_context = receives_context, num_mem_kv = num_mem_kv, shared_qk = shared_qk)\n\n self.to_q = nn.Linear(dim, dim, bias = False)\n self.to_v = nn.Linear(dim, dim, bias = False)\n self.to_out = nn.Linear(dim, dim, bias = False)\n\n if not self.shared_qk:\n self.to_k = nn.Linear(dim, dim, bias = False)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, context = None, input_mask = None, context_mask = None, **kwargs):\n assert not (self.receives_context and context is None), 'context must be passed if self attention is set to receive context'\n b, t, e, h = *x.shape, self.heads\n head_dim = e // self.heads\n\n split_heads = lambda v: v.reshape(b, -1, h, head_dim).transpose(1, 2).contiguous()\n\n kv_input = x if not self.receives_context else context\n\n q, v = self.to_q(x), self.to_v(kv_input)\n\n if not self.shared_qk:\n k = self.to_k(kv_input)\n else:\n k = self.to_q(kv_input) if self.receives_context else q\n\n q, k, v = map(split_heads, (q, k, v))\n\n split_index_fn = partial(split_at_index, 1, self.local_attn_heads)\n (lq, q), (lk, k), (lv, v) = map(split_index_fn, (q, k, v))\n has_local, has_global = map(lambda x: x.shape[1] > 0, (lq, q))\n\n out = []\n total_loss = torch.tensor(0., requires_grad=True, **to(x))\n\n if has_local:\n local_out = self.local_attn(lq, lk, lv, input_mask = input_mask)\n out.append(local_out)\n\n if has_global:\n global_out, loss = self.global_attn(q, k, v, query_mask = input_mask, key_mask = context_mask)\n total_loss = total_loss + loss\n\n out.append(global_out)\n\n out = torch.cat(out, dim=1)\n out = out.reshape(b, h, t, -1).transpose(1, 2).reshape(b, t, -1)\n out = self.to_out(out)\n return self.dropout(out), total_loss\n\nclass RoutingTransformer(nn.Module):\n def __init__(self, dim, depth, max_seq_len, heads = 8, window_size = 64, local_attn_window_size = None, causal = False, weight_tie = False, attn_dropout = 0., ff_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., n_local_attn_heads = 0, ff_glu = False, reversible = False, ff_chunks = 1, kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, _register_kmeans_update = False, rel_pos_emb = True, pkm_layers = tuple(), pkm_num_keys = 128, num_mem_kv = 0, shared_qk = None, context_shared_qk = False):\n super().__init__()\n shared_qk = default(shared_qk, causal) # default to shared qk when causal, due to experimental results\n\n local_attn_window_size = default(local_attn_window_size, window_size // 2)\n if type(n_local_attn_heads) is not tuple:\n n_local_attn_heads = tuple([n_local_attn_heads] * depth)\n\n assert len(n_local_attn_heads) == depth, 'local attention heads tuple must have the same length as the depth'\n assert 
all([(local_heads <= heads) for local_heads in n_local_attn_heads]), 'number of local attn heads must be less than the maximum number of heads'\n\n layers = nn.ModuleList([])\n fn_wrapper = partial(PreNorm, dim)\n\n get_attn = lambda local_heads: SelfAttention(dim, depth, max_seq_len, heads, local_heads, window_size, causal = causal, local_attn_window_size = local_attn_window_size, attn_dropout = attn_dropout, dropout = attn_layer_dropout, kmeans_ema_decay = kmeans_ema_decay, commitment_factor = commitment_factor, rel_pos_emb = rel_pos_emb, num_mem_kv = num_mem_kv, shared_qk = shared_qk)\n get_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu), along_dim=1)\n get_context_attn = lambda: SelfAttention(dim, depth, max_seq_len, heads, 0, window_size, local_attn_window_size = local_attn_window_size, attn_dropout = attn_dropout, dropout = attn_layer_dropout, kmeans_ema_decay = kmeans_ema_decay, commitment_factor = commitment_factor, receives_context = True, context_window_size = context_window_size, num_mem_kv = num_mem_kv, shared_qk = context_shared_qk)\n get_context_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu), along_dim=1)\n get_pkm = lambda: PKM(dim, num_keys = pkm_num_keys)\n\n if weight_tie:\n assert len(set(n_local_attn_heads)) == 1, 'you can only weight tie if number of local attention heads for all layers is the same'\n get_attn, get_ff, get_context_attn, get_context_ff, get_pkm = map(cache_fn, (get_attn, get_ff, get_context_attn, get_context_ff, get_pkm))\n\n for ind, local_heads in zip(range(depth), n_local_attn_heads):\n layer = ind + 1\n use_ff = layer not in cast_tuple(pkm_layers)\n get_parallel_fn = get_ff if use_ff else get_pkm\n\n layers.append(nn.ModuleList([\n fn_wrapper(get_attn(local_heads)),\n fn_wrapper(get_parallel_fn())\n ]))\n\n if not receives_context:\n continue\n\n layers.append(nn.ModuleList([\n fn_wrapper(get_context_attn()),\n fn_wrapper(get_context_ff())\n ]))\n\n execute_type = ReversibleSequence if reversible else SequentialSequence\n\n attn_context_layer = ((True, False),) if receives_context else tuple()\n route_attn = ((True, False), *attn_context_layer) * depth\n route_context = ((False, False), *attn_context_layer) * depth\n\n context_route_map = {'context': route_context, 'context_mask': route_context} if receives_context else {}\n attn_route_map = {'input_mask': route_attn}\n self.layers = execute_type(layers, args_route = {**attn_route_map, **context_route_map}, layer_dropout = layer_dropout)\n\n if _register_kmeans_update:\n update_kmeans_on_backwards(self)\n\n has_local_attn = any([num > 0 for num in n_local_attn_heads])\n self.pad_to_multiple = local_attn_window_size if has_local_attn else 0\n\n def forward(self, x, **kwargs):\n x, loss = self.layers(x, **kwargs)\n return x, loss\n\nclass RoutingTransformerLM(nn.Module):\n def __init__(self, num_tokens, dim, depth, max_seq_len, heads = 8, window_size = 64, local_attn_window_size = None, causal = False, emb_dim = None, weight_tie = False, attn_dropout = 0., ff_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., ff_mult = 4, ff_activation = None, ff_glu = False, return_embeddings = False, n_local_attn_heads = 0, reversible = False, ff_chunks = 1, kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, rel_pos_emb = True, _register_kmeans_update = True, pkm_layers = tuple(), pkm_num_keys = 128, num_mem_kv = 0, shared_qk = None, context_shared_qk = False):\n super().__init__()\n 
assert (max_seq_len % window_size) == 0, 'max sequence length must be divisible by the window size, to calculate number of kmeans cluster'\n emb_dim = default(emb_dim, dim)\n self.max_seq_len = max_seq_len\n\n self.token_emb = nn.Embedding(num_tokens, emb_dim)\n self.axial_pos_emb = AxialPositionalEmbedding(emb_dim, axial_shape=(max_seq_len // window_size, window_size))\n self.routing_transformer = RoutingTransformer(dim, depth, max_seq_len, heads = heads, window_size = window_size, local_attn_window_size = local_attn_window_size, causal = causal, weight_tie = weight_tie, ff_dropout = ff_dropout, attn_dropout = attn_dropout, attn_layer_dropout = attn_layer_dropout, layer_dropout = layer_dropout, n_local_attn_heads = n_local_attn_heads, ff_glu = ff_glu, reversible = reversible, ff_chunks = ff_chunks, kmeans_ema_decay = kmeans_ema_decay, receives_context = receives_context, context_window_size = context_window_size, rel_pos_emb = rel_pos_emb, pkm_layers = pkm_layers, pkm_num_keys = pkm_num_keys, num_mem_kv = num_mem_kv, shared_qk = shared_qk, context_shared_qk = context_shared_qk, _register_kmeans_update = _register_kmeans_update)\n\n if emb_dim != dim:\n self.routing_transformer = ProjectInOut(self.routing_transformer, emb_dim, dim, project_out = not return_embeddings)\n\n self.out = nn.Linear(emb_dim, num_tokens) if not return_embeddings else identity\n\n def forward(self, x, **kwargs):\n x = self.token_emb(x)\n x = x + self.axial_pos_emb(x)\n x, loss = self.routing_transformer(x, **kwargs)\n return self.out(x), loss\n","sub_path":"routing_transformer/routing_transformer.py","file_name":"routing_transformer.py","file_ext":"py","file_size_in_byte":27337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"312323302","text":"\"\"\"\nUniversaly HSV range is ([0-359,0-100,0-100])\nBut in openCV HSV range is ([0-179,0-255,0-255])\nSo we need to normalize it\n1. For H we just need to divide by 2.\n Example-340 in U-HSV will be 170 in cv-HSV\n2. For S and V we need to cv-HSV value = U-HSV(%) * 2.55\n Example-55% in U-HSV will be = 55 * 2.55 = 140.25 ~ 140\n\"\"\"\n\n\n\nimport cv2\nimport numpy as np\nimport os\n\npath='/home/pranjal/Desktop/RM/RM-Coding-kids/Pranjal/OpenCV/Object_Filtering'\ncap=cv2.VideoCapture(0)\nwhile True:\n ret,frame=cap.read()\n frame=cv2.flip(frame,1)\n blur = cv2.GaussianBlur(frame,(11,11),0)\n HSV_frame =cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n\n\n#For Blue Screw driver head\n #low_blue=np.array([90,80,50]) #just making an array\n #high_blue=np.array([120,255,255])\n\n#For yellow cap\n low_orange=np.array([20,100,100]) #just making an array\n high_orange=np.array([30,255,255])\n\n mask=cv2.inRange(HSV_frame,low_orange,high_orange) #Thresholding within limits\n\n kernel = np.ones((5,5),np.uint8)\n img_erosion = cv2.erode(mask, kernel, iterations=1)\n img_dilation = cv2.dilate(img_erosion, kernel, iterations=1)\n\n\n res = cv2.bitwise_and(frame,frame,mask= img_dilation)\n#Contouring or outlining\n (cnts,_) = cv2.findContours(img_dilation.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n #cv2.drawContours(res,cnts,-1,(0,200,0),3)\n\n for c in cnts:\n # get the bounding rect\n x, y, w, h = cv2.boundingRect(c)\n # draw a green rectangle to visualize the bounding rect\n cv2.rectangle(res, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n cv2.imshow('C',res)\n var=cv2.waitKey(1)\n if var == ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Second_Years/Pranjal/OpenCV/Object_Filtering/Filter_objects_HSV.py","file_name":"Filter_objects_HSV.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"382815989","text":"#Challenge 2\n#Created by: Zach Golik\n\nword1=('the')\nword2=('cat')\nword3=('sat')\nword4=('on')\nword5=('the')\nword6=('mat')\n\nprint(word1, word2, word3, word4, word5, word6)\n#Completed successfully? Yes\n#Did you have any errors? Yes, simple logic errors\n#How did you solve them? Made it simpler\n#What did yo ufind difficult? Nothing\n","sub_path":"Challenge 2.py","file_name":"Challenge 2.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"639612369","text":"import os\nimport re\nimport logging\nfrom datetime import datetime, timedelta\nfrom dateutil.parser import parse\nfrom todoist.api import TodoistAPI\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef get_token():\n token = os.getenv('TODOIST_APIKEY')\n return token\n\n\ndef get_project(api):\n project = os.getenv('TODOIST_PROJECT')\n if not project:\n return None\n for p in api.state['projects']:\n if p['name'] == project:\n return p['id']\n\n\ndef is_habit(text):\n return re.search(r'\\[day\\s(\\d+)\\]', text)\n\n\ndef update_streak(item, streak):\n days = '[day {}]'.format(streak)\n text = re.sub(r'\\[day\\s(\\d+)\\]', days, item['content'])\n item.update(content=text)\n\n\ndef main():\n API_TOKEN = get_token()\n today = datetime.utcnow().replace(tzinfo=None)\n if not API_TOKEN:\n logging.error('Please set the API token in environment variable.')\n exit()\n api = TodoistAPI(API_TOKEN)\n api.sync()\n project_id = get_project(api)\n tasks = api.state['items']\n for task in tasks:\n content = task['content']\n if all([\n task['due_date_utc'],\n is_habit(content),\n not project_id or task['project_id'] == project_id\n ]):\n logger.info(\"Found task id:%s content:%s\", task['id'], content[:20])\n date_string = task['date_string'] or 'ev day'\n task_id = task['id']\n due_at = parse(task['due_date_utc'], ignoretz=True)\n days_left = due_at.date() - today.date()\n if days_left:\n habit = is_habit(content)\n streak = int(habit.group(1)) + 1\n update_streak(task, streak)\n api.notes.add(task_id, '[BOT] Streak extended. Yay!')\n else:\n update_streak(task, 0)\n task.update(date_string=date_string + ' starting tod')\n api.notes.add(task_id, '[BOT] Chain broken :(')\n api.commit()\n\nif __name__ == '__main__':\n main()\n","sub_path":"habits.py","file_name":"habits.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"17724173","text":"__author__ = 'ugrend'\nfrom apscheduler.scheduler import Scheduler\n\nclass scheduler():\n\n\n def checkCouchPotatoStatus(self):\n from movielist.core.couchpotato import CouchPotato\n cp = CouchPotato(self.settings)\n cp.checkAllStatus()\n\n def getMoviePone(self):\n from movielist.core.datamining.moviefone import moviepone\n mp = moviepone()\n mp.insertFone()\n for m in range (1,13):\n mp.insertFone(m)\n\n\n def __init__(self,settings):\n self.settings = settings\n self.sched = Scheduler()\n self.sched.start()\n self.sched\n\n\n\n\n\n\n\n","sub_path":"movielist/core/scheduler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"597051910","text":"# -*- coding: utf-8 -*-\n\n# Python's Libraries\nfrom __future__ import unicode_literals\n\n# Django's Libraries\nfrom django.db import models\n\n# Third-party Libraries\nfrom django_resized import ResizedImageField\n\n# Own's Libraries\nfrom .utilities import Helper\nfrom security.models import Profile\n\n\nclass Post(models.Model):\n\n STATUS = (\n ('PUB', 'PUBLICADO'),\n ('EDT', 'Editando'),\n )\n\n title = models.CharField(\"Titulo\", max_length=120)\n image = ResizedImageField(\n \"Imagen\",\n upload_to=Helper.get_ImagePath_Post,\n quality=75,\n blank=True,\n validators=[\n Helper.validate_Img_Extension,\n Helper.validate_Size\n ]\n )\n content = models.TextField(\"Contenido\", blank=True)\n\n status = models.CharField(\n \"Estado\",\n max_length=3,\n choices=STATUS,\n default=\"EDT\"\n )\n created_by = models.ForeignKey(\n Profile,\n verbose_name=\"Creado por\",\n related_name='post_created_by',\n null=True\n )\n created_date = models.DateTimeField(auto_now=False, auto_now_add=True)\n updated_by = models.ForeignKey(\n Profile,\n verbose_name=\"Actualizado por\",\n related_name='post_updated_by',\n null=True\n )\n updated_date = models.DateTimeField(auto_now=True, auto_now_add=False)\n\n def __unicode__(self):\n return self.title\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Pubicación'\n verbose_name_plural = 'Publicaciones'\n","sub_path":"editorial/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"193314604","text":"from path import Path\nimport os\n\nroot = Path('.')\nf_root = open(root / 'README.md', 'w')\nf_root.write('## Introduction' + os.linesep + os.linesep)\nf_root.close()\nf_summary = open(root / 'SUMMARY.md', 'w')\nf_summary.write('# Introduction' + os.linesep + os.linesep + '* [Introduction](README.md)' + os.linesep)\nf_summary.close()\nf_root = open(root / 'README.md', 'a')\nf_summary = open(root / 'SUMMARY.md', 'a')\nfor dir in root.dirs():\n if dir.stem != '_book' and dir.stem != '.git':\n f = open(dir / 'README.md', 'w')\n f.write('## ' + dir.stem.replace('-', ' ').upper() + os.linesep + os.linesep)\n f.close()\n f_summary.write(os.linesep + '# ' + dir.stem.replace('-', ' ').upper() + os.linesep + os.linesep)\n f_root.write('* [' + dir.stem.replace('-', ' ').upper() + ']' + '(' + dir.stem + '/README.md)' + os.linesep)\n f_summary.write('* [' + dir.stem.replace('-', ' ').upper() + ']' + '(' + dir.stem + '/README.md)' + os.linesep)\n for file in dir.files():\n print(file)\n if file.stem != 'img' and file.stem != 'README':\n f = open(dir / 'README.md', 'a')\n f.write('* [' + file.stem.replace('-', ' ') + '](' + file.basename() + ')' + os.linesep)\n f_summary.write(' * [' + file.stem.replace('-', ' ') + '](' + dir.stem + '/' + file.basename() + ')' + os.linesep)\n\nf_root.close()\nf_summary.close()\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"594091847","text":"import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nimport skimage.measure\nfrom skimage import io\nfrom settings import *\n\npath = './img/enemy1/sample/'\n\n\ndef Capture():\n game = game_default(False)\n \n game.init()\n tempPicture = game.get_state().screen_buffer\n \n #plt.imshow(tempPicture)\n #plt.show()\n\n game.close()\n saveImg(tempPicture)\n \n print('finish!')\ndef ORB(img):\n #Initiate ORB detector\n orb = cv.ORB_create()\n # find the keypoints with ORB\n kp = orb.detect(img,None)\n # compute the descriptors with ORB\n kp, des = orb.compute(img, kp)\n # draw only keypoints location,not size and orientation\n img2 = cv.drawKeypoints(img, kp, None, color=(0,255,0), flags=0)\n #plt.imshow(img2,cmap='gray'), plt.show()\n #plt is show bgr, you need change to rgb\n plt.axis(\"off\")\n plt.imshow(img2),plt.show()\n #plt.imshow(cv.cvtColor(img2, cv.COLOR_BGR2RGB)), plt.show()\n\ndef orb_compare():\n \n \n \n img1 = cv.imread(path+'e1_rgb.png')\n img2 = cv.imread(path+'7.jpg')\n #gray\n gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\n gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n \n orb = cv.ORB_create()\n\n kp1 = orb.detect(img1,None)\n kp2 = orb.detect(img2,None)\n kp1, des1 = orb.compute(img, kp1)\n kp2, des2 = orb.compute(img, kp2)\n\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n img3 = cv.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)\n plt.imshow(cv.cvtColor(img3, cv.COLOR_BGR2RGB)), plt.show()\n\n\ndef maxPooling(X,x,Y,y,img):\n return skimage.measure.block_reduce(img,(x,y),np.max)\n\ndef akaze_compare():\n img1 = cv.imread(path+'e1_rgb.png')\n img2 = cv.imread(path+'7.jpg')\n #gray\n gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\n gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n akaze = cv.AKAZE_create()\n kp1, des1 = akaze.detectAndCompute(gray1, None)\n kp2, des2 = akaze.detectAndCompute(gray2, None)\n\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n img3 = cv.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)\n plt.imshow(cv.cvtColor(img3, cv.COLOR_BGR2RGB)), plt.show()\n \ndef monster_feature():\n #img = cv.imread(path+'7.jpg')\n img = cv.imread(path+'e1_rgb.png')\n #gray\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n \n blurred = cv.GaussianBlur(gray,(17,25),0)\n canny = cv.Canny(blurred, 30 ,150)\n #orb = cv.ORB_create()\n akaze = cv.AKAZE_create()\n kp, des = akaze.detectAndCompute(blurred, None)\n # draw only keypoints location,not size and orientation\n img2 = cv.drawKeypoints(blurred, kp, None, color=(0,255,0), flags=0)\n \n result = np.hstack([gray, blurred, canny])\n plt.imshow(img2,cmap='gray'), plt.show()\n \n \n blurred = cv.GaussianBlur(gray, (15, 15), 0)\n canny = cv.Canny(blurred, 30, 150)\n\n\n result = np.hstack([gray, blurred, canny])\n plt.imshow(result,cmap='gray'), plt.show()\n\n '''\n akaze = cv.AKAZE_create()\n kp, des = akaze.detectAndCompute(gray, None)\n img2 = cv.drawKeypoints(img, kp, None, color=(0,255,0), flags=0)\n plt.imshow(cv.cvtColor(img2, cv.COLOR_BGR2RGB)), plt.show()\n '''\n\n#Capture()\n#img = cv.imread(path+'4.jpg')\n#ORB(img)\n#orb_compare()\n#akaze_compare()\nmonster_feature()\n'''\ngame = game_default()\ntempPicture = 
game.get_state().depth_buffer\ngame.init()\ngame.close()'''\n","sub_path":"mywork/objDetect.py","file_name":"objDetect.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
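One correction worth noting in objDetect.py above: orb_compare() passes an undefined name img into orb.compute(), and the grayscale images it builds are never used. A fixed pairing sketch, using detectAndCompute on the grayscale frames and the same cv alias as the file:

orb = cv.ORB_create()
kp1, des1 = orb.detectAndCompute(gray1, None)
kp2, des2 = orb.detectAndCompute(gray2, None)
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
matches = sorted(bf.match(des1, des2), key=lambda m: m.distance)  # best matches first
img3 = cv.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)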
+{"seq_id":"303926625","text":"import gen_core\nimport json\nimport os\n\nunits = gen_core.units\n\ntools = gen_core.tools\n\nbase_directory = gen_core.base_directory\n\nfor i in units:\n with open(base_directory + i) as k:\n unit = json.load(k)\n if \"production\" in unit:\n if \"metal\" in unit[\"production\"]:\n unit[\"production\"][\"metal\"] = unit[\"production\"][\"metal\"] * 2\n if \"storage\" in unit:\n if \"metal\" in unit[\"storage\"]:\n unit[\"storage\"][\"metal\"] = unit[\"storage\"][\"metal\"] * 2\n\n if \"factory_cooldown_time\" in unit:\n unit[\"factory_cooldown_time\"] = unit[\"factory_cooldown_time\"] // 2\n\n if \"navigation\" in unit:\n if \"move_speed\" in unit[\"navigation\"]:\n unit[\"navigation\"][\"move_speed\"] = int(unit[\"navigation\"][\"move_speed\"] * 1.5)\n if \"acceleration\" in unit[\"navigation\"]:\n unit[\"navigation\"][\"acceleration\"] = int(unit[\"navigation\"][\"acceleration\"] * 1.5)\n if \"turn_speed\" in unit[\"navigation\"]:\n unit[\"navigation\"][\"turn_speed\"] = int(unit[\"navigation\"][\"turn_speed\"] * 1.5)\n\n\n if i[0:7] == '/pa_ex1':\n i = '/pa' + i[7:] \n\n try:\n with open('hypa' + i, 'w+') as out:\n c = 0\n except:\n os.makedirs(\"/\".join(('hypa' + i).split(\"/\")[:-1]))\n\n with open('hypa' + i, 'w+') as out:\n json.dump(unit, out)\n\nfor i in tools:\n with open(base_directory + i) as k:\n tool = json.load(k)\n\n if \"construction_demand\" in tool:\n if \"metal\" in tool[\"construction_demand\"]:\n tool[\"construction_demand\"][\"metal\"] = tool[\"construction_demand\"][\"metal\"] * 2\n\n if \"rate_of_fire\" in tool:\n tool[\"rate_of_fire\"] = tool[\"rate_of_fire\"] * 1.5\n if \"pitch_rate\" in tool:\n tool[\"pitch_rate\"] = tool[\"pitch_rate\"] * 1.5\n if \"yaw_rate\" in tool:\n tool[\"yaw_rate\"] = tool[\"yaw_rate\"] * 1.5\n\n if \"ammo_demand\" in tool:\n tool[\"ammo_demand\"] = tool[\"ammo_demand\"] * 1.5 \n\n\n if i[0:7] == '/pa_ex1':\n i = '/pa' + i[7:] \n\n try:\n with open('hypa' + i, 'w+') as out:\n c = 0\n except:\n os.makedirs(\"/\".join(('hypa' + i).split(\"/\")[:-1]))\n\n with open('hypa' + i, 'w+') as out:\n json.dump(tool, out)","sub_path":"gen_hypa.1.py","file_name":"gen_hypa.1.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"26546084","text":"import pandas as pd\nimport numpy as np\nfrom scipy.misc import imread\nfrom keras.layers import Flatten, Dense, Activation, Convolution2D, normalization\nfrom keras.models import Sequential\nimport matplotlib.pyplot as plt\nimport cv2\nfrom sklearn.model_selection import train_test_split\nfrom keras.optimizers import Adam\n\ndef extract_and_transform(df, col, pos):\n #Find and bring in the center picture from its residing folder\n im = imread(df[col][pos].strip())\n #Crop the image to get rid of the trees and landscape\n im = im[60:,:]\n #Resize the image\n im = cv2.resize(im, (200, 66))\n return im\n\ndef normalize(img_batch):\n #Normalize the extracted images\n norm = (np.array(img_batch)/127.5) - 1\n #Convert the image to a floating point number\n return norm.astype(np.float32)\n\ndef load_bottleneck_data(driving_log):\n file = driving_log #driving_log.csv\n #Name all the columns in the dataframe\n cols = ['Center_Image','Left_Image','Right_Image','Steering_Angle','Throttle','Break','Speed']\n df = pd.read_csv(file, names = cols)\n \n #Store all the center images\n X = []\n for i in range(len(df)):\n transformed = extract_and_transform(df, cols[0], i)\n X.append(transformed)\n \n X = normalize(X)\n #Convert the steering angles to a list and set them to y\n y = df[cols[3]].tolist()\n \n #Split the data into testing and training data\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)\n\n return X_train, X_test, y_train, y_test\n\n\n# load bottleneck data\nX_train, X_test, y_train, y_test = load_bottleneck_data('C:/Users/asyed/Downloads/driving_log.csv')\n\n#Reshape all the data for the model architecture\nX_train = np.reshape(X_train, (len(X_train), 66, 200, 3))\nX_test = np.reshape(X_test, (len(X_test), 66, 200, 3))\ny_train = np.reshape(y_train, (len(y_train), 1))\ny_test = np.reshape(y_test, (len(y_test), 1))\n\ninput_shape = X_train.shape[1:]\n#the output is a steering angle (i.e. 
1 class)\nnb_classes = 1\n\n\nmodel = Sequential()\n# the first layer in a Sequential model must declare the input shape\nmodel.add(normalization.BatchNormalization(input_shape=input_shape))\nmodel.add(Convolution2D(24, 5, 5, subsample=(2,2), border_mode='valid', activation='relu'))\nmodel.add(Convolution2D(36, 5, 5, subsample=(2,2), border_mode='valid', activation='relu'))\nmodel.add(Convolution2D(48, 5, 5, subsample=(2,2), border_mode='valid', activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, subsample=(1,1), border_mode='valid', activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, subsample=(1,1), border_mode='valid', activation='relu'))\nmodel.add(Flatten())\n\n#model.add(Dropout(0.25))\nmodel.add(Dense(1164))\nmodel.add(Activation('relu'))\nmodel.add(Dense(100))\nmodel.add(Activation('relu'))\nmodel.add(Dense(50))\nmodel.add(Activation('relu'))\nmodel.add(Dense(10))\nmodel.add(Activation('relu'))\n#model.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.summary()\nmodel.compile(loss='mse', optimizer=Adam(), metrics=['mean_squared_error'])\n# fit_generator expects a generator object, not in-memory arrays, so train with fit()\nhistory = model.fit(X_train, y_train, nb_epoch=3, batch_size=10,\n validation_data=(X_test, y_test), shuffle=True, verbose=1)\n#Output the JSON and H5 file for trying in the driving program provided\nmodel_json = model.to_json()\nwith open(\"C:/Users/asyed/Downloads/model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"C:/Users/asyed/Downloads/model.h5\")\nprint(\"Saved model to disk\")","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
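fit() works here because the whole driving log fits in memory. To stream batches instead, fit_generator must be handed an actual generator yielding (X, y) tuples; a minimal sketch for this Keras 1.x-style API, where the batch size and random sampling scheme are assumptions:

def batch_generator(X, y, batch_size=32):
    # yield random batches forever; Keras stops each epoch after samples_per_epoch
    while True:
        idx = np.random.choice(len(X), batch_size)
        yield X[idx], y[idx]

history = model.fit_generator(batch_generator(X_train, y_train),
                              samples_per_epoch=len(X_train),
                              nb_epoch=3,
                              validation_data=(X_test, y_test))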
+{"seq_id":"132954785","text":"# Author: Chase Chivers\n# Last updated: 10/28/19\n# Modular build for 2d heat diffusion problem\n# applied to liquid water in the ice shell of Europa\n\nimport numpy as np\nfrom scipy import optimize\nfrom HeatSolver import HeatSolver\n\nclass IceSystem(HeatSolver):\n\t\"\"\"\n\tClass with methods to set up initial conditions for two-dimensional, two-phase thermal diffusion model that\n\tincludes temperature-dependent conductivity and salinity. Includes the HeatSolver class used to solve the heat\n\tequation utilizing an enthalpy method (Huber et al., 2008) to account for latent heat from phase change as well\n\tas a parameterization for a saline system.\n\t\"\"\"\n\tdef __init__(self, Lx, Lz, dx, dz, kT=True, cpT=False, use_X_symmetry=False):\n\t\t\"\"\"\n\t\tInitialize the system.\n\t\tParameters:\n\t\t\tLx : float\n\t\t\t\tlength of horizontal spatial domain, m\n\t\t\tLz : float\n\t\t\t\tthickness of shell, length of vertical spatial domain, m\n\t\t\tdx : float\n\t\t\t\thorizontal spatial step size, m\n\t\t\tdz : float\n\t\t\t\tvertical spatial step size, m\n\t\t\tcpT : bool\n\t\t\t choose whether to use temperature-depedent specific heat,\n\t\t\t default = False.\n\t\t\t True: temperature-dependent, cp_i ~ 185 + 7*T (Hesse et al., 2019)\n\t\t\tkT : bool\n\t\t\t choose whether to use temperature-dependent thermal conductivity,\n\t\t\t default = True, temperature-dependent, k=ac/T (Petrenko, Klinger, etc.)\n\t\t\tuse_X_symmetry : bool\n\t\t\t\tassume the system is symmetric about the center of the intrusion\n\t\t\t\t* NOTE: Must use Reflecting boundary condition for sides if using this\n\t\t\tissalt : bool\n\t\t\t\tdeclare whether salinity will be used in this system, necessary for declaring fit functions and\n\t\t\t\tmelting temperature calculations\n\t\tUsage:\n\t\t\tIce Shell is 40 km thick and 40 km wide at a spatial discretization of 50 m.\n\t\t\t\tmodel = IceSystem(40e3, 40e3, 50, 50)\n\n\t\t\tSee README\n\t\t\"\"\"\n\n\t\tself.Lx, self.Lz = Lx, Lz\n\t\tself.dx, self.dz = dx, dz\n\t\tself.nx, self.nz = int(Lx / dx + 1), int(Lz / dz + 1)\n\t\tself.Z = np.array([j * dz for j in range(self.nz)], dtype=float) # positive down\n\t\tif use_X_symmetry:\n\t\t\tself.symmetric = True\n\t\t\tself.Lx = self.Lx / 2\n\t\t\tself.nx = int(self.Lx / self.dx + 1)\n\t\t\tself.X = np.array([i * dx for i in range(self.nx)], dtype=float)\n\t\t\tself.X, self.Z = np.meshgrid(self.X, self.Z) # create spatial grid\n\t\telif use_X_symmetry is False:\n\t\t\tself.X = np.array([-Lx / 2 + i * dx for i in range(self.nx)], dtype=float) # x domain centered on 0\n\t\t\tself.X, self.Z = np.meshgrid(self.X, self.Z, dtype=float) # create spatial grid\n\t\tself.T = np.zeros((self.nz, self.nx), dtype=float) # initialize domain at one temperature\n\t\tself.S = np.zeros((self.nz, self.nx), dtype=float) # initialize domain with no salt\n\t\tself.phi = np.zeros((self.nz, self.nx), dtype=float) # initialize domain as ice\n\t\tself.kT, self.cpT = kT, cpT # k(T), cp_i(T) I/O\n\t\tself.issalt = False # salt I/O\n\n\tclass constants:\n\t\t\"\"\"\n\t\tNo-methods class used for defining constants in a simulation. May be changed inside here or as an\n\t\tinstance during simulation runs.\n\t\t\"\"\"\n\t\tstyr = 3.154e7 # s/yr, seconds in a year\n\n\t\tg = 1.32 # m/s2, Europa surface gravity\n\n\t\t# Thermal properties\n\t\trho_i = 917. # kg/m3, pure ice density\n\t\trho_w = 1000. 
# kg/m3 pure water density\n\t\tcp_i = 2.11e3 # J/kgK, pure ice specific heat\n\t\tcp_w = 4.19e3 # J/kgK, pure water specific heat\n\t\tki = 2.3 # W/mK, pure ice thermal conductivity\n\t\tkw = 0.56 # W/mK, pure water thermal conductivity\n\t\tac = 567 # W/m, ice thermal conductivity constant, ki = ac/T (Klinger, 1980)\n\t\tTm = 273.15 # K, pure ice melting temperature at 1 atm\n\t\tLf = 333.6e3 # J/kg, latent heat of fusion of ice\n\t\texpans = 1.6e-4 # 1/K, thermal expansivity of ice\n\n\t\trho_s = 0. # kg/m3, salt density, assigned only when salinity is used\n\n\t\t# Radiation properties\n\t\temiss = 0.97 # pure ice emissivity\n\t\tstfblt = 5.67e-8 # W/m2K4 Stefan-Boltzman constant\n\n\t\t# Constants for viscosity dependent tidal heating\n\t\t# from Mitri & Showman (2005)\n\t\tact_nrg = 26. # activation energy for diffusive regime\n\t\tQs = 60e3 # J/mol, activation energy of ice (Goldsby & Kohlstadt, 2001)\n\t\tRg = 8.3144598 # J/K*mol, gas constant\n\t\teps0 = 1e-5 # maximum tidal flexing strain\n\t\tomega = 2.5e-5 # 1/s, tidal flexing frequency\n\t\tvisc0i = 1e13 # Pa s, minimum reference ice viscosity at T=Tm\n\t\tvisc0w = 1.3e-3 # Pa s, dynamic viscosity of water at 0 K\n\n\t\t# Mechanical properties of ice\n\t\tG = 3.52e9 # Pa, shear modulus/rigidity (Moore & Schubert, 2000)\n\t\tE = 2.66 * G # Pa, Young's Modulus\n\n\tdef save_initials(self):\n\t\t\"\"\" Save initial values to compare with simulation results. \"\"\"\n\t\tself.T_initial = self.T.copy()\n\t\tself.Tm_initial = self.Tm.copy()\n\t\tself.phi_initial = self.phi.copy()\n\t\tself.S_initial = self.S.copy()\n\n\t\tif self.kT:\n\t\t\tself.k_initial = self.phi_initial * self.constants.kw + (1 - self.phi_initial) * \\\n\t\t\t self.constants.ac / self.T_initial\n\t\telse:\n\t\t\tself.k_initial = self.phi_initial * self.constants.kw + (1 - self.phi_initial) * self.constants.ki\n\n\tdef init_volume_averages(self):\n\t\t\"\"\"\n\t\tInitialize volume averaged values over the domain. In practice, this is automatically called by any future\n\t\tfunction that are changing physical parameters such as liquid fraction, salinity or temperature.\n\t\t\"\"\"\n\t\tif self.kT:\n\t\t\tself.k = (1 - self.phi) * (self.constants.ac / self.T) + self.phi * self.constants.kw\n\t\telse:\n\t\t\tself.k = (1 - self.phi) * self.constants.ki + self.phi * self.constants.kw\n\n\t\tif self.cpT == \"GM89\":\n\t\t\t\"Use temperature-dependent specific heat for pure ice from Grimm & McSween 1989\"\n\t\t\tself.cp_i = 185. 
+ 7.037 * self.T\n\t\telif self.cpT == \"CG10\":\n\t\t\t\"Use temperature-dependent specific heat for pure ice from Choukroun & Grasset 2010\"\n\t\t\tself.cp_i = 74.11 + 7.56 * self.T\n\t\telse:\n\t\t\tself.cp_i = self.constants.cp_i\n\n\t\t# this is very unimportant overall\n\t\tif self.issalt:\n\t\t\tself.rhoc = (1 - self.phi) * (self.constants.rho_i + self.Ci_rho * self.S) * self.cp_i \\\n\t\t\t + self.phi * (self.constants.rho_w + self.C_rho * self.S) * self.constants.cp_w\n\n\t\telse:\n\t\t\tself.rhoc = (1 - self.phi) * self.constants.rho_i * self.cp_i \\\n\t\t\t + self.phi * self.constants.rho_w * self.constants.cp_w\n\n\t\tself.save_initials()\n\n\tdef init_T(self, Tsurf, Tbot, profile='non-linear', real_Lz=0):\n\t\t\"\"\"\n\t\tInitialize temperature profile\n\t\t\tParameters:\n\t\t\t\tTsurf : float\n\t\t\t\t\tsurface temperature\n\t\t\t\tTbot : float\n\t\t\t\t\ttemperature at bottom of domain\n\t\t\t\tprofile : string\n\t\t\t\t\t-> defaults to 'non-linear'\n\t\t\t\t\tprescribed temperature profile\n\t\t\t\t\t'non-linear' -- expected equilibrium thermal gradient with k(T)\n\t\t\t\t\t'linear' -- equilibirium thermal gradient for constant k\n\t\t\t\t\t'stefan' -- sets up the freezing stefan problem temperature profile\n\t\t\t\t\t\t\t\t\tin this instance, Tbot should be the melting temperature\n\t\t\t\treal_Lz : float\n\t\t\t\t\tused if you want to simulate some portion of a much larger shell, so this parameter is used to\n\t\t\t\t\tmake the temperature profile that of the much larger shell than the one being simulated.\n\t\t\t\t\tFor example, a 40 km conductive shell (real_Lz = 40e3) discretized at 10 m can be computationally\n\t\t\t\t\texpensive.However, if we assume that any temperature anomaly at shallow depths (~1-5km) won't\n\t\t\t\t\treach to 40km within the model time, we can reduce the computational domain to ~5km to speed up\n\t\t\t\t\tthe simulation. 
This will take the Tbot as the Tbot of a 40km and find the temperature at 5km to\n\t\t\t\t\taccount for the reduced domain size.\n\t\t\t\t\tUsage case down below\n\t\t\tReturns:\n\t\t\t\tT : (nz,nx) grid\n\t\t\t\t\tgrid of temperature values\n\n\t\t\tUsage :\n\t\t\t\tDefault usage:\n\t\t\t\t\tmodel.init_T(Tsurf=75, Tbot=273.15)\n\n\t\t\t\tLinear profile:\n\t\t\t\t\tmodel.init_T(Tsurf = 50, Tbot = 273.15, profile='linear')\n\n\t\t\t\tCheated domain:\n\t\t\t\t\trealLz = 50e3\n\t\t\t\t\tmodelLz = 5e3\n\t\t\t\t\tmodel = IceSystem(Lz=modelLz, ...)\n\t\t\t\t\tmodel.init_T(Tsurf=110,Tbot=273.15,real_lz=realLz)\n\t\t\"\"\"\n\t\t# set melting temperature to default\n\t\tself.Tm = self.constants.Tm * np.ones(self.X.shape)\n\n\t\tif isinstance(profile, str):\n\t\t\tif profile == 'non-linear':\n\t\t\t\tif real_Lz > 0:\n\t\t\t\t\tTbot = Tsurf * (Tbot / Tsurf) ** (self.Lz / real_Lz)\n\t\t\t\tself.T = Tsurf * (Tbot / Tsurf) ** (abs(self.Z / self.Lz))\n\n\t\t\telif profile == 'linear':\n\t\t\t\tif real_Lz > 0:\n\t\t\t\t\tTbot = (Tbot - Tsurf) * (self.Lz / real_Lz) + Tsurf\n\t\t\t\tself.T = (Tbot - Tsurf) * abs(self.Z / self.Lz) + Tsurf\n\n\t\t\telif profile == 'stefan':\n\t\t\t\tself.T[0, :] = Tsurf # set the very top of grid to surface temperature\n\t\t\t\tself.T[1:, :] = Tbot # everything below is at the melting temperature\n\t\t\t\tself.phi[1:, :] = 1 # everything starts as liquid\n\t\t\t\tprofile += ' plus domain all water'\n\n\t\t\tprint('init_T(Tsurf = {}, Tbot = {})'.format(Tsurf, Tbot))\n\t\t\tprint('\\t Temperature profile initialized to {}'.format(profile))\n\n\t\telse:\n\t\t\tself.T = profile\n\t\t\tprint('init_T: custom profile implemented')\n\n\t\t# save boundaries for dirichlet or other\n\t\t# left and right boundaries\n\t\tself.TtopBC = self.T[0, :]\n\t\tself.TbotBC = self.T[-1, :]\n\t\tself.Tedge = self.T[:, 0] = self.T[:, -1]\n\t\tself.Tsurf = Tsurf\n\t\tself.Tbot = Tbot\n\t\tself.init_volume_averages()\n\n\tdef set_intrusion_geom(self, depth, thickness, radius, geometry='ellipse'):\n\t\t\"\"\"\n\t\tSets geometry of intrusion. In practice, is automatically called by init_intrusion() and generally unneeded\n\t\tto be called in simulation script. 
Creates tuple IceSystem.geom that holds the initial intrusion grid indices for\n\t\tmanipulation inside simulation and outside for more customization.\n\t\t\"\"\"\n\n\t\tif isinstance(geometry, str):\n\t\t\tif geometry == 'ellipse':\n\t\t\t\tcenter = thickness / 2 + depth\n\t\t\t\ttry:\n\t\t\t\t\tif self.symmetric: # adjust geometry to make sure the center of the intrusion isn't on the boundary\n\t\t\t\t\t\t_R_ = self.X - self.dx\n\t\t\t\t\t\tthickness += self.dz\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t_R_ = self.X\n\t\t\t\tself.geom = np.where((_R_ / radius) ** 2 + (self.Z - center) ** 2 / ((thickness / 2) ** 2) <= 1)\n\t\t\t# del center, _R_\n\t\t\telif geometry == 'box':\n\t\t\t\ttry:\n\t\t\t\t\tif self.symmetric: # adjust geometry to make sure the center of the intrusion isn't on the boundary\n\t\t\t\t\t\tradius += self.dx\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tradius = radius\n\t\t\t\tr = np.where(abs(self.X[0, :]) <= radius)[0]\n\t\t\t\tz = np.intersect1d(np.where(self.Z[:, 0] <= thickness + depth), np.where(self.Z[:, 0] >= depth))\n\t\t\t\ttmp = np.zeros(self.T.shape)\n\t\t\t\ttmp[z.min():z.max(), r.min():r.max() + 1] = 1\n\t\t\t\tself.geom = np.where(tmp == 1)\n\t\t# del tmp, r, z\n\n\t\t# option for a custom geometry\n\t\telse:\n\t\t\tself.geom = geometry\n\n\tdef init_intrusion(self, T, depth, thickness, radius, phi=1, geometry='ellipse'):\n\t\t\"\"\"\n\t\tInitialize intrusion properties. Updates volume averages after initialization: means we can just initialize\n\t\ttemperature and intrusion to get all thermal properties set.\n\t\t**So far this only accounts for a single intrusion at the center of the domain\n\t\t\tshould be simple to add multiples in the future if necessary\n\n\t\tParameters:\n\t\t\tT : float\n\t\t\t\tset intrusion to single Temperature value, assuming that it is well mixed\n\t\t\tdepth : float\n\t\t\t\tset depth of upper edge of the intrusion, m\n\t\t\tthickness : float\n\t\t\t\tset thickness of intrusion, m\n\t\t\tradius : float\n\t\t\t\tset radius of intrusion, m\n\t\t\tphi : float [0,1]\n\t\t\t\tset liquid fraction of intrusion, generally interested in totally liquid bodies so default = 1\n\t\t\tgeometry : string (see set_intrusion_geom()), array\n\t\t\t\tset geometry of intrusion, default is an ellipse\n\n\t\tUsage:\n\t\t\tIntrusion at pure water melting temperature (273.15 K), emplaced at 2 km depth in the shell, 2 km thick\n\t\t\tand a radius of 4 km:\n\t\t\t\tmodel.init_intrusion(T=273.15, depth=2e3, thickness=2e3, radius=4e3)\n\t\t\"\"\"\n\n\t\tif phi < 0 or phi > 1:\n\t\t\traise Exception('liquid fraction must be between 0 and 1')\n\n\t\t# save intrusion properties\n\t\tself.T_int = T\n\t\tself.depth, self.thickness, self.R_int = depth, thickness, radius\n\t\tself.set_intrusion_geom(depth, thickness, radius, geometry) # get chosen geometry\n\t\tself.T[self.geom] = T # set intrusion temperature to chosen temperature\n\t\tself.phi[self.geom] = phi # set intrusion to chosen liquid fraction\n\t\tself.init_volume_averages() # update volume averages\n\n\t# define a bunch of useful functions for salty systems, unused otherwise\n\t# non-linear fit, for larger dT\n\tdef shallow_fit(self, dT, a, b, c, d):\n\t\treturn a + b * (dT + c) * (1 - np.exp(-d / dT)) / (1 + dT)\n\n\t# linear fit, for small dT\n\tdef linear_fit(self, dT, a, b):\n\t\treturn a + b * dT\n\n\t# FREEZCHEM quadratic fit for liquidus curve\n\tdef Tm_func(self, S, a, b, c):\n\t\treturn a * S ** 2 + b * S + c\n\n\tdef init_salinity(self, S=None, composition='MgSO4', concentration=12.3, 
rejection_cutoff=0.25, shell=False,\n\t in_situ=False, T_match=True):\n\t\t\"\"\"\n\t\tInitialize salinity properties for simulations.\n\t\tParameters:\n\t\t\tS : (nz,nx) grid\n\t\t\t\tNecessary for a custom background salinity or other configurations, e.g. a super saline layer\n\t\t\t\t-> though this could be done outside of this command so....\n\t\t\tcomposition : string\n\t\t\t\tChoose which composition the liquid should be.\n\t\t\t\tOptions: 'MgSO4', 'NaCl'\n\t\t\tconcentration : float\n\t\t\t\tInitial intrusion concentration and/or ocean concentration; if using the shell option (below),\n\t\t\t\tthis assumes that the shell was frozen out of an ocean with this concentration and composition\n\t\t\trejection_cutoff : float > 0\n\t\t\t\tLiquid fraction (phi) below which no more salt will be accepted into the remaining liquid or\n\t\t\t\tinterstitial liquid. Note: should be greater than 0\n\t\t\tshell : bool\n\t\t\t\tOption to include background salinity in the shell given the chosen composition and concentration.\n\t\t\t\tThis will automatically adjust the temperature profile to account for a salty ocean near the melting\n\t\t\t\ttemperature. If assuming something else, such as a slightly cooler convecting layer between the\n\t\t\t\tbrittle shell and the ocean, this can be adjusted afterward by calling init_T()\n\t\t\tin_situ : bool\n\t\t\t\tAssumes the intrusion is from an event that melted the shell in-situ, thus have the same\n\t\t\t\tconcentration and composition as the shell at that depth.\n\t\t\tT_match : bool\n\t\t\t\tOption to adjust the temperature profile to make the bottom be at the melting temperature of an ocean\n\t\t\t\twith the same composition and concentration. This is mostly used if making the assumption that the\n\t\t\t\tbrittle layer simulated here is directly above the ocean.\n\n\t\tUsage:\n\t\t\tPure shell, saline intrusion: Intrusion with 34 ppt NaCl salinity\n\t\t\t\tmodel.init_intrusion(composition='NaCl',concentration=12.3)\n\n\t\t\tSaline shell, in-situ melting: Ocean began with 100 ppt MgSO4 and intrusion has been created by in-situ\n\t\t\tmelting\n\t\t\t\tmodel.init_intrusion(composition='MgSO4', concentration=100., shell=True, in_situ=True)\n\t\t\"\"\"\n\t\tif in_situ == True:\n\t\t\tshell = True\n\n\t\tself.issalt = True # turn on salinity for solvers\n\t\tself.saturated = 0 # whether liquid is saturated\n\t\tself.rejection_cutoff = rejection_cutoff # minimum liquid fraction of cell to accept rejected salt\n\n\t\t# composition and concentration coefficients for fits from Buffo et al. (2019)\n\t\t# others have been calculated by additional runs using the model from Buffo et al. (2019)\n\n\t\t# dict structure {composition: [a,b,c]}\n\t\t# Liquidus curves derived from Liquius 1.0 (Buffo et al. 
2019 and FREEZCHEM) for MgSO4 and NaCl\n\t\tself.Tm_consts = {'MgSO4': [-1.333489497e-5, -0.01612951864, 273.055175687],\n\t\t 'NaCl': [-9.1969758e-5, -0.03942059, 272.63617665]\n\t\t }\n\n\t\t# dict structure {composition: {concentration: [a,b,c,d]}}\n\t\tself.shallow_consts = {'MgSO4': {0: [0., 0., 0., 0.],\n\t\t 12.3: [12.21, -8.3, 1.836, 20.2],\n\t\t 100: [22.19, -11.98, 1.942, 21.91],\n\t\t 282: [30.998, -11.5209, 2.0136, 21.1628]},\n\t\t 'NaCl': {0: [0., 0., 0., 0.],\n\t\t 10: [7.662, -4.936, 2.106, 24.8],\n\t\t 34: [11.1, -4.242, 1.91, 22.55],\n\t\t 100: [0., 0., 0., 0.],\n\t\t 260: [0., 0., 0., 0.]}\n\t\t }\n\n\t\t# dict structure {composition: {concentration: [a,b]}}\n\t\tself.linear_consts = {'MgSO4': {0: [0., 0.],\n\t\t 12.3: [1.0375, 0.40205],\n\t\t 100: [5.4145, 0.69992],\n\t\t 282: [14.737, 0.62319]},\n\t\t 'NaCl': {0: [0., 0.],\n\t\t 10: [0.6442, 0.2279],\n\t\t 34: [1.9231, 0.33668],\n\t\t 100: [0., 0.],\n\t\t 260: [0., 0.]}\n\t\t }\n\n\t\t# dict structure {composition: {concentration: [a,b,c]}}\n\t\tself.depth_consts = {'MgSO4': {12.3: [1.0271, -74.0332, -4.2241],\n\t\t 100: [5.38, -135.096, -8.2515],\n\t\t 282: [14.681, -117.429, -5.4962]},\n\t\t 'NaCl': {10: [0., 0., 0.],\n\t\t 34: [1.8523, -72.4049, -10.6679],\n\t\t 100: [0., 0., 0.],\n\t\t 260: [0., 0., 0.]}\n\t\t }\n\n\t\t# create dictionary of root to switch between shallow and linear fits\n\t\t# dict structure {chosen composition: {concentration: root}}\n\t\tself.linear_shallow_roots = {composition: {}}\n\t\tfor key in self.linear_consts[composition]:\n\t\t\tself.linear_shallow_roots[composition][key] = optimize.root(lambda x:\n\t\t\t self.shallow_fit(x, *\n\t\t\t self.shallow_consts[composition][key]) \\\n\t\t\t - self.linear_fit(x, *\n\t\t\t self.linear_consts[composition][key]), 3)['x'][\n\t\t\t\t0]\n\n\t\tself.composition = composition\n\t\tself.concentration = concentration\n\n\t\tif self.composition == 'MgSO4':\n\t\t\t# Liquidus curve derived from Liquius 1.0 (Buffo et al. 2019 and FREEZCHEM) for MgSO4\n\t\t\t# changing from lambda notation to def notation for better pickling?\n\n\t\t\t# def self.Tm_func = lambda S: (-(1.333489497 * 1e-5) * S ** 2) - 0.01612951864 * S + 273.055175687\n\t\t\t# density changes for water w/ concentration of salt below\n\t\t\tself.C_rho = 1.145\n\t\t\tself.Ci_rho = 7.02441855e-01\n\n\t\t\tself.saturation_point = 282. # ppt, saturation concentration of MgSO4 in water\n\t\t\tself.constants.rho_s = 2660. # kg/m^3, density of MgSO4\n\n\t\telif self.composition == 'NaCl':\n\t\t\t# Liquidus curve derived from Liquius 1.0 (Buffo et al. 2019 and FREEZCHEM) for NaCl\n\t\t\t# linear fit for density change due to salinity S\n\t\t\tself.C_rho = 0.8644\n\t\t\tself.Ci_rho = 6.94487270e-01\n\n\t\t\tself.saturation_point = 260. # ppt, saturation concentration of NaCl in water\n\t\t\tself.constants.rho_s = 2160. # kg/m^3, density of NaCl\n\n\t\t# save array of concentrations for chosen composition for entraining salt in ice\n\t\tself.concentrations = np.sort([key for key in self.shallow_consts[composition]])\n\n\t\tif S is not None:\n\t\t\t# method for custom salinity + brine inclusion\n\t\t\tself.S = S\n\t\t\tself.S += self.phi * concentration\n\n\t\tif shell:\n\t\t\t# method for a salinity/depth profile via Buffo et al. 
2019\n\t\t\ts_depth = lambda z, a, b, c: a + b / (c - z)\n\t\t\tself.S = s_depth(self.Z, *self.depth_consts[composition][concentration])\n\n\t\t\tif in_situ is False: # for water emplaced in a salty shell\n\t\t\t\tself.S[self.geom] = concentration\n\t\t\telse: # must redistribute the salt evenly to simulate real in-situ melting\n\t\t\t\tprint('Redistributing salt in intrusion')\n\t\t\t\ttry:\n\t\t\t\t\tS_int_tot = self.S[self.geom].sum()\n\t\t\t\t\tself.S[self.geom] = S_int_tot / np.shape(self.geom)[1]\n\t\t\t\t\tif self.S[self.geom].sum() / S_int_tot > 1.0 + 1e-15 or \\\n\t\t\t\t\t\t\tself.S[self.geom].sum() / S_int_tot < 1.0 - 1e-15:\n\t\t\t\t\t\tprint('S_int_new/Si =', self.S[self.geom].sum() / S_int_tot)\n\t\t\t\t\t\traise Exception('problem with salt redistribution')\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\t\t\tprint('-- New intrusion salinity: {} ppt'.format(self.S[self.geom[0][0], self.geom[1][0]]))\n\n\t\t\t# update temperature profile to reflect bottom boundary condition\n\t\t\tif T_match:\n\t\t\t\tself.Tbot = self.Tm_func(s_depth(self.Lz, *self.depth_consts[composition][concentration]),\n\t\t\t\t *self.Tm_consts[composition])\n\t\t\t\tprint('-- Adjusting temperature profile: Tsurf = {}, Tbot = {}'.format(self.Tsurf, self.Tbot))\n\t\t\t\tself.init_T(Tsurf=self.Tsurf, Tbot=self.Tbot)\n\t\t\telse:\n\t\t\t\tpass\n\n\t\telse:\n\t\t\t# homogeneous brine, pure ice shell\n\t\t\tself.S = self.phi * concentration\n\n\t\t\tif T_match:\n\t\t\t\tself.Tbot = self.Tm_func(concentration, *self.Tm_consts[composition])\n\t\t\t\tprint('--Pure shell; adjusting temperature profile: Tsurf = {}, Tbot = {}'.format(self.Tsurf,\n\t\t\t\t self.Tbot))\n\t\t\t\tself.init_T(Tsurf=self.Tsurf, Tbot=self.Tbot)\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t# update initial melting temperature\n\t\tself.Tm = self.Tm_func(self.S, *self.Tm_consts[composition])\n\t\t# update volume average with included salt\n\t\tself.init_volume_averages()\n\t\t# begin tracking mass\n\t\tself.total_salt = [self.S.sum()]\n\t\t# begin tracking amount of salt removed from system\n\t\tself.removed_salt, self.mass_removed, self.ppt_removed = [0], [0], [0]\n\t\tself.wat_vol = [self.geom[1].shape[0]]\n\n\t\t# update temperature of liquid to reflect salinity\n\t\ttry:\n\t\t\tself.T_int = self.Tm_func(self.S[self.geom], *self.Tm_consts[composition])[0]\n\t\t\tprint('--Updating intrusion temperature to reflect initial salinity, Tint =', self.T_int)\n\t\t\tself.T[self.geom] = self.T_int\n\t\texcept AttributeError:\n\t\t\tpass\n\t\tself.save_initials()\n\n\tdef entrain_salt(self, dT, S, composition):\n\t\t\"\"\"\n\t\tCalculate the amount of salt entrained in newly frozen ice that is dependent on the thermal gradient across\n\t\tthe ice (Buffo et al., in review).\n\t\tParameters:\n\t\t\tdT : float, array\n\t\t\t\ttemperature gradient across cell, or array of temperature gradients\n\t\t\tS : float, array\n\t\t\t\tsalinity (ppt) of newly frozen cell, or array of salinities\n\t\t\tcomposition : string\n\t\t\t\tsalt composition\n\t\t\t\toptions: 'MgSO4', 'NaCl'\n\t\tReturns:\n\t\t\tamount of salt entrained in ice, ppt\n\t\t\tor array of salt entrained in ice, ppt\n\n\t\tUsage:\n\t\t\tSee HeatSolver.update_salinity() function.\n\t\t\"\"\"\n\t\tif isinstance(dT, (int, float)): # if dT (and therefore S) is a single value\n\t\t\tif S in self.shallow_consts[composition]:\n\t\t\t\t# determine whether to use linear or shallow fit\n\t\t\t\tswitch_dT = self.linear_shallow_roots[composition][S]\n\t\t\t\tif dT > switch_dT:\n\t\t\t\t\treturn 
self.shallow_fit(dT, *self.shallow_consts[composition][S])\n\t\t\t\telif dT <= switch_dT:\n\t\t\t\t\treturn self.linear_fit(dT, *self.linear_consts[composition][S])\n\n\t\t\telse: # salinity not in SlushFund runs\n\t\t\t\t# find which two known concentrations current S fits between\n\t\t\t\tc_min = self.concentrations[S > self.concentrations].max()\n\t\t\t\tc_max = self.concentrations[S < self.concentrations].min()\n\n\t\t\t\t# linearly interpolate between the two concentrations at gradient dT\n\t\t\t\tm, b = np.polyfit([c_max, c_min], [self.entrain_salt(dT, c_max, composition),\n\t\t\t\t self.entrain_salt(dT, c_min, composition)], 1)\n\n\t\t\t\t# return concentration of entrained salt\n\t\t\t\treturn m * S + b\n\n\t\telse: # recursively call this function to fill an array of the same length as input array\n\t\t\treturn np.array([self.entrain_salt(t, s, composition) for t, s in zip(dT, S)], dtype=float)\n","sub_path":"IceSystem.py","file_name":"IceSystem.py","file_ext":"py","file_size_in_byte":23296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
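Putting the initializers above together, a typical driver script follows the order the docstrings assume: temperature first, then intrusion geometry, then salinity, since init_salinity reads self.geom and self.Tsurf. The numbers below are illustrative only:

model = IceSystem(Lx=6e3, Lz=5e3, dx=10, dz=10, use_X_symmetry=True)
model.init_T(Tsurf=110., Tbot=273.15, real_Lz=40e3)  # a 5 km slice of a 40 km shell
model.init_intrusion(T=273.15, depth=1e3, thickness=250., radius=2e3)
model.init_salinity(composition='MgSO4', concentration=12.3, shell=True)
# time stepping then proceeds through the inherited HeatSolver methods (not shown here)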
+{"seq_id":"607057984","text":"# Copyright 2018 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nUnit tests for the :mod:`pennylane.plugin.DefaultGaussian` device.\r\n\"\"\"\r\n# pylint: disable=protected-access,cell-var-from-loop\r\nimport unittest\r\nimport inspect\r\nimport logging as log\r\n\r\nfrom scipy.special import factorial as fac\r\nfrom scipy.linalg import block_diag\r\n\r\nfrom defaults import pennylane as qml, BaseTest\r\n\r\nfrom pennylane import numpy as np\r\n\r\nfrom pennylane.plugins.default_gaussian import fock_prob\r\n\r\nfrom pennylane.plugins.default_gaussian import (rotation, squeezing, quadratic_phase,\r\n beamsplitter, two_mode_squeezing,\r\n controlled_addition, controlled_phase)\r\nfrom pennylane.plugins.default_gaussian import (vacuum_state, coherent_state,\r\n squeezed_state, displaced_squeezed_state,\r\n thermal_state)\r\n\r\nfrom pennylane.plugins.default_gaussian import DefaultGaussian\r\n\r\n\r\nlog.getLogger('defaults')\r\n\r\n\r\nU = np.array([[0.83645892-0.40533293j, -0.20215326+0.30850569j],\r\n [-0.23889780-0.28101519j, -0.88031770-0.29832709j]])\r\n\r\n\r\nU2 = np.array([[-0.07843244-3.57825948e-01j, 0.71447295-5.38069384e-02j, 0.20949966+6.59100734e-05j, -0.50297381+2.35731613e-01j],\r\n [-0.26626692+4.53837083e-01j, 0.27771991-2.40717436e-01j, 0.41228017-1.30198687e-01j, 0.01384490-6.33200028e-01j],\r\n [-0.69254712-2.56963068e-02j, -0.15484858+6.57298384e-02j, -0.53082141+7.18073414e-02j, -0.41060450-1.89462315e-01j],\r\n [-0.09686189-3.15085273e-01j, -0.53241387-1.99491763e-01j, 0.56928622+3.97704398e-01j, -0.28671074-6.01574497e-02j]])\r\n\r\n\r\nH = np.array([[1.02789352, 1.61296440-0.3498192j],\r\n [1.61296440+0.3498192j, 1.23920938+0j]])\r\n\r\n\r\nhbar = 2\r\n\r\ndef prep_par(par, op):\r\n \"Convert par into a list of parameters that op expects.\"\r\n if op.par_domain == 'A':\r\n return [np.diag([x, 1]) for x in par]\r\n return par\r\n\r\n\r\nclass TestAuxillaryFunctions(BaseTest):\r\n \"\"\"Tests the auxillary functions\"\"\"\r\n\r\n def setUp(self):\r\n self.hbar = 2.\r\n\r\n # an arbitrary two-mode Gaussian state generated using Strawberry Fields\r\n self.mu = np.array([0.6862, 0.4002, 0.09, 0.558])*np.sqrt(self.hbar)\r\n self.cov = np.array([[0.50750512, -0.04125979, -0.21058229, -0.07866912],\r\n [-0.04125979, 0.50750512, -0.07866912, -0.21058229],\r\n [-0.21058229, -0.07866912, 0.95906208, 0.27133391],\r\n [-0.07866912, -0.21058229, 0.27133391, 0.95906208]])*self.hbar\r\n\r\n # expected Fock state probabilities\r\n self.events = [(0, 0), (0, 1), (1, 1), (2, 3)]\r\n self.probs = [0.430461524043, 0.163699407559, 0.0582788388927, 0.00167706931355]\r\n\r\n def test_fock_prob(self):\r\n \"\"\"Test fock_prob returns the correct Fock probabilities\"\"\"\r\n for idx, e in enumerate(self.events):\r\n res = fock_prob(self.mu, self.cov, e, hbar=self.hbar)\r\n self.assertAlmostEqual(res, self.probs[idx], delta=self.tol)\r\n\r\n\r\nclass TestGates(BaseTest):\r\n \"\"\"Gate 
tests.\"\"\"\r\n\r\n def test_rotation(self):\r\n \"\"\"Test the Fourier transform of a displaced state.\"\"\"\r\n # pylint: disable=invalid-unary-operand-type\r\n self.logTestName()\r\n\r\n alpha = 0.23+0.12j\r\n S = rotation(np.pi/2)\r\n\r\n # apply to a coherent state. F{x, p} -> {-p, x}\r\n out = S @ np.array([alpha.real, alpha.imag])*np.sqrt(2*hbar)\r\n expected = np.array([-alpha.imag, alpha.real])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_squeezing(self):\r\n \"\"\"Test the squeezing symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n r = 0.543\r\n phi = 0.123\r\n S = squeezing(r, phi)\r\n\r\n # apply to an identity covariance matrix\r\n out = S @ S.T\r\n expected = rotation(phi/2) @ np.diag(np.exp([-2*r, 2*r])) @ rotation(phi/2).T\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_quadratic_phase(self):\r\n \"\"\"Test the quadratic phase symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n s = 0.543\r\n S = quadratic_phase(s)\r\n\r\n # apply to a coherent state. P[x, p] -> [x, p+sx]\r\n alpha = 0.23+0.12j\r\n out = S @ np.array([alpha.real, alpha.imag])*np.sqrt(2*hbar)\r\n expected = np.array([alpha.real, alpha.imag+s*alpha.real])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_beamsplitter(self):\r\n \"\"\"Test the beamsplitter symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n theta = 0.543\r\n phi = 0.312\r\n S = beamsplitter(theta, phi)\r\n\r\n # apply to a coherent state. BS|a1, a2> -> |ta1-r^*a2, ra1+ta2>\r\n a1 = 0.23+0.12j\r\n a2 = 0.23+0.12j\r\n out = S @ np.array([a1.real, a2.real, a1.imag, a2.imag])*np.sqrt(2*hbar)\r\n\r\n T = np.cos(theta)\r\n R = np.exp(1j*phi)*np.sin(theta)\r\n a1out = T*a1 - R.conj()*a2\r\n a2out = R*a2 + T*a1\r\n expected = np.array([a1out.real, a2out.real, a1out.imag, a2out.imag])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_two_mode_squeezing(self):\r\n \"\"\"Test the two mode squeezing symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n r = 0.543\r\n phi = 0.123\r\n S = two_mode_squeezing(r, phi)\r\n\r\n # test that S = B^\\dagger(pi/4, 0) [S(z) x S(-z)] B(pi/4)\r\n B = beamsplitter(np.pi/4, 0)\r\n Sz = block_diag(squeezing(r, phi), squeezing(-r, phi))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n expected = B.conj().T @ Sz @ B\r\n self.assertAllAlmostEqual(S, expected, delta=self.tol)\r\n\r\n # test that S |a1, a2> = |ta1+ra2, ta2+ra1>\r\n a1 = 0.23+0.12j\r\n a2 = 0.23+0.12j\r\n out = S @ np.array([a1.real, a2.real, a1.imag, a2.imag])*np.sqrt(2*hbar)\r\n\r\n T = np.cosh(r)\r\n R = np.exp(1j*phi)*np.sinh(r)\r\n a1out = T*a1 + R*np.conj(a2)\r\n a2out = T*a2 + R*np.conj(a1)\r\n expected = np.array([a1out.real, a2out.real, a1out.imag, a2out.imag])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_controlled_addition(self):\r\n \"\"\"Test the CX symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n s = 0.543\r\n S = controlled_addition(s)\r\n\r\n # test that S = B(theta+pi/2, 0) [S(z) x S(-z)] B(theta, 0)\r\n r = np.arcsinh(-s/2)\r\n theta = 0.5*np.arctan2(-1/np.cosh(r), -np.tanh(r))\r\n Sz = block_diag(squeezing(r, 0), squeezing(-r, 0))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n\r\n expected = beamsplitter(theta+np.pi/2, 0) @ Sz @ beamsplitter(theta, 0)\r\n self.assertAllAlmostEqual(S, expected, delta=self.tol)\r\n\r\n # test that S[x1, x2, p1, p2] -> [x1, x2+sx1, p1-sp2, p2]\r\n x1 = 0.5432\r\n x2 = -0.453\r\n p1 = 
0.154\r\n p2 = -0.123\r\n out = S @ np.array([x1, x2, p1, p2])*np.sqrt(2*hbar)\r\n expected = np.array([x1, x2+s*x1, p1-s*p2, p2])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_controlled_phase(self):\r\n \"\"\"Test the CZ symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n s = 0.543\r\n S = controlled_phase(s)\r\n\r\n # test that S = R_2(pi/2) CX(s) R_2(pi/2)^\\dagger\r\n R2 = block_diag(np.identity(2), rotation(np.pi/2))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n expected = R2 @ controlled_addition(s) @ R2.conj().T\r\n self.assertAllAlmostEqual(S, expected, delta=self.tol)\r\n\r\n # test that S[x1, x2, p1, p2] -> [x1, x2, p1+sx2, p2+sx1]\r\n x1 = 0.5432\r\n x2 = -0.453\r\n p1 = 0.154\r\n p2 = -0.123\r\n out = S @ np.array([x1, x2, p1, p2])*np.sqrt(2*hbar)\r\n expected = np.array([x1, x2, p1+s*x2, p2+s*x1])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n\r\nclass TestStates(BaseTest):\r\n \"\"\"State tests.\"\"\"\r\n\r\n def test_vacuum_state(self):\r\n \"\"\"Test the vacuum state is correct.\"\"\"\r\n self.logTestName()\r\n wires = 3\r\n means, cov = vacuum_state(wires, hbar=hbar)\r\n self.assertAllAlmostEqual(means, np.zeros([2*wires]), delta=self.tol)\r\n self.assertAllAlmostEqual(cov, np.identity(2*wires)*hbar/2, delta=self.tol)\r\n\r\n def test_coherent_state(self):\r\n \"\"\"Test the coherent state is correct.\"\"\"\r\n self.logTestName()\r\n a = 0.432-0.123j\r\n means, cov = coherent_state(a, hbar=hbar)\r\n self.assertAllAlmostEqual(means, np.array([a.real, a.imag])*np.sqrt(2*hbar), delta=self.tol)\r\n self.assertAllAlmostEqual(cov, np.identity(2)*hbar/2, delta=self.tol)\r\n\r\n def test_squeezed_state(self):\r\n \"\"\"Test the squeezed state is correct.\"\"\"\r\n self.logTestName()\r\n r = 0.432\r\n phi = 0.123\r\n means, cov = squeezed_state(r, phi, hbar=hbar)\r\n\r\n # test vector of means is zero\r\n self.assertAllAlmostEqual(means, np.zeros([2]), delta=self.tol)\r\n\r\n R = rotation(phi/2)\r\n expected = R @ np.array([[np.exp(-2*r), 0],\r\n [0, np.exp(2*r)]]) * hbar/2 @ R.T\r\n # test covariance matrix is correct\r\n self.assertAllAlmostEqual(cov, expected, delta=self.tol)\r\n\r\n def test_displaced_squeezed_state(self):\r\n \"\"\"Test the displaced squeezed state is correct.\"\"\"\r\n self.logTestName()\r\n alpha = 0.541+0.109j\r\n a = abs(alpha)\r\n phi_a = np.angle(alpha)\r\n r = 0.432\r\n phi_r = 0.123\r\n means, cov = displaced_squeezed_state(a, phi_a, r, phi_r, hbar=hbar)\r\n\r\n # test vector of means is correct\r\n self.assertAllAlmostEqual(means, np.array([alpha.real, alpha.imag])*np.sqrt(2*hbar), delta=self.tol)\r\n\r\n R = rotation(phi_r/2)\r\n expected = R @ np.array([[np.exp(-2*r), 0],\r\n [0, np.exp(2*r)]]) * hbar/2 @ R.T\r\n # test covariance matrix is correct\r\n self.assertAllAlmostEqual(cov, expected, delta=self.tol)\r\n\r\n def thermal_state(self):\r\n \"\"\"Test the thermal state is correct.\"\"\"\r\n self.logTestName()\r\n nbar = 0.5342\r\n means, cov = thermal_state(nbar, hbar=hbar)\r\n self.assertAllAlmostEqual(means, np.zeros([2]), delta=self.tol)\r\n self.assertTrue(np.all((cov.diag*2/hbar-1)/2 == nbar))\r\n\r\n\r\n\r\nclass TestDefaultGaussianDevice(BaseTest):\r\n \"\"\"Test the default gaussian device. 
The test ensures that the device is properly\r\n applying gaussian operations and calculating the correct observables.\"\"\"\r\n def setUp(self):\r\n self.dev = DefaultGaussian(wires=2, shots=0, hbar=hbar)\r\n\r\n def test_operation_map(self):\r\n \"\"\"Test that default Gaussian device supports all PennyLane Gaussian CV gates.\"\"\"\r\n self.logTestName()\r\n\r\n non_supported = {'FockDensityMatrix',\r\n 'FockStateVector',\r\n 'FockState',\r\n 'CrossKerr',\r\n 'CatState',\r\n 'CubicPhase',\r\n 'Kerr'}\r\n\r\n self.assertEqual(set(qml.ops.cv.__all__) - non_supported,\r\n set(self.dev._operation_map))\r\n\r\n def test_expectation_map(self):\r\n \"\"\"Test that default Gaussian device supports all PennyLane Gaussian continuous expectations.\"\"\"\r\n self.logTestName()\r\n self.assertEqual(set(qml.expval.cv.__all__)|{'Identity'}-{'Heterodyne'},\r\n set(self.dev._expectation_map))\r\n\r\n def test_apply(self):\r\n \"\"\"Test the application of gates to a state\"\"\"\r\n self.logTestName()\r\n\r\n # loop through all supported operations\r\n for gate_name, fn in self.dev._operation_map.items():\r\n log.debug(\"\\tTesting %s gate...\", gate_name)\r\n self.dev.reset()\r\n\r\n # start in the displaced squeezed state\r\n alpha = 0.542+0.123j\r\n a = abs(alpha)\r\n phi_a = np.angle(alpha)\r\n r = 0.652\r\n phi_r = -0.124\r\n\r\n self.dev.apply('DisplacedSqueezedState', wires=[0], par=[a, phi_a, r, phi_r])\r\n self.dev.apply('DisplacedSqueezedState', wires=[1], par=[a, phi_a, r, phi_r])\r\n\r\n # get the equivalent pennylane operation class\r\n op = qml.ops.__getattribute__(gate_name)\r\n # the list of wires to apply the operation to\r\n w = list(range(op.num_wires))\r\n\r\n if op.par_domain == 'A':\r\n # the parameter is an array\r\n if gate_name == 'GaussianState':\r\n p = [np.array([0.432, 0.123, 0.342, 0.123]), np.diag([0.5234]*4)]\r\n w = list(range(2))\r\n expected_out = p\r\n elif gate_name == 'Interferometer':\r\n w = list(range(2))\r\n p = [U]\r\n S = fn(*p)\r\n expected_out = S @ self.dev._state[0], S @ self.dev._state[1] @ S.T\r\n else:\r\n # the parameter is a float\r\n p = [0.432423, -0.12312, 0.324, 0.751][:op.num_params]\r\n\r\n if gate_name == 'Displacement':\r\n alpha = p[0]*np.exp(1j*p[1])\r\n state = self.dev._state\r\n mu = state[0].copy()\r\n mu[w[0]] += alpha.real*np.sqrt(2*hbar)\r\n mu[w[0]+2] += alpha.imag*np.sqrt(2*hbar)\r\n expected_out = mu, state[1]\r\n elif 'State' in gate_name:\r\n mu, cov = fn(*p, hbar=hbar)\r\n expected_out = self.dev._state\r\n expected_out[0][[w[0], w[0]+2]] = mu\r\n\r\n ind = np.concatenate([np.array([w[0]]), np.array([w[0]])+2])\r\n rows = ind.reshape(-1, 1)\r\n cols = ind.reshape(1, -1)\r\n expected_out[1][rows, cols] = cov\r\n else:\r\n # if the default.gaussian is an operation accepting parameters,\r\n # initialise it using the parameters generated above.\r\n S = fn(*p)\r\n\r\n # calculate the expected output\r\n if op.num_wires == 1:\r\n # reorder from symmetric ordering to xp-ordering\r\n S = block_diag(S, np.identity(2))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n\r\n expected_out = S @ self.dev._state[0], S @ self.dev._state[1] @ S.T\r\n\r\n self.dev.apply(gate_name, wires=w, par=p)\r\n\r\n # verify the device is now in the expected state\r\n self.assertAllAlmostEqual(self.dev._state[0], expected_out[0], delta=self.tol)\r\n self.assertAllAlmostEqual(self.dev._state[1], expected_out[1], delta=self.tol)\r\n\r\n def test_apply_errors(self):\r\n \"\"\"Test that apply fails for incorrect state preparation\"\"\"\r\n self.logTestName()\r\n\r\n with 
self.assertRaisesRegex(ValueError, 'incorrect size for the number of subsystems'):\r\n p = [thermal_state(0.5)]\r\n self.dev.apply('GaussianState', wires=[0], par=[p])\r\n\r\n with self.assertRaisesRegex(ValueError, 'Incorrect number of subsystems'):\r\n p = U\r\n self.dev.apply('Interferometer', wires=[0], par=[p])\r\n\r\n with self.assertRaisesRegex(ValueError, \"Invalid target subsystems provided in 'wires' argument\"):\r\n p = U2\r\n dev = DefaultGaussian(wires=4, shots=0, hbar=hbar)\r\n self.dev.apply('Interferometer', wires=[0, 1, 2], par=[p])\r\n\r\n def test_expectation(self):\r\n \"\"\"Test that expectation values are calculated correctly\"\"\"\r\n self.logTestName()\r\n\r\n dev = qml.device('default.gaussian', wires=1, hbar=hbar)\r\n\r\n # test correct mean and variance for of a displaced thermal state\r\n nbar = 0.5431\r\n alpha = 0.324-0.59j\r\n dev.apply('ThermalState', wires=[0], par=[nbar])\r\n dev.apply('Displacement', wires=[0], par=[alpha, 0])\r\n mean = dev.expval('MeanPhoton', [0], [])\r\n self.assertAlmostEqual(mean, np.abs(alpha)**2+nbar, delta=self.tol)\r\n # self.assertAlmostEqual(var, nbar**2+nbar+np.abs(alpha)**2*(1+2*nbar), delta=self.tol)\r\n\r\n # test correct mean and variance for Homodyne P measurement\r\n alpha = 0.324-0.59j\r\n dev.apply('CoherentState', wires=[0], par=[alpha])\r\n mean = dev.expval('P', [0], [])\r\n self.assertAlmostEqual(mean, alpha.imag*np.sqrt(2*hbar), delta=self.tol)\r\n # self.assertAlmostEqual(var, hbar/2, delta=self.tol)\r\n\r\n # test correct mean and variance for Homodyne measurement\r\n mean = dev.expval('Homodyne', [0], [np.pi/2])\r\n self.assertAlmostEqual(mean, alpha.imag*np.sqrt(2*hbar), delta=self.tol)\r\n # self.assertAlmostEqual(var, hbar/2, delta=self.tol)\r\n\r\n # test correct mean and variance for number state expectation ||^2\r\n # on a coherent state\r\n for n in range(3):\r\n mean = dev.expval('NumberState', [0], [np.array([n])])\r\n expected = np.abs(np.exp(-np.abs(alpha)**2/2)*alpha**n/np.sqrt(fac(n)))**2\r\n self.assertAlmostEqual(mean, expected, delta=self.tol)\r\n\r\n # test correct mean and variance for number state expectation ||^2\r\n # on a squeezed state\r\n n = 1\r\n r = 0.4523\r\n dev.apply('SqueezedState', wires=[0], par=[r, 0])\r\n mean = dev.expval('NumberState', [0], [np.array([2*n])])\r\n expected = np.abs(np.sqrt(fac(2*n))/(2**n*fac(n))*(-np.tanh(r))**n/np.sqrt(np.cosh(r)))**2\r\n self.assertAlmostEqual(mean, expected, delta=self.tol)\r\n\r\n def test_reduced_state(self):\r\n \"\"\"Test reduced state\"\"\"\r\n self.logTestName()\r\n\r\n # Test error is raised if requesting a non-existant subsystem\r\n with self.assertRaisesRegex(ValueError, \"specified wires cannot be larger than the number of subsystems\"):\r\n self.dev.reduced_state([6, 4])\r\n\r\n # Test requesting via an integer\r\n res = self.dev.reduced_state(0)\r\n expected = self.dev.reduced_state([0])\r\n self.assertAllAlmostEqual(res[0], expected[0], delta=self.tol)\r\n self.assertAllAlmostEqual(res[1], expected[1], delta=self.tol)\r\n\r\n # Test requesting all wires returns the full state\r\n res = self.dev.reduced_state([0, 1])\r\n expected = self.dev._state\r\n self.assertAllAlmostEqual(res[0], expected[0], delta=self.tol)\r\n self.assertAllAlmostEqual(res[1], expected[1], delta=self.tol)\r\n\r\n\r\nclass TestDefaultGaussianIntegration(BaseTest):\r\n \"\"\"Integration tests for default.gaussian. 
This test ensures it integrates\r\n properly with the PennyLane interface, in particular QNode.\"\"\"\r\n\r\n def test_load_default_gaussian_device(self):\r\n \"\"\"Test that the default plugin loads correctly\"\"\"\r\n self.logTestName()\r\n\r\n dev = qml.device('default.gaussian', wires=2, hbar=2)\r\n self.assertEqual(dev.num_wires, 2)\r\n self.assertEqual(dev.shots, 0)\r\n self.assertEqual(dev.hbar, 2)\r\n self.assertEqual(dev.short_name, 'default.gaussian')\r\n\r\n def test_args(self):\r\n \"\"\"Test that the plugin requires correct arguments\"\"\"\r\n self.logTestName()\r\n\r\n with self.assertRaisesRegex(TypeError, \"missing 1 required positional argument: 'wires'\"):\r\n qml.device('default.gaussian')\r\n\r\n def test_unsupported_gates(self):\r\n \"\"\"Test error is raised with unsupported gates\"\"\"\r\n self.logTestName()\r\n dev = qml.device('default.gaussian', wires=2)\r\n\r\n gates = set(dev._operation_map.keys())\r\n all_gates = {m[0] for m in inspect.getmembers(qml.ops, inspect.isclass)}\r\n\r\n for g in all_gates - gates:\r\n op = getattr(qml.ops, g)\r\n\r\n if op.num_wires == 0:\r\n wires = [0]\r\n else:\r\n wires = list(range(op.num_wires))\r\n\r\n @qml.qnode(dev)\r\n def circuit(*x):\r\n \"\"\"Test quantum function\"\"\"\r\n x = prep_par(x, op)\r\n op(*x, wires=wires)\r\n\r\n if issubclass(op, qml.operation.CV):\r\n return qml.expval.X(0)\r\n\r\n return qml.expval.PauliZ(0)\r\n\r\n with self.assertRaisesRegex(qml.DeviceError, \"Gate {} not supported on device default.gaussian\".format(g)):\r\n x = np.random.random([op.num_params])\r\n circuit(*x)\r\n\r\n def test_unsupported_observables(self):\r\n \"\"\"Test error is raised with unsupported observables\"\"\"\r\n self.logTestName()\r\n dev = qml.device('default.gaussian', wires=2)\r\n\r\n obs = set(dev._expectation_map.keys())\r\n all_obs = set(qml.expval.__all__)\r\n\r\n for g in all_obs - obs:\r\n op = getattr(qml.expval, g)\r\n\r\n if op.num_wires == 0:\r\n wires = [0]\r\n else:\r\n wires = list(range(op.num_wires))\r\n\r\n @qml.qnode(dev)\r\n def circuit(*x):\r\n \"\"\"Test quantum function\"\"\"\r\n x = prep_par(x, op)\r\n return op(*x, wires=wires)\r\n\r\n with self.assertRaisesRegex(qml.DeviceError, \"Expectation {} not supported on device default.gaussian\".format(g)):\r\n x = np.random.random([op.num_params])\r\n circuit(*x)\r\n\r\n def test_gaussian_circuit(self):\r\n \"\"\"Test that the default gaussian plugin provides correct result for simple circuit\"\"\"\r\n self.logTestName()\r\n dev = qml.device('default.gaussian', wires=1)\r\n\r\n p = 0.543\r\n\r\n @qml.qnode(dev)\r\n def circuit(x):\r\n \"\"\"Test quantum function\"\"\"\r\n qml.Displacement(x, 0, wires=0)\r\n return qml.expval.X(0)\r\n\r\n self.assertAlmostEqual(circuit(p), p*np.sqrt(2*hbar), delta=self.tol)\r\n\r\n def test_gaussian_identity(self):\r\n \"\"\"Test that the default gaussian plugin provides correct result for the identity expectation\"\"\"\r\n self.logTestName()\r\n dev = qml.device('default.gaussian', wires=1)\r\n\r\n p = 0.543\r\n\r\n @qml.qnode(dev)\r\n def circuit(x):\r\n \"\"\"Test quantum function\"\"\"\r\n qml.Displacement(x, 0, wires=0)\r\n return qml.expval.Identity(0)\r\n\r\n self.assertAlmostEqual(circuit(p), 1, delta=self.tol)\r\n\r\n def test_nonzero_shots(self):\r\n \"\"\"Test that the default gaussian plugin provides correct result for high shot number\"\"\"\r\n self.logTestName()\r\n\r\n shots = 10**4\r\n dev = qml.device('default.gaussian', wires=1, shots=shots)\r\n\r\n p = 0.543\r\n\r\n @qml.qnode(dev)\r\n def 
circuit(x):\r\n \"\"\"Test quantum function\"\"\"\r\n qml.Displacement(x, 0, wires=0)\r\n return qml.expval.X(0)\r\n\r\n runs = []\r\n for _ in range(100):\r\n runs.append(circuit(p))\r\n\r\n self.assertAlmostEqual(np.mean(runs), p*np.sqrt(2*hbar), delta=0.01)\r\n\r\n def test_supported_gates(self):\r\n \"\"\"Test that all supported gates work correctly\"\"\"\r\n self.logTestName()\r\n a = 0.312\r\n\r\n dev = qml.device('default.gaussian', wires=2)\r\n\r\n for g, qop in dev._operation_map.items():\r\n log.debug('\\tTesting gate %s...', g)\r\n self.assertTrue(dev.supported(g))\r\n dev.reset()\r\n\r\n op = getattr(qml.ops, g)\r\n if op.num_wires == 0:\r\n wires = list(range(2))\r\n else:\r\n wires = list(range(op.num_wires))\r\n\r\n @qml.qnode(dev)\r\n def circuit(*x):\r\n \"\"\"Reference quantum function\"\"\"\r\n qml.Displacement(a, 0, wires=[0])\r\n op(*x, wires=wires)\r\n return qml.expval.X(0)\r\n\r\n # compare to reference result\r\n def reference(*x):\r\n \"\"\"reference circuit\"\"\"\r\n if g == 'GaussianState':\r\n return x[0][0]\r\n\r\n if g == 'Displacement':\r\n alpha = x[0]*np.exp(1j*x[1])\r\n return (alpha+a).real*np.sqrt(2*hbar)\r\n\r\n if 'State' in g:\r\n mu, _ = qop(*x, hbar=hbar)\r\n return mu[0]\r\n\r\n S = qop(*x)\r\n\r\n # calculate the expected output\r\n if op.num_wires == 1:\r\n S = block_diag(S, np.identity(2))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n\r\n return (S @ np.array([a.real, a.imag, 0, 0])*np.sqrt(2*hbar))[0]\r\n\r\n if g == 'GaussianState':\r\n p = [np.array([0.432, 0.123, 0.342, 0.123]), np.diag([0.5234]*4)]\r\n elif g == 'Interferometer':\r\n p = [np.array(U)]\r\n else:\r\n p = [0.432423, -0.12312, 0.324, 0.763][:op.num_params]\r\n\r\n self.assertAllEqual(circuit(*p), reference(*p))\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Testing PennyLane version ' + qml.version() + ', default.gaussian plugin.')\r\n # run the tests in this file\r\n suite = unittest.TestSuite()\r\n for t in (TestAuxillaryFunctions,\r\n TestGates,\r\n TestStates,\r\n TestDefaultGaussianDevice,\r\n TestDefaultGaussianIntegration):\r\n ttt = unittest.TestLoader().loadTestsFromTestCase(t)\r\n suite.addTests(ttt)\r\n unittest.TextTestRunner().run(suite)\r\n","sub_path":"tests/test_default_gaussian.py","file_name":"test_default_gaussian.py","file_ext":"py","file_size_in_byte":26165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
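A side note on the index gymnastics in test_apply and test_supported_gates above: a one-mode gate yields a 2x2 symplectic acting on (x, p), while the device state is stored in xp-ordering (x1, x2, p1, p2), hence the double permutation applied after block_diag. A minimal NumPy/SciPy check of what that expression does (standalone, no PennyLane required):

import numpy as np
from scipy.linalg import block_diag

S = np.array([[1., 2.],
              [3., 4.]])             # one-mode symplectic on (x0, p0)
S2 = block_diag(S, np.identity(2))   # two modes, ordering (x0, p0, x1, p1)
perm = [0, 2, 1, 3]
S_xp = S2[:, perm][perm]             # reorder rows and columns to (x0, x1, p0, p1)
print(S_xp)

The permuted matrix mixes x0 and p0 but leaves the second mode untouched, which is exactly what the expected_out computation relies on.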
+{"seq_id":"63861039","text":"#New OD MATRIX\r\n#this script requires a network_v4.json file, this one is automatically created when the network3 script is run to create the network\r\nimport json\r\nimport os\r\nfilename = os.path.join(\"D:\\jcdoig\\gridlock\", 'Network_v4.json')\r\n\r\njson_data=open(filename)\r\ninter = json.load(json_data)\r\njson_data.close()\r\n\r\n\r\ndef NewGKODMatrix(GKCentroidConfiguration, name = \"New OD Matrix with internal and external\"):\r\n\tGKODMatrix = GKSystem.getSystem().newObject( \"GKODMatrix\", model )\r\n\tGKODMatrix.setName( name )\r\n\tGKODMatrix.setCentroidConfiguration(GKCentroidConfiguration)\r\n\treturn GKODMatrix\r\n\r\nnewODMatrix = NewGKODMatrix(target)\r\nnewODMatrix.setEnableStore(True)\r\ntarget.addODMatrix(newODMatrix)\r\n\r\nrownum = len(inter) - 2\r\ncolnum = len(inter[0]) - 2\r\nitrips = 10\r\n\r\nfor ro in range(0, rownum + 2 ):\r\n\tfor co in range(0, colnum + 2 ):\r\n\t\tfor rd in range(0, rownum + 2 ):\r\n\t\t\tfor cd in range(0, colnum + 2 ):\r\n\t\t\t\ttry:\r\n\t\t\t\t\tif (inter[ro][co][\"internal?\"] and inter[ro][co][\"O/D\"]):\r\n\t\t\t\t\t\tif (inter[rd][cd][\"internal?\"] and inter[rd][cd][\"O/D\"]):\r\n\t\t\t\t\t\t\tif (inter[ro][co][\"GKCentroid\"] != inter[rd][cd][\"GKCentroid\"]):\r\n\t\t\t\t\t\t\t\tnewODMatrix.setTrips(model.getCatalog().find(inter[ro][co][\"GKCentroid\"]), model.getCatalog().find(inter[rd][cd][\"GKCentroid\"]), itrips)\r\n\t\t\t\texcept:\r\n\t\t\t\t\tpass","sub_path":"create_new_od.py","file_name":"create_new_od.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"236101642","text":"#!/bin/env python\nfrom modules.portstate import *\nfrom modules.host import *\nfrom modules.dbinit import *\nfrom modules.datevalidator import *\nfrom modules.logger import *\nimport argparse\n\ndef run(*args, **kwargs):\n arguments = args[0]\n action = arguments['action']\n\n if not arguments['ignore_holiday']:\n if DateValidator.is_holiday() or DateValidator.is_weekend():\n Logger.log(\"Today is weekend or holiday. Do nothing.\")\n return None\n\n if arguments['init']:\n init(arguments['hosts'])\n return None\n\n if arguments['hosts']:\n hosts = arguments['hosts']\n else:\n hosts = get_host_list()\n\n # Run main procedure\n for hostname in hosts:\n host = Host(hostname)\n\n if arguments['hosts']: # if ports explicitly set - use it. Here ports can be either list or int\n ports = arguments['ports']\n else:\n host_db = DatabaseInit()\n host_info = host_db.collect_data_from_host(host)\n ports_number = host_info['ports_no'] # get ports number from hardware\n ports = ports_generator(ports_number)\n\n port_switcher(host, ports, action)\n\ndef init(hostnames):\n host_db = DatabaseInit(hostnames)\n host_db.initiate_db()\n return None\n\ndef ports_generator(ports):\n \"\"\" Takes Int as input \"\"\"\n\n return [port for port in range(1, ports_number + 1)]\n\ndef port_switcher(host, ports, action=None):\n for port in ports:\n portmanager = PortState(host, port, force=action) # Up, Down\n portmanager.switch()\n\ndef get_host_list():\n \"\"\" Return clean hostnames \"\"\"\n\n db_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), \"db\", \"network\"))\n db_files = [ x[:6] for x in os.listdir(db_path) if \"L00\" in x ] # get clean hostnames\n return db_files\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument('-n','--hosts', nargs=\"*\", dest=\"hosts\", help=\"Set hostnames: [HOST] [HOST] ... \")\n parser.add_argument('-p','--ports', nargs=\"*\", dest=\"ports\", help=\"Set ports: [PORT] [PORT] ... \")\n parser.add_argument('-a','--action', dest=\"action\", help=\"Set action [Up/Down]\")\n parser.add_argument('-i','--init', action=\"store_true\", dest=\"init\", help=\"Initialise database L00XXX.json\")\n parser.add_argument('--ignore-holiday', action=\"store_true\", dest=\"ignore_holiday\", help=\"Run regardless of the holiday or weekend.\")\n\n args = parser.parse_args()\n parameters = vars(args)\n #print parameters\n\n run(parameters)\n","sub_path":"cisco_port_manager.py","file_name":"cisco_port_manager.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"477486956","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n# untitled.py\n# \n# Copyright 2013 Martin \n# \n# This program is free software; you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation; either version 2 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport re\n\n#def fix_style(seed):\n#\tstring = ''\n#\tfor i in seed:\n#\t\tstring.append(str(i).strip('[]'))\n#\t\t\n#\treturn string\n\t\t\n\npage = urllib2.urlopen('http://stats.swehockey.se/ScheduleAndResults/Live/3005').read()\nsoup = BeautifulSoup(page)\nsoup.prettify()\n\ngames = soup.find_all(\"b\")\nmatches = soup.b.get_text()\n\n\nresult = []\nfor text in games:\n\tgame = text, str(text.find_next(\"a\").string)\n\tresult.append(game)\n\nfor i in result:\n\tfor p in i:\n\t\tprint(p)\n\t\n#fixed = fix_style(result)\n\nprint(result)","sub_path":"result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"648125293","text":"# https://leetcode.com/problems/reorder-list/\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: void Do not return anything, modify head in-place instead.\n \"\"\"\n if not (head and head.next):\n return\n slow = head\n fast = head.next\n ln = 1\n while fast.next and fast.next.next:\n slow = slow.next\n fast = fast.next.next\n ln += 1\n if not fast.next:\n ln = 2*ln\n else:\n slow = slow.next\n ln = 2*ln + 1\n if ln == 2:\n return\n # now slow is as mid of the list\n half2 = slow.next\n slow.next = None\n # reverse the second half\n if half2.next:\n temp1 = half2.next\n half2.next = None\n while temp1 and temp1.next:\n temp2 = temp1.next\n temp1.next = half2\n half2 = temp1\n temp1 = temp2\n temp1.next = half2\n half2 = temp1\n # merge the two lists\n temp1 = head\n temp2 = half2\n while temp1 and temp2:\n half2 = half2.next\n temp2.next = temp1.next\n temp1.next = temp2\n temp1 = temp2.next\n temp2 = half2\n \n","sub_path":"Algo/python/ReorderList.py","file_name":"ReorderList.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"203341021","text":"import os\nimport logging\nimport tempfile\nfrom pysparkling import Context\n\n\ndef test_local_textFile_1():\n lines = Context().textFile('tests/*textFil*.py').collect()\n print(lines)\n assert 'from pysparkling import Context' in lines\n\n\ndef test_local_textFile_2():\n line_count = Context().textFile('tests/*.py').count()\n print(line_count)\n assert line_count > 90\n\n\ndef test_local_textFile_name():\n name = Context().textFile('tests/*.py').name()\n print(name)\n assert name == 'tests/*.py'\n\n\ndef test_s3_textFile():\n if not os.getenv('AWS_ACCESS_KEY_ID'):\n return\n\n myrdd = Context().textFile(\n 's3n://aws-publicdatasets/common-crawl/crawl-data/'\n 'CC-MAIN-2015-11/warc.paths.*'\n )\n assert (\n 'common-crawl/crawl-data/CC-MAIN-2015-11/segments/1424937481488.49/'\n 'warc/CC-MAIN-20150226075801-00329-ip-10-28-5-156.ec2.'\n 'internal.warc.gz' in myrdd.collect()\n )\n\n\ndef test_http_textFile():\n myrdd = Context().textFile(\n 'https://s3-us-west-2.amazonaws.com/human-microbiome-project/DEMO/'\n 'HM16STR/46333/by_subject/1139.fsa'\n )\n assert u'TGCTGCGGTGAATGCGTTCCCGGGTCT' in myrdd.collect()\n\n\ndef test_saveAsTextFile():\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n Context().parallelize(range(10)).saveAsTextFile(tempFile.name)\n with open(tempFile.name+'/part-00000', 'r') as f:\n r = f.readlines()\n print(r)\n assert '5\\n' in r\n\n\ndef test_saveAsTextFile_gz():\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n Context().parallelize(range(10)).saveAsTextFile(tempFile.name+'.gz')\n read_rdd = Context().textFile(tempFile.name+'.gz')\n assert '5' in read_rdd.collect()\n\n\ndef test_saveAsTextFile_bz2():\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n Context().parallelize(range(10)).saveAsTextFile(tempFile.name+'.bz2')\n read_rdd = Context().textFile(tempFile.name+'.bz2')\n assert '5' in read_rdd.collect()\n\n\ndef test_pyspark_compatibility_txt():\n kv = Context().textFile('tests/pyspark/key_value.txt').collect()\n print(kv)\n assert u\"('a', 1)\" in kv and u\"('b', 2)\" in kv and len(kv) == 2\n\n\ndef test_pyspark_compatibility_bz2():\n kv = Context().textFile('tests/pyspark/key_value.txt.bz2').collect()\n print(kv)\n assert u\"a\\t1\" in kv and u\"b\\t2\" in kv and len(kv) == 2\n\n\ndef test_pyspark_compatibility_gz():\n kv = Context().textFile('tests/pyspark/key_value.txt.gz').collect()\n print(kv)\n assert u\"a\\t1\" in kv and u\"b\\t2\" in kv and len(kv) == 2\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n # test_saveAsTextFile()\n # test_local_textFile_2()\n # test_saveAsTextFile_gz()\n # test_s3_textFile()\n test_http_textFile()\n # test_pyspark_compatibility_txt()\n # test_pyspark_compatibility_gz()\n # test_pyspark_compatibility_bz2()\n","sub_path":"tests/test_textFile.py","file_name":"test_textFile.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"221175917","text":"\"\"\"\r\nAuthor: Chris Berardi\r\nSolution to STAT656 Week 10 Assigment, Spring 2017\r\nBasic Text Clustering\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport string\r\nimport nltk\r\nimport numpy as np\r\nfrom nltk import pos_tag\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import wordnet as wn\r\nfrom nltk.corpus import stopwords\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\n#for regression\r\nfrom Class_replace_impute_encode import ReplaceImputeEncode\r\nfrom Class_regression import linreg\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# my_analyzer replaces both the preprocessor and tokenizer\r\n# it also replaces stop word removal and ngram constructions\r\n\r\ndef my_analyzer(s):\r\n # Synonym List\r\n syns = {'veh': 'vehicle', 'car': 'vehicle', 'chev':'cheverolet', \\\r\n 'chevy':'cheverolet', 'air bag': 'airbag', \\\r\n 'seat belt':'seatbelt', \"n't\":'not', 'to30':'to 30', \\\r\n 'wont':'would not', 'cant':'can not', 'cannot':'can not', \\\r\n 'couldnt':'could not', 'shouldnt':'should not', \\\r\n 'wouldnt':'would not', }\r\n \r\n # Preprocess String s\r\n s = s.lower()\r\n s = s.replace(',', '. ')\r\n # Tokenize \r\n tokens = word_tokenize(s)\r\n tokens = [word.replace(',','') for word in tokens ]\r\n tokens = [word for word in tokens if ('*' not in word) and \\\r\n (\"''\" != word) and (\"``\" != word) and \\\r\n (word!='description') and (word !='dtype') \\\r\n and (word != 'object') and (word!=\"'s\")]\r\n \r\n # Map synonyms\r\n for i in range(len(tokens)):\r\n if tokens[i] in syns:\r\n tokens[i] = syns[tokens[i]]\r\n \r\n # Remove stop words\r\n punctuation = list(string.punctuation)+['..', '...']\r\n pronouns = ['i', 'he', 'she', 'it', 'him', 'they', 'we', 'us', 'them']\r\n stop = stopwords.words('english') + punctuation + pronouns\r\n filtered_terms = [word for word in tokens if (word not in stop) and \\\r\n (len(word)>1) and (not word.replace('.','',1).isnumeric()) \\\r\n and (not word.replace(\"'\",'',2).isnumeric())]\r\n \r\n # Lemmatization & Stemming - Stemming with WordNet POS\r\n # Since lemmatization requires POS need to set POS\r\n tagged_words = pos_tag(filtered_terms, lang='eng')\r\n # Stemming with for terms without WordNet POS\r\n stemmer = SnowballStemmer(\"english\")\r\n wn_tags = {'N':wn.NOUN, 'J':wn.ADJ, 'V':wn.VERB, 'R':wn.ADV}\r\n wnl = WordNetLemmatizer()\r\n stemmed_tokens = []\r\n for tagged_token in tagged_words:\r\n term = tagged_token[0]\r\n pos = tagged_token[1]\r\n pos = pos[0]\r\n try:\r\n pos = wn_tags[pos]\r\n stemmed_tokens.append(wnl.lemmatize(term, pos=pos))\r\n except:\r\n stemmed_tokens.append(stemmer.stem(term))\r\n return stemmed_tokens\r\n\r\n# Further Customization of Stopping and Stemming using NLTK\r\ndef my_preprocessor(s):\r\n #Vectorizer sends one string at a time\r\n s = s.lower()\r\n s = s.replace(',', '. 
')\r\n print(\"preprocessor\")\r\n return(s)\r\n \r\ndef my_tokenizer(s):\r\n # Tokenize\r\n print(\"Tokenizer\")\r\n tokens = word_tokenize(s)\r\n tokens = [word.replace(',','') for word in tokens ]\r\n tokens = [word for word in tokens if word.find('*')!=True and \\\r\n word != \"''\" and word !=\"``\" and word!='description' \\\r\n and word !='dtype']\r\n return tokens\r\n\r\n# Increase Pandas column width to let pandas read large text columns\r\npd.set_option('max_colwidth', 32000)\r\n# California Cabernet Reviews\r\nfile_path = 'C:/Users/Saistout/Desktop/656 Applied Analytics/Python/Week 10 Assignment/'\r\ndf = pd.read_excel(file_path+\"CaliforniaCabernet.xlsx\")\r\n\r\n# Setup simple constants\r\nn_docs = len(df['description'])\r\nn_samples = n_docs\r\nm_features = None\r\ns_words = 'english'\r\nngram = (1,2)\r\n\r\n# Setup reviews in list 'discussions'\r\ndiscussions = []\r\nfor i in range(n_samples):\r\n discussions.append((\"%s\" %df['description'].iloc[i]))\r\n \r\n \r\n# Create Word Frequency by Review Matrix using Custom Analyzer\r\ncv = CountVectorizer(max_df=0.95, min_df=2, max_features=m_features,\\\r\n analyzer=my_analyzer, ngram_range=ngram)\r\ntf = cv.fit_transform(discussions)\r\n\r\nprint(\"\\nVectorizer Parameters\\n\", cv, \"\\n\")\r\n\r\n\r\n# LDA For Term Frequency x Doc Matrix\r\nn_topics = 9\r\nmax_iter = 5\r\nlearning_offset = 20.\r\nlearning_method = 'online'\r\n# LDA for TF-IDF x Doc Matrix\r\n# First Create Term-Frequency/Inverse Doc Frequency by Review Matrix\r\n# This requires constructing Term Freq. x Doc. matrix first\r\ntf_idf = TfidfTransformer()\r\nprint(\"\\nTF-IDF Parameters\\n\", tf_idf.get_params(),\"\\n\")\r\ntf_idf = tf_idf.fit_transform(tf)\r\n# Or you can construct the TF/IDF matrix from the data\r\ntfidf_vect = TfidfVectorizer(max_df=0.95, min_df=2, max_features=m_features,\\\r\n analyzer=my_analyzer, ngram_range=ngram)\r\ntf_idf = tfidf_vect.fit_transform(discussions)\r\nprint(\"\\nTF_IDF Vectorizer Parameters\\n\", tfidf_vect, \"\\n\")\r\n\r\nlda = LatentDirichletAllocation(n_components=n_topics, max_iter=max_iter,\\\r\n learning_method=learning_method, \\\r\n learning_offset=learning_offset, \\\r\n random_state=12345)\r\nlda.fit_transform(tf_idf)\r\nprint('{:.<22s}{:>6d}'.format(\"Number of Reviews\", tf.shape[0]))\r\nprint('{:.<22s}{:>6d}'.format(\"Number of Terms\", tf.shape[1]))\r\nprint(\"\\nTopics Identified using LDA with TF_IDF\")\r\ntf_features = cv.get_feature_names()\r\nmax_words = 15\r\ndesc = []\r\nfor topic_idx, topic in enumerate(lda.components_):\r\n message = \"Topic #%d: \" % topic_idx\r\n message += \" \".join([tf_features[i]\r\n for i in topic.argsort()[:-max_words - 1:-1]])\r\n print(message)\r\n print()\r\n desc.append([tf_features[i] for i in topic.argsort()[:-max_words - 1:-1]])\r\n \r\n#Extract which topic each review belongs to\r\ntopics = pd.DataFrame(lda.fit_transform(tf_idf))\r\nclusters = pd.DataFrame(topics.idxmax(axis=1))\r\ncol=['year','points','Region','price']\r\nclus = pd.concat([clusters,df[col]], axis=1, ignore_index=True)\r\n#rename the columns\r\nclus.columns = [\"Cluster\",\"Year\",\"Score\",\"Region\",\"Price\"]\r\n\r\n#Create a table of the average points and price per cluster, include the 15\r\n#word descriptions in the table\r\nprice = []\r\nscore = []\r\ncluster = []\r\nmean_table=pd.DataFrame()\r\nfor i in range(0,9):\r\n this_clust = clus[clus['Cluster']==i]\r\n this_price = this_clust['Price'].mean()\r\n this_score = this_clust['Score'].mean()\r\n price.append(this_price)\r\n 
score.append(this_score)\r\n cluster.append(i)\r\nmean_table['Cluster']=cluster\r\nmean_table['Score']=score\r\nmean_table['Price']=price\r\nmean_table['Description']=desc\r\nmean_table\r\n\r\n#Create a table of the percent of reviews in each cluster by wine region\r\np0=[]\r\np1=[]\r\np2=[]\r\np3=[]\r\np4=[]\r\np5=[]\r\np6=[]\r\np7=[]\r\np8=[]\r\nregions=['California Other', 'Central Coast','Central Valley', 'Clear Lake',\\\r\n 'High Valley', 'Lake County','Mendocino County','Mendocino Ridge',\\\r\n 'Mendocino/Lake Counties', 'Napa','Napa-Sonoma','North Coast',\\\r\n 'Red Hills Lake County','Redwood Valley','Sierra Foothills','Sonoma',\\\r\n 'South Coast']\r\npRegion=pd.DataFrame()\r\nfor name in regions:\r\n this_region = clus[clus['Region']==name]\r\n n=[]\r\n total=0\r\n for i in range(0,9):\r\n this_clus=this_region[this_region['Cluster']==i]\r\n n.append(this_clus.shape[0])\r\n total=sum(n)\r\n p0.append(n[0]/total)\r\n p1.append(n[1]/total)\r\n p2.append(n[2]/total)\r\n p3.append(n[3]/total)\r\n p4.append(n[4]/total)\r\n p5.append(n[5]/total)\r\n p6.append(n[6]/total)\r\n p7.append(n[7]/total)\r\n p8.append(n[8]/total)\r\npRegion['Region']=regions\r\npRegion['P0']=p0\r\npRegion['P1']=p1\r\npRegion['P2']=p2\r\npRegion['P3']=p3\r\npRegion['P4']=p4\r\npRegion['P5']=p5\r\npRegion['P6']=p6\r\npRegion['P7']=p7\r\npRegion['P8']=p8\r\n\r\n#Fit a linear regression model to model wine price use a 70/30 train test split\r\n#Since the regularization parameter C only exists for logistic regression\r\nattribute_map_clus = {\r\n 'Score' :[0,(80,100),[0,0]],\r\n 'Year' :[0,(1985,2016),[0,0]],\r\n 'Region' :[2,('California Other', 'Central Coast','Central Valley', \\\r\n 'Clear Lake','High Valley', 'Lake County',\\\r\n 'Mendocino County','Mendocino Ridge',\\\r\n 'Mendocino/Lake Counties', 'Napa','Napa-Sonoma',\\\r\n 'North Coast','Red Hills Lake County','Redwood Valley',\\\r\n 'Sierra Foothills','Sonoma','South Coast'),[0,0]],\r\n 'Cluster' :[2,(0,1,2,3,4,5,6,7,8),[0,0]],\r\n 'Price' :[0,(0,625),[0,0]]\r\n}\r\nvarlist = ['Price']\r\n\r\nrie_clus = ReplaceImputeEncode(data_map=attribute_map_clus, \\\r\n nominal_encoding='one-hot', \r\n interval_scale = None, drop=True, display=False)\r\nencoded_df_clus = rie_clus.fit_transform(clus)\r\n\r\nX_clus = encoded_df_clus.drop(varlist, axis=1)\r\ny_clus = encoded_df_clus[varlist]\r\nX_train, X_valid, y_train, y_valid= \\\r\ntrain_test_split(X_clus,y_clus,test_size = 0.3, random_state=7)\r\n\r\nnp_y_train = np.ravel(y_train)\r\nnp_y_valid = np.ravel(y_valid)\r\n\r\n\r\nreg = LinearRegression()\r\nreg.fit(X_train,np_y_train)\r\n\r\nlinreg.display_coef(reg,X_train,y_train,X_train.columns)\r\nlinreg.display_split_metrics(reg,X_train,y_train,X_valid,y_valid)","sub_path":"week 10.py","file_name":"week 10.py","file_ext":"py","file_size_in_byte":9778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
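The key step hidden in the pandas plumbing above is how a review becomes a cluster label: each row of lda.fit_transform is a topic distribution, and idxmax picks the dominant topic. Stripped to a toy matrix:

import numpy as np
import pandas as pd

# stand-in for lda.fit_transform(tf_idf): 3 documents x 2 topics
doc_topic = np.array([[0.9, 0.1],
                      [0.2, 0.8],
                      [0.5, 0.5]])
clusters = pd.DataFrame(doc_topic).idxmax(axis=1)
print(clusters.tolist())  # [0, 1, 0]  (a tie resolves to the first column)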
+{"seq_id":"217401870","text":"def solve(A):\n count=0\n for i in range(len(A)):\n if A[i]&1==1 and A[i]>1 and A[i]!=2:\n j=3\n isPrime=True\n while(j*j<=A[i]):\n if A[i]%j==0:\n isPrime=False\n break\n else:\n j+=1\n if isPrime==True:\n #print(A[i])\n count+=1\n elif A[i] in [2,3,5,7]:\n count+=1\n return count\n \n \ndef main():\n A=list(map(int,input().split()))\n n=solve(A)\n print(n)\n\nif __name__ == '__main__':\n main()\n","sub_path":"primeCount.py","file_name":"primeCount.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"95213073","text":"# Copyright (c) 2016 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nfrom cassandra import query\nfrom oslo_log import log\nfrom six.moves import filterfalse\n\nfrom poppy.model import ssl_certificate\nfrom poppy.storage import base\n\n\nLOG = log.getLogger(__name__)\n\nCQL_CREATE_CERT = '''\n INSERT INTO certificate_info (project_id,\n flavor_id,\n cert_type,\n domain_name,\n cert_details\n )\n VALUES (%(project_id)s,\n %(flavor_id)s,\n %(cert_type)s,\n %(domain_name)s,\n %(cert_details)s)\n'''\n\nCQL_SEARCH_CERT_BY_DOMAIN = '''\n SELECT project_id,\n flavor_id,\n cert_type,\n domain_name,\n cert_details\n FROM certificate_info\n WHERE domain_name = %(domain_name)s\n'''\n\nCQL_GET_CERTS_BY_STATUS = '''\n SELECT domain_name\n FROM cert_status WHERE status = %(status)s\n'''\n\nCQL_DELETE_CERT = '''\n DELETE FROM certificate_info\n WHERE domain_name = %(domain_name)s\n'''\n\nCQL_DELETE_CERT_STATUS = '''\n DELETE FROM cert_status\n WHERE domain_name = %(domain_name)s\n'''\n\n\nCQL_INSERT_CERT_STATUS = '''\n INSERT INTO cert_status (domain_name,\n status\n )\n VALUES (%(domain_name)s,\n %(status)s)\n'''\n\nCQL_UPDATE_CERT_DETAILS = '''\n UPDATE certificate_info\n set cert_details = %(cert_details)s\n WHERE domain_name = %(domain_name)s\n IF cert_type = %(cert_type)s AND flavor_id = %(flavor_id)s\n'''\n\n\nclass CertificatesController(base.CertificatesController):\n\n \"\"\"Certificates Controller.\"\"\"\n\n @property\n def session(self):\n \"\"\"Get session.\n\n :returns session\n \"\"\"\n return self._driver.database\n\n def create_certificate(self, project_id, cert_obj):\n if self.cert_already_exist(domain_name=cert_obj.domain_name,\n comparing_cert_type=cert_obj.cert_type,\n comparing_flavor_id=cert_obj.flavor_id,\n comparing_project_id=project_id):\n raise ValueError('Certificate already exists '\n 'for {0} '.format(cert_obj.domain_name))\n\n args = {\n 'project_id': project_id,\n 'flavor_id': cert_obj.flavor_id,\n 'cert_type': cert_obj.cert_type,\n 'domain_name': cert_obj.domain_name,\n # when create the cert, cert domain has not been assigned yet\n # In future we can tweak the logic to assign cert_domain\n # 'cert_domain': '',\n 'cert_details': cert_obj.cert_details\n }\n stmt = query.SimpleStatement(\n CQL_CREATE_CERT,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, args)\n\n cert_status = None\n try:\n provider_status = json.loads(\n list(cert_obj.cert_details.values())[0]\n )\n cert_status = provider_status['extra_info']['status']\n except (IndexError, KeyError, ValueError) as e:\n LOG.warning(\n \"Create certificate missing extra info \"\n \"status {0}: Error {1}. \"\n \"Using 'create_in_progress' instead. 
\".format(\n cert_obj.cert_details, e))\n cert_status = 'create_in_progress'\n finally:\n # insert/update for cassandra\n self.insert_cert_status(cert_obj.domain_name, cert_status)\n\n def delete_certificate(self, project_id, domain_name, cert_type):\n args = {\n 'domain_name': domain_name.lower()\n }\n\n stmt = query.SimpleStatement(\n CQL_SEARCH_CERT_BY_DOMAIN,\n consistency_level=self._driver.consistency_level)\n result_set = self.session.execute(stmt, args)\n complete_results = list(result_set)\n if complete_results:\n for r in complete_results:\n r_project_id = str(r.get('project_id'))\n r_cert_type = str(r.get('cert_type'))\n if r_project_id == str(project_id) and \\\n r_cert_type == str(cert_type):\n args = {\n 'domain_name': str(r.get('domain_name'))\n }\n stmt = query.SimpleStatement(\n CQL_DELETE_CERT,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, args)\n stmt = query.SimpleStatement(\n CQL_DELETE_CERT_STATUS,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, args)\n else:\n raise ValueError(\n \"No certificate found for: {0},\"\n \"type: {1}\".format(domain_name, cert_type))\n\n def update_certificate(self, domain_name, cert_type, flavor_id,\n cert_details):\n\n args = {\n 'domain_name': domain_name,\n 'cert_type': cert_type,\n 'flavor_id': flavor_id,\n 'cert_details': cert_details\n }\n stmt = query.SimpleStatement(\n CQL_UPDATE_CERT_DETAILS,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, args)\n\n try:\n provider_status = json.loads(list(cert_details.values())[0])\n cert_status = provider_status['extra_info']['status']\n self.insert_cert_status(domain_name, cert_status)\n except (IndexError, KeyError, ValueError) as e:\n # certs already existing in DB should have all\n # the necessary fields\n LOG.error(\n \"Unable to update cert_status because certificate \"\n \"details are in an inconsistent \"\n \"state: {0}: {1}\".format(cert_details, e))\n\n def insert_cert_status(self, domain_name, cert_status):\n cert_args = {\n 'domain_name': domain_name,\n 'status': cert_status\n }\n stmt = query.SimpleStatement(\n CQL_INSERT_CERT_STATUS,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, cert_args)\n\n def get_certs_by_status(self, status):\n\n LOG.info(\"Getting domains which have \"\n \"certificate in status : {0}\".format(status))\n args = {\n 'status': status\n }\n stmt = query.SimpleStatement(\n CQL_GET_CERTS_BY_STATUS,\n consistency_level=self._driver.consistency_level)\n resultset = self.session.execute(stmt, args)\n complete_results = list(resultset)\n\n return complete_results\n\n def get_certs_by_domain(self, domain_name, project_id=None,\n flavor_id=None,\n cert_type=None):\n\n LOG.info(\"Check if cert on '{0}' exists\".format(domain_name))\n args = {\n 'domain_name': domain_name.lower()\n }\n stmt = query.SimpleStatement(\n CQL_SEARCH_CERT_BY_DOMAIN,\n consistency_level=self._driver.consistency_level)\n resultset = self.session.execute(stmt, args)\n complete_results = list(resultset)\n certs = []\n if complete_results:\n for r in complete_results:\n r_project_id = str(r.get('project_id'))\n r_flavor_id = str(r.get('flavor_id'))\n r_cert_type = str(r.get('cert_type'))\n r_cert_details = {}\n # in case cert_details is None\n cert_details = r.get('cert_details', {}) or {}\n # Need to convert cassandra dict into real dict\n # And the value of cert_details is a string dict\n for key in cert_details:\n r_cert_details[key] = 
json.loads(cert_details[key])\n LOG.info(\n \"Certificate for domain: {0} with flavor_id: {1}, \"\n \"cert_details : {2} and cert_type: {3} present \"\n \"on project_id: {4}\".format(\n domain_name,\n r_flavor_id,\n r_cert_details,\n r_cert_type,\n r_project_id\n )\n )\n ssl_cert = ssl_certificate.SSLCertificate(\n domain_name=domain_name,\n flavor_id=r_flavor_id,\n cert_details=r_cert_details,\n cert_type=r_cert_type,\n project_id=r_project_id\n )\n\n certs.append(ssl_cert)\n\n non_none_attrs_gen = filterfalse(\n lambda x: list(x.values())[0] is None, [{'project_id': project_id},\n {'flavor_id': flavor_id},\n {'cert_type': cert_type}])\n non_none_attrs_list = list(non_none_attrs_gen)\n non_none_attrs_dict = {}\n\n if non_none_attrs_list:\n for attr in non_none_attrs_list:\n non_none_attrs_dict.update(attr)\n\n def argfilter(certificate):\n all_conditions = True\n if non_none_attrs_dict:\n for k, v in non_none_attrs_dict.items():\n if getattr(certificate, k) != v:\n all_conditions = False\n\n return all_conditions\n\n total_certs = [cert for cert in certs if argfilter(cert)]\n\n if len(total_certs) == 1:\n return total_certs[0]\n else:\n return total_certs\n\n def cert_already_exist(self, domain_name, comparing_cert_type,\n comparing_flavor_id, comparing_project_id):\n \"\"\"cert_already_exist\n\n Check if a cert with this domain name and type has already been\n created, or if the domain has been taken by other customers\n\n :param domain_name\n :param comparing_cert_type\n :param comparing_flavor_id\n :param comparing_project_id\n\n :returns Boolean if the cert with same type exists with another user.\n \"\"\"\n cert = self.get_certs_by_domain(\n domain_name=domain_name,\n cert_type=comparing_cert_type,\n flavor_id=comparing_flavor_id\n )\n\n if cert:\n return True\n else:\n return False\n","sub_path":"poppy/storage/cassandra/certificates.py","file_name":"certificates.py","file_ext":"py","file_size_in_byte":10945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
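The filterfalse construction in get_certs_by_domain is an indirect way of keeping only the query attributes that were actually supplied. A dict comprehension is an equivalent; this helper is hypothetical, not part of poppy:

def active_filters(**kwargs):
    """Drop the None-valued filters, keep the rest."""
    return {k: v for k, v in kwargs.items() if v is not None}

print(active_filters(project_id='p1', flavor_id=None, cert_type='san'))
# {'project_id': 'p1', 'cert_type': 'san'}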
+{"seq_id":"181705252","text":"#!/usr/bin/python\nimport sys\nfrom bs4 import BeautifulSoup\n\nalpha = \"abcdefghijklmnopqrstwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nd={'Introduction':'1','Theory':'2','Objective':'3','Virtual Lab':'4','Manual':'5','Quiz':'6','Video':'7','Reference':'8'}\nfile_read = raw_input(\"Enter the name of the source file: \")\nsoup = BeautifulSoup(open(file_read))\ndiv = ''\nfile_name = ''\nflag = 0\nexp = ''\nbl = 0\nfor i in soup.title.string:\n\t\n\tif ((i == \":\") or (i == \")\")):\n\t\tbreak\n\n\tif( i in alpha and (bl == 0)):\n\t\tbl = 1 \n\tif(bl==1):\n\t\n\t\tfile_name += i\n\n\t\tif i == \"(\":\n\t\t\tflag = 1\n\t\t\tcontinue\n\t\n\t\tif flag == 0:\n\t\t\texp += i\n\n\t\tif flag == 1:\n\t\t\tdiv += i\n\n#fo = open(file_name+\").txt\", \"w\")\n#print \"Name of the file: \", fo.name\n#fo.write(\"Title\"app+\"\\n\");\n\nfile_w = raw_input(\"Enter the name of the destination file: \")\nf = open(file_w,\"r\")\nf_content = f.read()\nf.close()\n\nso = BeautifulSoup(f_content)\n\t\natt = ''+'experiment-article-section-'+d[div]+'-content'\n\ntagger = so.findAll('div', attrs={'id':att,'class':'content'})\ntag = tagger[0]\ntag.clear() \n\nexp_name = so.findAll('header', attrs={'class':'heading','id':'experiment-article-heading'})\nexp_n = exp_name[0]\nexp_n.clear()\nexp_n.insert(1,exp)\nif (d[div]=='4' or d[div]=='7'):\n\tcont = soup.findAll('div', attrs={'class': 'divLink'})\nelse:\n\tcont = soup.findAll('div', attrs={'class': 'divContent'})\ntag.insert(1,cont[0])\n\nf = open(file_w,\"w\")\nf.write(str(so))\nf.close()\n#fo.close()\n#-----------Created by----------------------#\n#-----Pranitha and Sourav-------------------#\n#-----------Conversion of html files to new UI(blue icon theme) format------#\n","sub_path":"lik-release-0.5.0/ui-1.0-toolkit/scripts/automated-scripts/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"505903840","text":"import pytest\n\n\n@pytest.yield_fixture()\ndef setup():\n print('Running before every Test')\n yield\n print('Running after every Test')\n\n\n@pytest.yield_fixture(scope='class')\ndef one_time_setup(browser, os_type,request):\n print('Running Once before all Tests')\n if browser == 'firefox':\n print('Running Firefox Browser')\n value = 10\n elif browser == 'Chrome':\n print('Running Chrome Browser')\n value = 20\n if request.cls is not None:\n request.cls.value = value\n yield value\n print('Running Once after all Tests')\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\")\n parser.addoption(\"--osType\", help=\"Please specify os Type\")\n\n\n@pytest.fixture(scope=\"session\")\ndef browser(request):\n return request.config.getoption(\"--browser\")\n\n\n@pytest.fixture(scope=\"session\")\ndef os_type(request):\n return request.config.getoption(\"--osType\")\n\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"341185819","text":"#!/usr/bin/env python\n\"\"\" Frequent Words extractor [Productivity Tool]\nScan the given directory, list all the words appear in certain type of files\nGenerator a list of that words\nMainly for vim completion\n@Author: Zhifeng(fengyu05@gmail.com)\n\"\"\"\n\nusage = \"usage: %prog [--min_length=5] [--min_fre=5] filetype directory\"\n\nimport sys\nimport re\nfrom optparse import OptionParser\nfrom os import listdir\nfrom os.path import isdir,islink,join\n\nparser = OptionParser(usage)\nparser.add_option(\"-l\", \"--min_length\", dest=\"MIN_LENGTH\",\n type=\"int\",\n default = 7,\n help=\"min length that concerns\")\n\nparser.add_option(\"-f\", \"--min_freq\", dest=\"MIN_FREQUENT\",\n type=\"int\",\n default = 7,\n help=\"min frequent that concerns\")\n\nparser.add_option(\"-d\", \"--max_deep\", dest=\"MAX_DEEP\",\n type=\"int\",\n default = 20,\n help=\"max deep of directory\")\n\nwordDict = dict()\n\ndef checkWords(filetype, directory, options, deep):\n if deep > options.MAX_DEEP:\n return\n for file in listdir(directory):\n fileFullPath = join(directory, file)\n if isdir(fileFullPath) and not islink(fileFullPath):\n checkWords(filetype, fileFullPath, options, deep + 1)\n else:\n if fileFullPath.endswith(\".\" + filetype):\n content = open(fileFullPath, \"r\").readlines()\n for line in content:\n words = re.split('\\W+', line)\n for word in words:\n if len(word) >= options.MIN_LENGTH:\n if word in wordDict:\n wordDict[word] = wordDict[word] + 1\n else:\n wordDict[word] = 1\n\ndef genWords(options):\n freqWords = [word for word, frequent in wordDict.iteritems() \n if frequent >= options.MIN_FREQUENT]\n for word in freqWords:\n print (word)\n\ndef main():\n (options, argv) = parser.parse_args()\n if len(argv) < 2:\n parser.error(\"incorrect number of arguments\")\n sys.exit(1)\n filetype = argv[0]\n directory = argv[1]\n checkWords(filetype, directory, options, 0)\n genWords(options)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"utt/freqword_extractor.py","file_name":"freqword_extractor.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"454552729","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom listings.models import Listing\nfrom realtors.models import Realtor\n\ndef index(request):\n listings=Listing.objects.order_by('list_date').filter(is_published=True)[:3]\n context = {\n 'listings':listings\n }\n return render(request,'pages/index.html', context)\n\n\ndef about(request):\n\n realtors=Realtor.objects.order_by('-hire_date')\n mpv_realtor=Realtor.objects.all().filter(is_mvp=True)\n context={\n 'realtors':realtors,\n 'mpv_realtor': mpv_realtor\n }\n \n return render(request,'pages/about.html', context)\n\n\n","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"9844837","text":"import cv2, enum, time, os, math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import rankdata\n\n#############################################################################\n# User variables (static)\n#############################################################################\nclass PltFunc :\n def plot1row2Img(img1, img2):\n fig = plt.figure()\n fig.add_subplot(1,2,1)\n plt.imshow(img1)\n fig.add_subplot(1,2,2)\n plt.imshow(img2)\n plt.show() \n\nclass ALG(enum.Enum):\n MAE = 0\n MSE = 1\n RMSE = 2\n PSNR = 3\n SSIM = 4\n P_MSE = 5 \n \nclass ImgCompare :\n def cvt256gray(img) :\n img = cv2.resize(img, (256,256), interpolation=cv2.INTER_LINEAR )\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n return img\n \n def mae(img1, img2):\n img1 = ImgCompare.cvt256gray(img1)\n img2 = ImgCompare.cvt256gray(img2)\n err = np.sum(abs((img1.astype(\"float\") - img2.astype(\"float\"))))\n err /= float(img1.shape[0] * img1.shape[1])\n return err\n\n def mse(img1, img2):\n img1 = ImgCompare.cvt256gray(img1)\n img2 = ImgCompare.cvt256gray(img2)\n err = np.sum((img1.astype(\"float\") - img2.astype(\"float\")) ** 2)\n err /= float(img1.shape[0] * img1.shape[1])\n return err\n\n def p_mse(img1, img2):\n img1 = ImgCompare.cvt256gray(img1)\n img2 = ImgCompare.cvt256gray(img2)\n maxval = np.max((img1.astype(\"float\") - img2.astype(\"float\")) ** 2)\n err = np.sum((img1.astype(\"float\") - img2.astype(\"float\")) ** 2)\n err /= float(img1.shape[0] * img1.shape[1])\n return err/maxval\n\n def rmse(img1, img2):\n err = ImgCompare.mse(img1, img2)\n return math.sqrt(err)\n\n def psnr(img1, img2):\n _rmse = ImgCompare.rmse(img1,img2)\n if _rmse == 0:\n return 100\n PIXEL_MAX = 255.0\n return 20 * math.log10(PIXEL_MAX / _rmse)\n\n def percent_ssim(img1, img2) :\n from skimage.measure import compare_ssim as ssim\n #img1 = cv2.resize(img1, (256,256), interpolation=cv2.INTER_LINEAR )\n #img2 = cv2.resize(img2, (256,256), interpolation=cv2.INTER_LINEAR )\n s = ssim(img1,img2, multichannel = True)\n return s\n\nclass ViewGuide :\n def mainLoop(rBase, tImgPath) :\n start = time.time()\n tempPath = rBase\n rImageList = os.listdir(tempPath)\n rImageList.sort()\n #print(tempPath+rImageList[0])\n tImg = cv2.imread(tImgPath)\n #plt.imshow(tImg)\n #plt.show()\n tImg = cv2.resize(tImg, (256,256), interpolation=cv2.INTER_LINEAR )\n tImg = cv2.cvtColor(tImg, cv2.COLOR_BGR2RGB)\n rImgs = []\n ssimArr = []\n psnrArr = []\n pmseArr = []\n \n votingArr = [[1,3,9],[0,4,2,10],[1,5,11],[0,4,6,12],[1,3,5,7],[2,4,8,13],[3,7,14],[4,6,8,15],[5,7,16],[0,10,12,17],[1,9,11,18],[2,10,13,19],[3,9,14,20],[5,11,16,22],[6,12,15,23],[7,14,16,24],[8,13,15,25],[9,18,20],[10,17,19,21],[11,18,22],[12,17,21,23],[18,20,22,24],[13,19,21,25],[14,20,24],[15,21,23,25],[16,22,24]]\n \n for filename in rImageList :\n rImg = cv2.imread(tempPath+filename)\n plt.imshow(rImg)\n plt.show()\n rImg = rImg[150:1200, 300:1600]\n rImg = cv2.resize(rImg, (256,256), interpolation=cv2.INTER_LINEAR )\n rImg = cv2.cvtColor(rImg, cv2.COLOR_BGR2RGB)\n rImgs.append(rImg)\n #PltFunc.plot1row2Img(rImg, tImg)\n ssim = ImgCompare.percent_ssim(rImg, tImg)\n psnr = ImgCompare.psnr(rImg, tImg)\n pmse = ImgCompare.p_mse(rImg, tImg)\n #print(filename)\n #print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssim*100.0, psnr,pmse*100.0) )\n \n ssimArr.append(ssim)\n psnrArr.append(psnr)\n pmseArr.append(pmse)\n \n \n print(\"calc Total time : \", (time.time() - start), 'sec')\n print(ssimArr)\n \n ssimRank = rankdata(ssimArr)\n psnrRank 
= rankdata(psnrArr)\n pmseRank = rankdata(pmseArr)\n votingArrSSIM = [0 for _ in range(26)]\n votingArrPSNR = [0 for _ in range(26)]\n votingArrPMSE = [0 for _ in range(26)]\n votingArrSum = [0 for _ in range(26)]\n \n for i in range(len(ssimRank)) :\n votingArrSSIM[i] += (ssimRank[i]-1)\n votingArrPSNR[i] += (psnrRank[i]-1)\n votingArrPMSE[i] += (pmseRank[i]-1)\n \n for j in votingArr[i] :\n votingArrSSIM[j] += (ssimRank[i]-1)/2\n votingArrPSNR[j] += (psnrRank[i]-1)/2\n votingArrPMSE[j] += (pmseRank[i]-1)/2\n \n for i in range(len(votingArrSSIM)) :\n votingArrSum[i]+=votingArrSSIM[i]\n votingArrSum[i]+=votingArrPSNR[i]\n votingArrSum[i]+=votingArrPMSE[i]\n \n votingSsimRank = rankdata(votingArrSSIM)\n votingPsnrRank = rankdata(votingArrPSNR)\n votingPmseRank = rankdata(votingArrPMSE)\n votingSumRank = rankdata(votingArrSum)\n \n #print (votingArrSSIM)\n for i in range(len(rImgs)):\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n print(\"==================================================================================\")\n print(\"Voting SSIM\")\n print(\"==================================================================================\")\n for j in range(0,10) :\n for i in range(len(votingSsimRank)) :\n if (votingSsimRank[i] == (26-j)) :\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n print(\"==================================================================================\")\n print(\"Voting PSNR\")\n print(\"==================================================================================\")\n for j in range(0,10) :\n for i in range(len(votingPsnrRank)) :\n if (votingPsnrRank[i] == (26-j)) :\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n print(\"==================================================================================\")\n print(\"Voting PMSE\")\n print(\"==================================================================================\")\n for j in range(0,10) :\n for i in range(len(votingPmseRank)) :\n if (votingPmseRank[i] == (26-j)) :\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % 
(votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n print(\"==================================================================================\")\n print(\"Voting SUM\")\n print(\"==================================================================================\")\n for j in range(0,10) :\n for i in range(len(votingSumRank)) :\n if (votingSumRank[i] == (26-j)) :\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n \nif __name__ == \"__main__\":\n renderImageBasePath = \"./renderimage/bonsai1/1/\"\n targetImageBasePath = \"./targetimage/bonsai1/1/\"\n targetImageFullPath = targetImageBasePath + 'bonsai1.jpg'\n \n ViewGuide.mainLoop(renderImageBasePath, targetImageFullPath)\n\n \n \n","sub_path":"SDDVR/viewpointGuiding/viewpointVoting_bonsai.py","file_name":"viewpointVoting_bonsai.py","file_ext":"py","file_size_in_byte":9733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
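A quick sanity check on the PSNR/MSE relation that drives part of the ranking above: psnr = 20*log10(255/sqrt(mse)), identical 8-bit images hit the cap of 100 used in ImgCompare.psnr, and a uniform one-gray-level error gives roughly 48 dB:

import math

def psnr_from_mse(mse, pixel_max=255.0):
    if mse == 0:
        return 100  # cap used for identical images, mirroring ImgCompare.psnr
    return 20 * math.log10(pixel_max / math.sqrt(mse))

print(psnr_from_mse(0))    # 100
print(psnr_from_mse(1.0))  # ~48.13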
+{"seq_id":"628009076","text":"import numpy as np\nimport os\nimport utilities_func as uf\nimport feat_analysis2 as fa\nimport pandas\nimport loadconfig\nimport ConfigParser\n#np.random.seed(24)\n\n#load configuration file\nconfig = loadconfig.load()\ncfg = ConfigParser.ConfigParser()\ncfg.read(config)\n\nSEQ_LENGTH = cfg.getint('preprocessing', 'sequence_length')\nSEQ_OVERLAP = cfg.getfloat('preprocessing', 'sequence_overlap')\n\nSOUND_FOLDER_PRETRAINING_T = cfg.get('preprocessing', 'input_audio_folder_PRETRAINING_t')\nANNOTATION_FOLDER_PRETRAINING_T = cfg.get('preprocessing', 'input_annotation_folder_PRETRAINING_t')\nOUTPUT_PREDICTORS_MATRIX_PRETRAINING_T = cfg.get('preprocessing', 'output_predictors_matrix_PRETRAINING_t')\nOUTPUT_TARGET_MATRIX_PRETRAINING_T = cfg.get('preprocessing', 'output_target_matrix_PRETRAINING_t')\n\nSOUND_FOLDER_PRETRAINING_V = cfg.get('preprocessing', 'input_audio_folder_PRETRAINING_v')\nANNOTATION_FOLDER_PRETRAINING_V = cfg.get('preprocessing', 'input_annotation_folder_PRETRAINING_v')\nOUTPUT_PREDICTORS_MATRIX_PRETRAINING_V = cfg.get('preprocessing', 'output_predictors_matrix_PRETRAINING_v')\nOUTPUT_TARGET_MATRIX_PRETRAINING_V = cfg.get('preprocessing', 'output_target_matrix_PRETRAINING_v')\n\nPREDICTORS_TOBEMERGED = cfg.get('preprocessing', 'output_predictors_matrix_t')\nTARGET_TOBEMERGED = cfg.get('preprocessing', 'output_target_matrix_t')\n\nOUTPUT_MERGED_PREDICTORS = cfg.get('preprocessing', 'output_predictors_matrix_merged')\nOUTPUT_MERGED_TARGET = cfg.get('preprocessing', 'output_target_matrix_merged')\n\nbounds_dict = np.load('../dataset/OMG2017/bounds_dict.npy')\nbounds_dict = bounds_dict.item()\n\nTARGET_DELAY = cfg.getint('preprocessing', 'target_delay')\nSR = cfg.getint('sampling', 'sr')\nHOP_SIZE = cfg.getint('stft', 'hop_size')\n\nfps = 25 #annotations per second\nhop_annotation = SR /fps\nframes_per_annotation = hop_annotation/float(HOP_SIZE)\n#frames_per_annotation = int(np.round(frames_per_annotation))\n'''\nreminder = frames_per_annotation % 1\n\nif reminder != 0.:\n raise ValueError('Hop size must be a divider of annotation hop (640)')\nelse:\n frames_per_annotation = int(frames_per_annotation)\n'''\nframes_delay = int(TARGET_DELAY * frames_per_annotation)\n\n\ndef merge_matrices(dataset1_path, dataset2_path, output_path):\n data1 = np.load(dataset1_path)\n data2 = np.load(dataset2_path)\n out_data = np.vstack((data1, data2))\n np.save(output_path, out_data)\n print(\"Successfully merged: \" + dataset1_path + ' AND ' + dataset2_path + ' INTO ' + output_path)\n print('Shape of merged matrix: ' + str(out_data.shape))\n\ndef preprocess_datapoint(input_sound, input_annotation):\n '''\n generate predictors (stft) and target (valence sequence)\n of one sound file from the OMG dataset\n '''\n name = input_sound.split('/')[-1].split('.')[0]\n start = bounds_dict[name]['start']\n end = bounds_dict[name]['end']\n startframe = int(np.round(start * fps))\n endframe = int(np.round(end*fps))\n startsamps = int(np.round(start*SR))\n endsamps = int(np.round(end*SR))\n\n sr, samples = uf.wavread(input_sound) #read audio\n samples = samples[startsamps:endsamps]\n e_samples = uf.preemphasis(samples, sr) #apply preemphasis\n feats = fa.extract_features(e_samples) #extract features\n annotation = np.load(input_annotation) #read annotations\n annotation = annotation[startframe:endframe]\n annotated_frames = int(len(annotation) * frames_per_annotation)\n feats = feats[:annotated_frames] #discard non annotated final frames\n annotation = annotation[TARGET_DELAY:] 
#shift back annotations by target_delay\n feats2 = feats[:-frames_delay]\n\n return feats, annotation\n\ndef segment_datapoint(features, annotation, sequence_length, sequence_overlap):\n '''\n segment features and annotations of one long audio file\n into smaller matrices of length \"sequence_length\"\n and overlapped by \"sequence_overlap\"\n '''\n step = sequence_length*sequence_overlap #segmentation overlap step\n num_datapoints = int(len(annotation) / step)\n pointer = np.arange(0,len(annotation), step, dtype='int') #initail positions of segments\n predictors = []\n target = []\n #slice arrays and append datapoints to vectors\n for start in pointer:\n start_annotation = start\n stop_annotation = start + sequence_length\n start_features = int(start_annotation * frames_per_annotation)\n stop_features = int(stop_annotation * frames_per_annotation)\n #print start_annotation, stop_annotation, start_features, stop_features\n if stop_annotation <= len(annotation):\n temp_predictors = features[start_features:stop_features]\n temp_target = annotation[start_annotation:stop_annotation]\n predictors.append(temp_predictors)\n target.append(temp_target)\n #target.append(np.mean(temp_target))\n else: #last datapoint has a different overlap\n temp_predictors = features[-int(sequence_length*frames_per_annotation):]\n temp_target = annotation[-sequence_length:]\n predictors.append(temp_predictors)\n target.append(temp_target)\n #target.append(np.mean(temp_target))\n predictors = np.array(predictors)\n target = np.array(target)\n\n return predictors, target\n\n\ndef preprocess_dataset(sound_folder, annotation_folder):\n '''\n build dataset numpy matrices:\n -predictors: contatining audio features\n -target: contatining correspective valence annotations\n both are NOT normalized\n datapoints order is randomly scrambled\n '''\n predictors = []\n target = []\n annotations = os.listdir(annotation_folder)\n #filtered_list = filter_items(annotations, target_subject, target_story)\n num_sounds = len(annotations)\n #process all files in folders\n index = 0\n for datapoint in annotations:\n print(datapoint)\n annotation_file = annotation_folder + '/' + datapoint\n name = datapoint.split('.')[0]\n sound_file = sound_folder + '/' + name +\".mp4.wav\" #get correspective sound\n long_predictors, long_target = preprocess_datapoint(sound_file, annotation_file) #compute features\n cut_predictors, cut_target = segment_datapoint(long_predictors, long_target, #slice feature maps\n SEQ_LENGTH, SEQ_OVERLAP)\n\n predictors.append(cut_predictors)\n target.append(cut_target)\n perc_progress = (index * 100) / num_sounds\n index += 1\n print(\"processed files: \" + str(index) + \" over \" + str(num_sounds) + \" | progress: \" + str(perc_progress) + \"%\")\n\n predictors = np.concatenate(predictors, axis=0) #reshape arrays\n target = np.concatenate(target, axis=0)\n #scramble datapoints order\n shuffled_predictors = []\n shuffled_target = []\n num_datapoints = target.shape[0]\n random_indices = range(num_datapoints)\n np.random.shuffle(random_indices)\n for i in random_indices:\n shuffled_predictors.append(predictors[i])\n shuffled_target.append(target[i])\n shuffled_predictors = np.array(shuffled_predictors)\n shuffled_target = np.array(shuffled_target)\n\n return shuffled_predictors, shuffled_target\n\ndef build_matrices(output_predictors_matrix, output_target_matrix, sound_folder, annotation_folder):\n '''\n build matrices and save numpy files\n '''\n predictors, target = preprocess_dataset(sound_folder, annotation_folder)\n 
np.save(output_predictors_matrix, predictors)\n    np.save(output_target_matrix, target)\n    print(\"Matrices saved successfully\")\n    print('predictors shape: ' + str(predictors.shape))\n    print('target shape: ' + str(target.shape))\n\ndef crossval_preprocessing(target_subject_t, target_story_t, target_subject_v, target_story_v):\n    ''' build matrices for one defined crossvalidation instance'''\n    #set output matrices as default temp crossvalidation ones\n    OUTPUT_PREDICTORS_MATRIX_T = '../dataset/matrices/crossval_temp_predictors_t.npy'\n    OUTPUT_TARGET_MATRIX_T = '../dataset/matrices/crossval_temp_target_t.npy'\n    OUTPUT_PREDICTORS_MATRIX_V = '../dataset/matrices/crossval_temp_predictors_v.npy'\n    OUTPUT_TARGET_MATRIX_V = '../dataset/matrices/crossval_temp_target_v.npy'\n    #substitute config target subject and stories with the ones of the experiment\n    TARGET_SUBJECT_T = target_subject_t\n    TARGET_STORY_T = target_story_t\n    TARGET_SUBJECT_V = target_subject_v\n    TARGET_STORY_V = target_story_v\n\n    #build training matrix (this module only defines the PRETRAINING folders, and build_matrices\n    #takes four arguments; the subject/story filter is commented out in preprocess_dataset)\n    build_matrices(OUTPUT_PREDICTORS_MATRIX_T, OUTPUT_TARGET_MATRIX_T,\n                    SOUND_FOLDER_PRETRAINING_T, ANNOTATION_FOLDER_PRETRAINING_T)\n\n\nif __name__ == '__main__':\n    '''\n    build training and validation matrices\n    '''\n    build_matrices(OUTPUT_PREDICTORS_MATRIX_PRETRAINING_T, OUTPUT_TARGET_MATRIX_PRETRAINING_T, SOUND_FOLDER_PRETRAINING_T, ANNOTATION_FOLDER_PRETRAINING_T)\n    build_matrices(OUTPUT_PREDICTORS_MATRIX_PRETRAINING_V, OUTPUT_TARGET_MATRIX_PRETRAINING_V, SOUND_FOLDER_PRETRAINING_V, ANNOTATION_FOLDER_PRETRAINING_V)\n\n    #merge_matrices(OUTPUT_PREDICTORS_MATRIX_2017, PREDICTORS_TOBEMERGED, OUTPUT_MERGED_PREDICTORS)\n    #merge_matrices(OUTPUT_TARGET_MATRIX_2017, TARGET_TOBEMERGED, OUTPUT_MERGED_TARGET)\n","sub_path":"src/OMG_challenge/preprocessing_PRETRAININGdataset.py","file_name":"preprocessing_PRETRAININGdataset.py","file_ext":"py","file_size_in_byte":9233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"178798368","text":"from typing import List, Optional\n\nfrom QCompute import UnrollCircuitModule, CompressGateModule\n\nfrom QCompute.OpenModule import ModuleImplement\nfrom QCompute.QPlatform import BackendName, Error\n\n\ndef filterModule(backendName: Optional[str], moduleList: List['ModuleImplement']) \\\n -> List['ModuleImplement']:\n if backendName is None:\n return moduleList\n\n if backendName in [\n BackendName.LocalBaiduSim2.value,\n \n ]:\n return _filterSimulator(backendName, moduleList)\n \n else:\n return moduleList\n\n\ndef _filterSimulator(backendName: str, moduleList: List['ModuleImplement']) -> List['ModuleImplement']:\n unrollCircuitModule = None # type: Optional[UnrollCircuitModule]\n compressGateModule = None # type: Optional[CompressGateModule]\n ret = [] # type: List['ModuleImplement']\n for module in moduleList:\n \n if module.__class__.__name__ == 'UnrollCircuitModule':\n unrollCircuitModule = module\n elif module.__class__.__name__ == 'CompressGateModule':\n compressGateModule = module\n elif not module.disable:\n ret.append(module)\n if unrollCircuitModule is not None:\n if not unrollCircuitModule.disable:\n ret.append(unrollCircuitModule)\n else:\n ret.append(UnrollCircuitModule())\n if backendName not in [\n \n ]:\n if compressGateModule is not None:\n if not compressGateModule.disable:\n ret.append(compressGateModule)\n else:\n ret.append(CompressGateModule())\n return ret\n\n\n\n","sub_path":"QCompute/QPlatform/Processor/ModuleFilter.py","file_name":"ModuleFilter.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"524392632","text":"import os\nimport numpy as np\nfrom numpy import *\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom matplotlib import pyplot\nimport sys, math, joblib, gc\nfrom sklearn import preprocessing\nimport pickle\nfrom sklearn import metrics\nimport transform_helper as Transform \nfrom Model import Model\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, BatchNormalization, Activation\nfrom keras.optimizers import Adam\nimport tensorflow as tf\n\n\ndef repeat_softmax(X, y, preBuilt=False, model=None):\n\tX_train = X \n\ty_train = y\n\t\n\tif not preBuilt:\n\t\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)\n\t\t\n\t\tmodel = LogisticRegression(C=0.040980805223454236, tol=0.0037189066625450827, penalty='l2', max_iter=100,\n\t\t\t\t\t\t\t solver='newton-cg', warm_start=True)\n\t\n\t\n\ttrees = ExtraTreesClassifier(random_state=1)\n\ttrees.fit(X_train, y_train)\n\tselector = SelectFromModel(trees, prefit=True, threshold=-np.inf)\n\t\t\n\t#NEW X_TRAIN FROM SELECTED FEATURES:\n\tX_train = selector.transform(X_train)\n\n\t#standardize data\n\tX_train = preprocessing.StandardScaler().fit_transform(X_train)\n\t\n\tmodel.fit(X_train, y_train)\n\n\treturn model\n\ndef repeat_neural_network(X, y, preBuilt=False, model=None):\n\tX_train = X \n\ty_train = y\n\t\n\tif not preBuilt:\n\t\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)\n\t\t\n\t\tcols = X.shape[1]\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(100,input_dim=cols))\n\t\tmodel.add(Dropout(0.4))\n\t\tmodel.add(BatchNormalization())\n\t\tmodel.add(Activation('relu'))\n\t\tmodel.add(Dense(100))\n\t\tmodel.add(Dropout(0.4))\n\t\tmodel.add(BatchNormalization())\n\t\tmodel.add(Activation('relu'))\n\t\tmodel.add(Dense(1, activation='sigmoid'))\n\t\tmodel.compile(optimizer=Adam(lr=0.01), loss=\"binary_crossentropy\", metrics=[tf.keras.metrics.AUC()])\n\t\t\n\ttrees = ExtraTreesClassifier(random_state=1)\n\ttrees.fit(X_train, y_train)\n\tselector = SelectFromModel(trees, prefit=True, threshold=-np.inf)\n\t\n\t#NEW X_TRAIN FROM SELECTED FEATURES:\n\tX_train = selector.transform(X_train)\n\t\n\t#standardize data\n\tX_train = preprocessing.StandardScaler().fit_transform(X_train)\n\t\n\tmodel.fit(X_train, y_train)\n\t\n\treturn model\n\n\ndef evaluate_model(X, y, model, nn):\n\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)\n\t\n\ttrees = ExtraTreesClassifier(random_state=1)\n\ttrees.fit(X_train, y_train)\n\t\n\tselector = SelectFromModel(trees, prefit=True, threshold=-np.inf)\n\t\n\tX_test = selector.transform(X_test)\n\tX_test = preprocessing.StandardScaler().fit_transform(X_test) \n\t\n\tyhat = model.predict_proba(X_test)\n\tyhat = yhat[:, 1] if nn is False else yhat\n\t\n\tscore = metrics.roc_auc_score(y_test, yhat, average=None)\n\t\n\tprint(f\"\\nAUC Score: {score}\\n\")\n\t\n\nif __name__ == \"__main__\":\n\t\n\tMODEL_NUM = 1 # 1 - softmax, 2 - neural network\n\tTRAIN_CHUNKS = 0 # 0 - False, 1 - True\n\t\n\tmodel_dict = {\n\t\t1: 'softmax',\n\t\t2: 'neural_network',\n\t\t3: 'random_forest'\n\t}\n\t\n\tif len(sys.argv) > 2:\n\t\tMODEL_NUM = int(sys.argv[1])\n\t\tTRAIN_CHUNKS = int(sys.argv[2])\n\t\t\n\telif len(sys.argv) > 1:\n\t\tMODEL_NUM = 
int(sys.argv[1])\n\t\t\n\tdata_path = f'data{os.sep}'\n\ttrain_path = \"\"\n\t\n\ttrain_path = data_path + 'train.csv'\n\t\n\tdtypes = Transform.get_dtypes()\n\t\n\tprint('Reading from csv...')\n\t\n\ttrain_data = pd.read_csv(train_path, nrows=100000, dtype=dtypes)\n\t\n\tprint('Done\\n')\n\n\tytr = train_data[\"HasDetections\"].to_numpy()\n\t\n\tprint('Transforming Dataframe...')\n\t\n\ttrain_data = Transform.transform_dataframe(train_data)\n\t\n\ttrain_data = Transform.transform_categorical(train_data) # perform one-hot encoding on categorical columns\n\n\t# ONLY NEED FOR FULL NEURAL NETWORK \n\t# if os.path.isfile(\"test_submission2.csv\"):\n\t# \ttest_path = \"test_submission2.csv\"\n\t# \ttest_data = pd.read_csv(test_path, nrows=1)\n\t# \ttrain_data = Transform.make_matching_invert(train_data, test_data)\n\t\n\tlabels = list(train_data.columns)\n\t\n\ttmp_df = pd.DataFrame(columns=labels)\n\ttmp_df.to_csv('final_train.csv', index=False)\n\n\ttrain_data = train_data.drop(['MachineIdentifier', 'HasDetections'], axis=1) # drop unnecessary columns\n\t\n\tprint('Done\\n')\n\t\n\tprint('Training model...')\n\t\n\tselection = None\n\tmodel = None\n\t\n\tif TRAIN_CHUNKS == 1:\n\t\tXtr = train_data.to_numpy(dtype='float64')\n\t\tXtr = np.nan_to_num(Xtr)\n\t\t\n\t\t#Xtr_evaluator = np.copy(Xtr)\n\t\t\n\t\ttrain_chunks = Transform.split_dataframe(train_data, chunk_size=100000) # 100000\n\t\tytr_chunks = Transform.split_dataframe(ytr, chunk_size=100000)\n\t\t\n\t\tdel train_data \n\t\tgc.collect()\n\t\t\n\t\tlist_of_chunks = []\n\t\t\n\t\tfor i,chunk in enumerate(train_chunks):\n\t\t\tprint(f'Chunk #{i}')\n\t\t\tXtr = chunk.to_numpy(dtype='float64')\n\t\t\tXtr = np.nan_to_num(Xtr)\n \n\t\t\tif MODEL_NUM == 2:\n\t\t\t\tif i != 0:\n\t\t\t\t\tmodel = tf.keras.models.load_model('chunk_model_tf')\n\t \n\t\t\t\tmodel = repeat_neural_network(Xtr, ytr_chunks[i], i>0, model)\n\t\n\t\t\t\tmodel.save('chunk_model_tf', save_format='tf')\n\n\t\t\telse:\n\t\t\t\tmodel = repeat_softmax(Xtr, ytr_chunks[i], i > 0, model)\n \n\t\tdel Xtr, train_chunks, ytr_chunks\n\t\tgc.collect()\n \n\t\tprint('\\nEvaluating Model...')\n\t\t\n\t\t#evaluate_model(Xtr_evaluator, ytr, model, MODEL_NUM==2)\n\t\t \n\t\t\t\n\telse:\n\t\tXtr = train_data.to_numpy(dtype='float64')\n\t\tXtr = np.nan_to_num(Xtr)\n\t\n\t\tmodel = Model(Xtr, ytr, labels, MODEL_NUM)\n\t\t\n\t\tmodel = model.train_model()\n\t\n\t\n\tprint('Done\\n')\n\t\n\t# save the model to disk\n\tmodel_name = model_dict.get(MODEL_NUM)\n\t\n\tprint('Saving....\\n')\n\t\n\tif MODEL_NUM == 2:\n\t\tmodel_json = model.to_json()\n\t\n\t\twith open(f'saved_models{os.sep}model.json', 'w') as json_file:\n\t\t\tjson_file.write(model_json)\n\n\t\t# serialize the weights\n\t\tmodel.save_weights(f'saved_models{os.sep}model.h5')\n\t\t\n\telse:\n\t\tfilename = f'saved_models{os.sep}model.sav'\n\t\tjoblib.dump(model, filename)\n\t\n\tf = open('model_num.pckl', 'wb')\n\tpickle.dump(MODEL_NUM, f)\n\tf.close()\n\t\n\tprint(f'{model_name} model saved')\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"3519646","text":"\"\"\"In this file, we test to ensure that the output of\ncheck_syntax is as expected.\n\nWe also test to ensure that check_syntax does not accidently\nchange any existing error handling settings.\n\n\"\"\"\n\nimport friendly_traceback as friendly\n\n\ndef test_check_syntax():\n # set-up\n bad_code_syntax = \"True = 1\"\n bad_code_exec = \"a = b\" # Not a syntax error, but a NameError\n good_code = \"c = 1\"\n\n friendly.set_stream(\"capture\")\n original_verbosity = friendly.get_verbosity()\n installed = friendly.is_installed()\n # ----- end of set-up\n\n # When a SyntaxError is raised, check_syntax returns False\n\n assert not friendly.advanced_check_syntax(source=bad_code_syntax)\n result = friendly.get_output() # content is flushed\n assert \"Python exception\" in result\n assert \"SyntaxError\" in result\n\n assert not friendly.get_output() # confirm that content was flushed\n\n # When no SyntaxError is raised, check_syntax returns a tuple\n # containing a code object and a file name\n assert friendly.advanced_check_syntax(source=bad_code_exec)\n assert friendly.advanced_check_syntax(source=good_code)\n assert not friendly.get_output() # no new exceptions recorded\n\n try:\n exec(bad_code_syntax, {})\n except Exception:\n assert not friendly.get_output()\n\n # When friendly-traceback is not installed, a call to check_syntax\n # will end with verbosity set to 0, which corresponds to normal Python\n # tracebacks\n friendly.uninstall()\n friendly.advanced_check_syntax(source=bad_code_syntax)\n assert friendly.get_verbosity() == 0\n friendly.advanced_check_syntax(source=bad_code_syntax, verbosity=4)\n assert friendly.get_verbosity() == 0\n\n # When friendly-traceback is \"installed\", a call to check_syntax\n # leaves its verbosity unchanged.\n friendly.install(redirect=\"capture\")\n\n friendly.set_verbosity(3)\n friendly.advanced_check_syntax(source=bad_code_syntax)\n assert friendly.get_verbosity() == 3\n friendly.advanced_check_syntax(source=bad_code_syntax, verbosity=4)\n assert friendly.get_verbosity() == 3\n\n # A call to advanced_code_syntax, with a language specified as an argument\n # should leave the previous language unchanged.\n\n friendly.set_lang(\"en\")\n assert not friendly.advanced_check_syntax(source=bad_code_syntax, lang=\"fr\")\n result = friendly.get_output()\n assert \"Exception Python\" in result # French heading\n assert friendly.get_lang() == \"en\"\n\n # Clean up and restore for other tests\n friendly.get_output()\n friendly.set_stream(None)\n if installed:\n friendly.uninstall()\n friendly.set_verbosity(original_verbosity)\n\n\nif __name__ == \"__main__\":\n test_check_syntax()\n print(\"Success!\")\n","sub_path":"tests/unit/test_check_syntax.py","file_name":"test_check_syntax.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"493786579","text":"import jieba\nimport re\nimport os\nimport tool.expand_tool as expand_tool\n\n# 创建停用词列表\ndef stopwordslist():\n stopwords = [line.strip() for line in open(\"tool\" + os.sep + 'baidu_stopwords.txt', encoding='UTF-8').readlines()]\n return stopwords\n\n\ndef cut_text(input_str):\n cut_list = jieba.cut(input_str, cut_all=False)\n #res_list = [c1 for c1 in cut_list if c1 not in stop_words]\n return cut_list\n\n\ndef do_judge_type(question, hint):\n hint = ' '.join(hint)\n hint = expand_tool.remove_symbol(hint)\n question = expand_tool.remove_symbol(question)\n if len(hint) > len(question):\n if question in hint:\n return ['A']\n else:\n if hint in question:\n return ['A']\n return ['B']\n\ndef do_muti_selection(selections,question_content,hint):\n hint = ' '.join(hint)\n sel_list = []\n letters = ['A','B','C','D','E','F']\n count = find_continue_blank_count(question_content)\n if len(selections) == count:\n for i in range(count):\n sel_list.append(letters[i])\n return sel_list\n for i in range(len(selections)):\n if expand_tool.remove_symbol(selections[i]) in hint:\n sel_list.append(letters[i])\n if len(sel_list) < count:#判断出的选项不够,随机补一个\n n1 = count - len(sel_list)\n t1 = 0\n for let in letters:\n if let not in sel_list and t1 < n1:\n sel_list.append(let)\n t1 += 1\n\n return sel_list\n\ndef do_single_selection(selections, question_content, hint_text):\n hint_text = ''.join(hint_text)\n hint_text = expand_tool.remove_symbol(hint_text)\n ratio = [0]*len(selections)\n res_list = []\n letters = ['A', 'B', 'C', 'D', 'E', 'F']\n\n #判断题\n if expand_tool.remove_symbol(selections[0]) == \"正确\" and expand_tool.remove_symbol(selections[1]) == \"错误\":\n res_list = do_judge_type(question_content,hint_text)\n return res_list\n\n #反选题\n in_count = 0\n out_index = 0\n for i in range(len(selections)):\n if selections[i] in hint_text:\n ratio[i] = 1\n in_count += 1\n else:\n out_index = i\n if in_count == len(selections)-1:#单选题有三个选项在提示里面,说明是反选,选择不在提示中的那个\n res_list = [letters[i]]\n return res_list\n\n\n\n for m in range(len(selections)):\n if len(selections[m]) > len(hint_text):\n short_str = hint_text\n long_str = selections[m]\n else:\n short_str = selections[m]\n long_str = hint_text\n short_str = expand_tool.remove_symbol(short_str)\n long_str = expand_tool.remove_symbol(long_str)\n match_length = 0\n for i in range(len(short_str)):\n if short_str[i] in long_str:\n match_length += 1\n ratio[m] = match_length\n max_value = 0\n max_index = 0\n for i in range(len(ratio)):\n if ratio[i] > max_value:\n max_value = ratio[i]\n max_index = i\n res_list.append(letters[max_index])\n return res_list\n\n\ndef find_continue_blank_count(msg):\n pattern = r'()'\n res = re.findall(pattern,msg)\n return len(res)\n\ndef test():\n str = \"今天烟台很多地方下了雪,包括 、 、和 。haiyou \"\n find_continue_blank_count(str)\n\n#stop_words = stopwordslist()\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"SourcePackages/tool/SemanticAnalyze.py","file_name":"SemanticAnalyze.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"405920914","text":"import numpy as np\nimport scipy as sp\n\n#frequentist\ndef proportion_test(c1, c2, n1, n2, mode = 'two_sided'):\n p = (c1+c2) / (n1+n2)\n p1 = c1 / n1\n p2 = c2 / n2\n z = (p1-p2) / np.sqrt(p*(1-p)*(1/n1 + 1/n2))\n if mode=='two_sided':\n p = 2*(1-sp.stats.norm.cdf(abs(z)))\n elif mode=='one_sided':\n p = 1-sp.stats.norm.cdf(abs(z))\n else:\n raise ValueError('Available modes are `one_sided` and `two_sided`')\n return z, p\n\ndef proportion_ci(c,n, p_value=0.05):\n p = c/n\n se = np.sqrt(p*(1-p)/n)\n z = sp.stats.norm.ppf(1-p_value/2)\n return p-z*se, p, p+z*se\n\n#bayesian\ndef sample_proportion(c,n,a=1,b=1,sim_size=100000): \n return np.random.beta(c+a,n-c+b,sim_size)\n\ndef proportion_test_b(c1,c2,n1,n2,a1=1,a2=1,b1=1,b2=1,sim_size=100000):\n p1 = sample_proportion(c1,n1,a1,b1,sim_size)\n p2 = sample_proportion(c2,n2,a2,b2,sim_size)\n return (p1 > p2).mean()\n\ndef proportion_ratio(c1,c2,n1,n2,a1=1,a2=1,b1=1,b2=1,sim_size=100000):\n p1 = sample_proportion(c1,n1,a1,b1,sim_size)\n p2 = sample_proportion(c2,n2,a2,b2,sim_size)\n return p1/p2\n\ndef proportion_ci_b(c1,c2,n1,n2,p_value=0.05,a1=1,a2=1,b1=1,b2=1,sim_size=100000):\n ratios = proportion_ratio(c1,c2,n1,n2,a1,a2,b1,b2,sim_size)\n return np.quantile(ratios,[p_value/2,1-p_value/2])\n\ndef value_remaining(c1,c2,n1,n2,q=95,sim_size=100000,a1=1,a2=1,b1=9,b2=9):\n p1 = sample_proportion(c1,n1,a1,b1,sim_size)[:,None]\n p2 = sample_proportion(c2,n2,a2,b2,sim_size)[:,None]\n p = np.concatenate([p1,p2],1)\n p_max = p.max(1)\n best_idx = np.argmax([p1.mean(),p2.mean()])\n p_best = p[:,best_idx]\n vs = (p_max-p_best)/p_best\n return np.percentile(vs,q)","sub_path":"stat_tests.py","file_name":"stat_tests.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"407145718","text":"import cherrypy\nimport simplejsonrpc\n\nimport methods\n\nsimplejsonrpc.registry.register_method('multiply', methods.multiply)\nsimplejsonrpc.registry.register_method('shlex', methods.shlex)\n\n# from http://tools.cherrypy.org/wiki/DirectToDiskFileUpload\ndef noBodyProcess():\n \"\"\"Sets cherrypy.request.process_request_body = False, giving\n us direct control of the file upload destination. By default\n cherrypy loads it to memory, we are directing it to disk.\"\"\"\n cherrypy.request.process_request_body = False\n\ncherrypy.tools.noBodyProcess = cherrypy.Tool('before_request_body', noBodyProcess)\n\nclass JsonRpcService:\n \"\"\" Request handler for JSON-RPC requests. \"\"\"\n\n exposed = True\n\n def __init__(self):\n self.handler = simplejsonrpc.handler.RequestHandler()\n\n def handle_response(self, status, response_headers):\n cherrpy.response.status = status\n cherrpy.response.headers.update(response_headers)\n\n @cherrypy.expose\n @cherrypy.tools.noBodyProcess()\n def index(self, **whatever):\n return self.handler.handle_request(\n cherrypy.request,\n cherrypy.request.body.fp.read(),\n self.handle_response)\n\nif __name__ == '__main__':\n cherrypy.quickstart(JsonRpcService())\n","sub_path":"examples/cherrpy.py","file_name":"cherrpy.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"254589238","text":"# SilNet is referenced from U-Net segmentation.\n# It used three 2-strided convolutions and three deconvolutions.\nimport cv2\nimport os\nfrom keras.optimizers import Adam\nfrom keras.layers import *\nfrom keras.models import Model\n\nfrom GeoConGAN.SilNet.unet.data import *\n\nfrom PIL import Image\n\n\nresult_path = \"./unet/data/result\"\n\n\nclass DataLoader:\n def __init__(self, batch_size, paths):\n self.batch_size = batch_size\n self.paths = paths\n self.n_batch = 0\n self.load_path()\n\n def load_path(self):\n self.path_trainA = os.listdir(self.paths[0])\n self.path_trainB = os.listdir(self.paths[1])\n\n self.path_testA = os.listdir(self.paths[2])\n self.path_testB = os.listdir(self.paths[3])\n\n print(self.path_trainA)\n print(self.path_trainB)\n print(self.path_testA)\n print(self.path_testB)\n\n def data_load(self, is_train=True):\n\n if is_train:\n path_a = self.path_trainA\n path_b = self.path_trainB\n root_a = self.paths[0]\n root_b = self.paths[1]\n\n else:\n path_a = self.path_testA\n path_b = self.path_testB\n root_a = self.paths[2]\n root_b = self.paths[3]\n\n\n self.n_batch = int(min(len(path_a)//self.batch_size, len(path_b)//self.batch_size))\n\n #idx = np.random.choice(self.n_batch*self.batch_size, self.n_batch*self.batch_size, replace=False)\n\n for n in range(self.n_batch):\n imgs_A = []\n imgs_B = []\n for i in range(self.batch_size):\n img_A = imread(os.path.join(root_a, path_a[n*self.batch_size + i]))\n img_B = imread(os.path.join(root_b, path_b[n*self.batch_size + i]))\n\n imgs_A.append(img_A)\n imgs_B.append(img_B)\n\n imgs_A = np.array(imgs_A)/127.5 - 1\n imgs_B = np.array(imgs_B)/127.5 - 1\n\n yield imgs_A, imgs_B\n\n\ndef imread(path):\n image = np.array(Image.open(path))\n if image.shape[0] == 512:\n image.resize((256,256,1))\n\n image = image.reshape((256,256,1))\n return image\n\n\nclass SilNet:\n\n def __init__(self, shape, train_generator, data_loader, batch_size):\n self.shape = shape\n self.model = self.make_model2()\n self.compile_model()\n self.train_generator = train_generator\n self.test_generator = data_loader\n self.batch_size = batch_size\n\n def make_model(self):\n def normalization():\n return BatchNormalization()\n def conv(input_layer, filter):\n c = Conv2D(filters=filter, kernel_size=3, strides=1, padding='same')(input_layer)\n a = ReLU()(c)\n n = normalization()(a)\n c = Conv2D(filters=filter, kernel_size=3, strides=2, padding='same')(n)\n return c\n def resnet(input_layer, filter):\n n = normalization()(input_layer)\n a = ReLU()(n)\n c = Conv2D(filters=filter, kernel_size=5, strides=1, padding='same')(a)\n n = normalization()(c)\n a = ReLU()(n)\n c = Conv2D(filters=filter, kernel_size=5, strides=1, padding='same')(a)\n return Add()([input_layer, c])\n\n def deconv2d(input_layer, filter, concat):\n upsample = UpSampling2D(size=2)(input_layer)\n merge = concatenate([upsample,concat],axis=3)\n conv2d_layer = Conv2D(filters=filter, kernel_size=3, strides=1, padding='same')(merge)\n n = normalization()(conv2d_layer)\n return n\n filter_size = 64\n input_layer = Input(self.shape)\n\n\n conv_layer_1 = Conv2D(filters=filter_size, kernel_size=3, strides=1, padding='same')(input_layer)\n a = ReLU()(conv_layer_1)\n n = normalization()(a)\n conv_layer_2 = Conv2D(filters=filter_size, kernel_size=3, strides=2, padding='same')(n)\n a = ReLU()(conv_layer_2)\n n = normalization()(a)\n\n conv_layer_3 = Conv2D(filters=filter_size*2, kernel_size=3, strides=1, padding='same')(n)\n a = ReLU()(conv_layer_3)\n n = 
normalization()(a)\n conv_layer_4 = Conv2D(filters=filter_size*2, kernel_size=3, strides=2, padding='same')(n)\n\n res_net = resnet(conv_layer_4, filter_size*2)\n for i in range(0, 5):\n res_net = resnet(res_net, filter_size*2)\n\n deconv_1 = deconv2d(res_net, filter_size*2, conv_layer_3)\n deconv_2 = deconv2d(deconv_1, filter_size, conv_layer_1)\n\n output_layer = Conv2D(filters=1, kernel_size=1, strides=1, padding='same', activation='tanh')(deconv_2)\n\n return Model(inputs=input_layer, outputs=output_layer)\n\n def compile_model(self):\n self.model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])\n\n def train_on_batch(self, epoch):\n for epoch_idx in range(0, epoch):\n## for idx, (input_image, label) in enumerate(self.data_loader.data_load()):\n## loss = self.model.train_on_batch(input_image, label)\n loss = self.model.fit_generator(self.train_generator, steps_per_epoch=300, epochs=5)\n\n print(loss)\n self.test_save(epoch_idx)\n\n def make_model2(self):\n def normalize_layer():\n return BatchNormalization()\n\n filters = 64\n input_layer = Input(self.shape)\n c1 = Conv2D(filters=filters, kernel_size=3, padding='same',strides=1, activation='relu')(input_layer)\n n = normalize_layer()(c1)\n c1 = Conv2D(filters=filters, kernel_size=3, padding='same',strides=1, activation='relu')(n)\n n = normalize_layer()(c1)\n c1_d = Conv2D(filters=filters, kernel_size=3, padding='same',strides=2, activation='relu')(n)\n n = normalize_layer()(c1_d)\n# max_pool = MaxPooling2D()(n)\n\n c2 = Conv2D(filters=filters*2, kernel_size=3, padding='same',strides=1, activation='relu')(n)\n n = normalize_layer()(c2)\n c2 = Conv2D(filters=filters*2, kernel_size=3, padding='same',strides=1, activation='relu')(n)\n n = normalize_layer()(c2)\n c2_d = Conv2D(filters=filters*2, kernel_size=3, padding='same',strides=2, activation='relu')(n)\n n = normalize_layer()(c2_d)\n# max_pool = MaxPooling2D()(n)\n\n c3 = Conv2D(filters=filters*4, kernel_size=3, padding='same', strides=1, activation='relu')(n)\n n = normalize_layer()(c3)\n c3 = Conv2D(filters=filters*4, kernel_size=3, padding='same', strides=1, activation='relu')(n)\n\n up_sample = concatenate([Conv2DTranspose(filters=filters*2, kernel_size=2,strides=2,padding='same')(c3), c2],axis=3)\n\n c4 = Conv2D(filters=filters*2, kernel_size=3, padding='same', strides=1, activation='relu')(up_sample)\n n = normalize_layer()(c4)\n c4 = Conv2D(filters=filters*2, kernel_size=3, padding='same', strides=1, activation='relu')(n)\n\n up_sample = concatenate([Conv2DTranspose(filters=filters, kernel_size=2,strides=2,padding='same')(c4), c1],axis=3)\n\n\n c5 = Conv2D(filters=filters, kernel_size=3, padding='same', strides=1, activation='relu')(up_sample)\n n = normalize_layer()(c5)\n c5 = Conv2D(filters=filters, kernel_size=3, padding='same', strides=1, activation='relu')(n)\n\n output = Conv2D(filters=1, kernel_size=1, padding='same',strides=1, activation='tanh')(c5)\n\n return Model(inputs=input_layer, outputs=output)\n\n def test_save(self, epoch_idx):\n os.makedirs(result_path+\"/{0}\".format(epoch_idx), exist_ok=True)\n\n for i, (image, label) in enumerate(self.test_generator.data_load(False)):\n\n results = self.model.predict(image)\n\n results = (results + 1) * 127.5\n\n for b in range(self.batch_size):\n result = np.asarray(results[b], dtype=np.uint8)\n save_path = result_path+\"/{0}/result_{1}.png\".format(epoch_idx, i*self.batch_size+b)\n\n result = cv2.cvtColor(result,cv2.COLOR_GRAY2RGB)\n result = cv2.cvtColor(result,cv2.COLOR_RGB2GRAY)\n 
cv2.imwrite(save_path, result)\n                '''\n                result_image = Image.fromarray(result)\n                result_image.save(result_path+\"/{0}/result_{1}.png\".format(epoch_idx, i*self.batch_size+b))\n                '''\n\nif __name__ == \"__main__\":\n    paths = [\n        \"./unet/data/train/input\",\n        \"./unet/data/train/label\",\n        \"./unet/data/test/input\",\n        \"./unet/data/test/label\"\n    ]\n\n    data_gen_args = dict(rotation_range=0.2,\n                         width_shift_range=0.05,\n                         height_shift_range=0.05,\n                         shear_range=0.05,\n                         zoom_range=0.05,\n                         horizontal_flip=True,\n                         fill_mode='nearest',\n                         validation_split=0.2)\n    data_loader = DataLoader(batch_size=4, paths=paths)\n\n    myGene = trainGenerator(4,'./unet/data/train','input','label',data_gen_args,save_to_dir = None)\n    silnet = SilNet((256,256,1), myGene, data_loader, 4)\n    silnet.model.summary()\n    silnet.train_on_batch(500)\n","sub_path":"GeoConGAN/SilNet/silnet.py","file_name":"silnet.py","file_ext":"py","file_size_in_byte":9124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"433479613","text":"# vimspector - A multi-language debugging system for Vim\n# Copyright 2018 Ben Jackson\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport json\n\nfrom collections import namedtuple\n\nfrom vimspector import utils\n\nPendingRequest = namedtuple( 'PendingRequest',\n [ 'msg', 'handler', 'failure_handler' ] )\n\n\nclass DebugAdapterConnection( object ):\n def __init__( self, handler, send_func ):\n self._logger = logging.getLogger( __name__ )\n utils.SetUpLogging( self._logger )\n\n self._Write = send_func\n self._SetState( 'READ_HEADER' )\n self._buffer = bytes()\n self._handler = handler\n self._next_message_id = 0\n self._outstanding_requests = {}\n\n def DoRequest( self, handler, msg, failure_handler=None ):\n this_id = self._next_message_id\n self._next_message_id += 1\n\n msg[ 'seq' ] = this_id\n msg[ 'type' ] = 'request'\n\n self._outstanding_requests[ this_id ] = PendingRequest( msg,\n handler,\n failure_handler )\n self._SendMessage( msg )\n\n def Reset( self ):\n self._Write = None\n self._handler = None\n\n def OnData( self, data ):\n data = bytes( data, 'utf-8' )\n # self._logger.debug( 'Received ({0}/{1}): {2},'.format( type( data ),\n # len( data ),\n # data ) )\n\n self._buffer += data\n\n while True:\n if self._state == 'READ_HEADER':\n data = self._ReadHeaders()\n\n if self._state == 'READ_BODY':\n self._ReadBody()\n else:\n break\n\n if self._state != 'READ_HEADER':\n # We ran out of data whilst reading the body. Await more data.\n break\n\n def _SetState( self, state ):\n self._state = state\n if state == 'READ_HEADER':\n self._headers = {}\n\n def _SendMessage( self, msg ):\n msg = json.dumps( msg )\n self._logger.debug( 'Sending Message: {0}'.format( msg ) )\n\n data = 'Content-Length: {0}\\r\\n\\r\\n{1}'.format( len( msg ), msg )\n # self._logger.debug( 'Sending: {0}'.format( data ) )\n self._Write( data )\n\n def _ReadHeaders( self ):\n parts = self._buffer.split( bytes( '\\r\\n\\r\\n', 'utf-8' ), 1 )\n\n if len( parts ) > 1:\n headers = parts[ 0 ]\n for header_line in headers.split( bytes( '\\r\\n', 'utf-8' ) ):\n if header_line.strip():\n key, value = str( header_line, 'utf-8' ).split( ':', 1 )\n self._headers[ key ] = value\n\n # Chomp (+4 for the 2 newlines which were the separator)\n # self._buffer = self._buffer[ len( headers[ 0 ] ) + 4 : ]\n self._buffer = parts[ 1 ]\n self._SetState( 'READ_BODY' )\n return\n\n # otherwise waiting for more data\n\n def _ReadBody( self ):\n try:\n content_length = int( self._headers[ 'Content-Length' ] )\n except KeyError:\n # Ug oh. We seem to have all the headers, but no Content-Length\n # Skip to reading headers. 
Because, what else can we do.\n      self._logger.error( 'Missing Content-Length header in: {0}'.format(\n        json.dumps( self._headers ) ) )\n      self._buffer = bytes( '', 'utf-8' )\n      self._SetState( 'READ_HEADER' )\n      return\n\n    if len( self._buffer ) < content_length:\n      # Need more data\n      assert self._state == 'READ_BODY'\n      return\n\n    payload = str( self._buffer[ : content_length ], 'utf-8' )\n    self._buffer = self._buffer[ content_length : ]\n\n    message = json.loads( payload )\n\n    self._logger.debug( 'Message received: {0}'.format( message ) )\n\n    try:\n      self._OnMessageReceived( message )\n    finally:\n      # Don't allow exceptions to break message reading\n      self._SetState( 'READ_HEADER' )\n\n  def _OnMessageReceived( self, message ):\n    if not self._handler:\n      return\n\n    if message[ 'type' ] == 'response':\n      request = self._outstanding_requests.pop( message[ 'request_seq' ] )\n\n      if message[ 'success' ]:\n        if request.handler:\n          request.handler( message )\n      else:\n        reason = message.get( 'message' )\n        if not reason:\n          fmt = message.get( 'body', {} ).get( 'error', {} ).get( 'format' )\n          if fmt:\n            # TODO: Actually make this work\n            reason = fmt\n          else:\n            reason = 'No reason'\n\n        self._logger.error( 'Request failed: {0}'.format( reason ) )\n        if request.failure_handler:\n          request.failure_handler( reason, message )\n        else:\n          utils.UserMessage( 'Request failed: {0}'.format( reason ) )\n    elif message[ 'type' ] == 'event':\n      method = 'OnEvent_' + message[ 'event' ]\n      if method in dir( self._handler ):\n        getattr( self._handler, method )( message )\n      else:\n        utils.UserMessage( 'Unhandled event: {0}'.format( message[ 'event' ] ),\n                           persist = True )\n","sub_path":"python3/vimspector/debug_adapter_connection.py","file_name":"debug_adapter_connection.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
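For reference, the framing that _ReadHeaders/_ReadBody in the record above undoes is the Debug Adapter Protocol base format: a Content-Length header, a blank separator line, then a JSON body. A small sketch of producing one framed message and feeding it to the parser in two chunks (conn stands for an already-constructed DebugAdapterConnection):

import json

body = json.dumps({'seq': 1, 'type': 'request', 'command': 'initialize', 'arguments': {}})
framed = 'Content-Length: {0}\r\n\r\n{1}'.format(len(body), body)

# OnData accepts str, re-encodes to bytes, and buffers partial input,
# so a message survives arriving split across several reads.
conn.OnData(framed[:25])  # header fragment only: parser keeps waiting
conn.OnData(framed[25:])  # remainder arrives: body is decoded and dispatched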
+{"seq_id":"602968818","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error\nfrom sklearn import linear_model\nimport sys\nfrom tqdm import tqdm\nfrom cycler import cycler\nimport string\nfrom itertools import cycle\n\n\ndef label_axes(fig, labels=None, loc=None, **kwargs):\n if labels is None:\n labels = string.ascii_lowercase\n labels = cycle(labels)\n if loc is None:\n loc = (-0.1, 1.1)\n axes = [ax for ax in fig.axes if ax.get_label() != '']\n for ax, lab in zip(axes, labels):\n ax.annotate('(' + lab + ')', size=16, xy=loc,\n xycoords='axes fraction',\n **kwargs)\n\n\nplt.style.use(['science', 'grid'])\nprop_cycle = plt.rcParams['axes.prop_cycle']\ncolors = prop_cycle.by_key()['color']\ncolors = [colors[3], colors[1], colors[0]]\ncolors_nipy1 = mpl.cm.nipy_spectral(np.linspace(0.1, 0.9, 6))\ncolors_nipy2 = mpl.cm.nipy_spectral(np.linspace(0.6, 0.9, 7))\ncolors_nipy = list(colors_nipy1[0:3]) + list(colors_nipy2[3:-2]) + list(colors_nipy1[-1:])\nplt.rcParams['axes.prop_cycle'] = cycler(color=colors)\n\ndf_tddft = pd.read_csv('TDDFT_MOPSSAM_test_data.csv')\ndf_mopssam = pd.read_csv('xtb_tddft_calib_data.csv')\n\nfig = plt.figure(num=2, figsize=[7, 4], dpi=300, clear=True)\nax = fig.add_subplot(1, 1, 1)\nplt.plot(df_tddft['S1'], df_mopssam['aug-cc-TDDFT'], '.', color=colors_nipy[1])\nx = np.linspace(0, 10, 100)\nplt.plot(x, x, 'k--')\nplt.xlim(2, 7)\nplt.ylim(2, 7)\nax.set_axisbelow(True)\nplt.grid(True)\nax.set_aspect('equal', adjustable='box')\n# plt.legend(markerscale=6, fontsize=14)\nplt.xlabel('Independent TD-DFT S$_1$ (eV)', fontsize=16)\nplt.ylabel('MOPSSAM S$_1$ (eV)', fontsize=16)\nplt.savefig('mopssam_S1_comp.png')\n","sub_path":"scripts/comp_TDDFT_settings.py","file_name":"comp_TDDFT_settings.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"14519238","text":"import tkinter as tk\r\nimport cv2\r\nimport random\r\n\r\ndef webcam():\r\n video = cv2.VideoCapture(0)\r\n classificador = cv2.CascadeClassifier('cascades/haarcascade_frontalface_alt.xml')\r\n\r\n while True:\r\n conectado, frame = video.read()\r\n # print(conectado)\r\n # print(frame)\r\n\r\n frameCinza = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n facesDetectadas = classificador.detectMultiScale(frameCinza, minSize=(70, 70))\r\n for (x, y, l, a) in facesDetectadas:\r\n cv2.rectangle(frame, (x, y), (x + l, y + a), (0, 0, 255), 2)\r\n\r\n cv2.imshow('Video', frame)\r\n\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\n video.release()\r\n cv2.destroyAllWindows()\r\n\r\ndef face():\r\n classificador = cv2.CascadeClassifier('cascades/haarcascade_frontalface_alt.xml')\r\n #classificador = cv2.CascadeClassifier('cascades/haarcascade_frontalcatface.xml')\r\n #classificador = cv2.CascadeClassifier('cascades/cars.xml')\r\n #classificador = cv2.CascadeClassifier('cascades/relogios.xml')\r\n\r\n fotos = ['pessoas/j.jpg', 'pessoas/k.jpg','pessoas/fed.jpg','pessoas/h.jpg','pessoas/abc.jpg', 'pessoas/def.jpg', 'pessoas/cba.jpg', 'pessoas/beatles.jpg', 'pessoas/faceolho.jpg', 'pessoas/pessoas1.jpg', 'pessoas/pessoas2.jpg', 'pessoas/pessoas3.jpg', 'pessoas/pessoas4.jpg']\r\n #fotos = ['pessoas/carro1.jpg', 'pessoas/carro2.jpg', 'pessoas/carro3.jpg']\r\n #fotos = ['pessoas/gato1.jpg', 'pessoas/gato2.jpg', 'pessoas/gato3.jpg']\r\n\r\n imagem = cv2.imread(random.choice(fotos))\r\n imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)\r\n\r\n facesDetectadas = classificador.detectMultiScale(imagemCinza, scaleFactor=1.1, minNeighbors=9, minSize=(30, 30))\r\n\r\n for (x, y, l, a) in facesDetectadas:\r\n imagem = cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 2)\r\n\r\n cv2.imshow(\"Faces encontradas\", imagem)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\njanela = tk.Tk()\r\njanela.title('Captura de Faces')\r\njanela['bg'] = 'gray'\r\nbt1 = tk.Button(janela, width=20, text='Webcam', command=webcam)\r\nbt1.place(x=80, y=150)\r\nbt2 = tk.Button(janela, width=20, text='Imagens', command=face)\r\nbt2.place(x=80, y=180)\r\nbt3 = tk.Button(janela, width=20, text = 'Sair', command=janela.destroy)\r\nbt3.place(x=80, y=210)\r\njanela.geometry('300x300+200+200')\r\n\r\nimg = tk.PhotoImage(file='pessoas/tst.png')\r\nimg1 = tk.Label(janela, imag = img)\r\nimg1.place(x=100, y=30)\r\n#lb = tk.Label(janela, text='oi')\r\n#lb.place(x=100, y=100)\r\n\r\njanela.mainloop()\r\n\r\n","sub_path":"computerVision/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"227319525","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2013 - Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom mistral.db.v2.sqlalchemy import models\nfrom mistral.openstack.common import log as logging\nfrom mistral.tests import base\nfrom mistral.workbook import parser as spec_parser\nfrom mistral.workflow import direct_workflow as d_wf\nfrom mistral.workflow import states\nfrom mistral.workflow import utils as wf_utils\n\nLOG = logging.getLogger(__name__)\n\nWORKBOOK = \"\"\"\n---\nversion: '2.0'\n\nname: my_wb\n\nworkflows:\n wf1:\n type: direct\n\n tasks:\n task1:\n action: std.echo output=\"Hey\"\n publish:\n res1: <% $.task1 %>\n on-complete:\n - task2: <% $.res1 = 'Hey' %>\n - task3: <% $.res1 = 'Not Hey' %>\n\n task2:\n action: std.echo output=\"Hi\"\n\n task3:\n action: std.echo output=\"Hoy\"\n\"\"\"\n\n\nclass DirectWorkflowHandlerTest(base.BaseTest):\n def setUp(self):\n super(DirectWorkflowHandlerTest, self).setUp()\n\n wb_spec = spec_parser.get_workbook_spec_from_yaml(WORKBOOK)\n\n wf_ex = models.WorkflowExecution()\n wf_ex.update({\n 'id': '1-2-3-4',\n 'spec': wb_spec.get_workflows().get('wf1').to_dict(),\n 'state': states.IDLE\n })\n\n self.wf_ex = wf_ex\n self.wb_spec = wb_spec\n self.handler = d_wf.DirectWorkflowHandler(wf_ex)\n\n def _create_db_task(self, id, name, state):\n tasks_spec = self.wb_spec.get_workflows()['wf1'].get_tasks()\n\n task_ex = models.TaskExecution()\n task_ex.update({\n 'id': id,\n 'name': name,\n 'spec': tasks_spec[name].to_dict(),\n 'state': state\n })\n\n self.wf_ex.task_executions.append(task_ex)\n\n return task_ex\n\n def test_start_workflow(self):\n commands = self.handler.start_workflow()\n\n self.assertEqual(1, len(commands))\n self.assertEqual('task1', commands[0].task_spec.get_name())\n self.assertEqual(states.RUNNING, self.wf_ex.state)\n\n def test_on_task_result(self):\n self.wf_ex.update({'state': states.RUNNING})\n\n task1_db = self._create_db_task('1-1-1-1', 'task1', states.RUNNING)\n\n # Emulate finishing 'task1'.\n commands = self.handler.on_task_result(\n task1_db,\n wf_utils.TaskResult(data='Hey')\n )\n\n self.assertEqual(1, len(commands))\n self.assertEqual('task2', commands[0].task_spec.get_name())\n\n self.assertEqual(states.RUNNING, self.wf_ex.state)\n self.assertEqual(states.SUCCESS, task1_db.state)\n\n # Emulate finishing 'task2'.\n task2_db = self._create_db_task('1-1-1-2', 'task2', states.RUNNING)\n\n commands = self.handler.on_task_result(\n task2_db,\n wf_utils.TaskResult(data='Hi')\n )\n\n self.assertEqual(0, len(commands))\n\n self.assertEqual(states.SUCCESS, self.wf_ex.state)\n self.assertEqual(states.SUCCESS, task1_db.state)\n self.assertEqual(states.SUCCESS, task2_db.state)\n\n def test_stop_workflow(self):\n # TODO(rakhmerov): Implement.\n pass\n\n def test_resume_workflow(self):\n # TODO(rakhmerov): Implement.\n 
pass\n","sub_path":"mistral/tests/unit/workflow/test_direct_workflow.py","file_name":"test_direct_workflow.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"396522720","text":"def merge_excel(input_filename,sheetname_list,output_filename='merge_log.csv',depth_name='DEPTH(m)'):\r\n '''\r\n \r\n '''\r\n import pandas as pd\r\n \r\n rawdata = dict()\r\n # read all excel files\r\n for sheetname in sheetname_list:\r\n rawdata[sheetname] = pd.read_excel(input_filename,sheetname=sheetname)\r\n \r\n # get the min_depth and max_depth \r\n min_depth = max([rawdata[elem][depth_name][0] for elem in rawdata])\r\n max_depth = min([rawdata[elem][depth_name][len(rawdata[elem])-1] for elem in rawdata])\r\n print('mindepth ',min_depth,'max depth ',max_depth)\r\n \r\n # data cleaning, select data between min and max depth\r\n for elem in rawdata:\r\n #print(elem,' raw length',len(rawdata[elem]))\r\n rawdata[elem] =rawdata[elem][(rawdata[elem][depth_name]<= max_depth) & (rawdata[elem][depth_name] >= min_depth) ] #\r\n rawdata[elem]=rawdata[elem].reset_index() #del rawdata[elem]['index']\r\n del rawdata[elem]['index']\r\n #print(elem,' processed length',len(rawdata[elem]))\r\n \r\n # merge to single dataframe \r\n df_logs_merge = pd.DataFrame()\r\n for i,elem in enumerate(rawdata):\r\n columns = list(rawdata[elem].columns.values) \r\n print(columns)\r\n if i>0: # only select depth once\r\n columns.remove(depth_name)\r\n df_logs_merge[columns] = rawdata[elem][columns]\r\n # save to csv file\r\n df_logs_merge.to_csv(output_filename,index=False)\r\n print('merge excel finished')","sub_path":"subfunctions/NMR_ML.py","file_name":"NMR_ML.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"245590617","text":"# When running locally install requirements using\n# pip3 install -t lib -r requirements.txt\n# then run with\n# FLASK_APP=main.py FLASK_DEBUG=1 python -m flask run\n\nimport base64, re\nimport cloud_utils\nimport numpy as np\nfrom flask import Flask, jsonify, request, render_template\nfrom io import BytesIO\nfrom PIL import Image\nfrom googleapiclient import discovery\n\n\napp = Flask(__name__)\n\nservice = discovery.build('ml', 'v1')\n\n@app.route('/')\ndef index():\n return render_template('homepage.html')\n\n\"\"\"\nGeneral Landing Page for projects.\n\"\"\"\n@app.route('/projects')\ndef projects():\n return render_template('projects.html')\n\n\"\"\"\nLA county health scores project.\n\"\"\"\n@app.route('/health_scores')\ndef health_scores():\n return render_template('health_scores.html')\n\n\"\"\"\nDraw a digit and predict with MNIST layers project.\nIf RPC to ML-Engine spin up time is too long, host model locally.\n\"\"\"\n@app.route('/digits', methods=['GET', 'POST'])\ndef digits():\n if request.method == 'POST':\n dataURI = request.data.decode('UTF-8')\n image_data = re.sub('^data:image/png;base64,', '', dataURI)\n drawn_input = Image.open(BytesIO(base64.b64decode(image_data)))\n\n bounding_box = drawn_input.getbbox()\n if bounding_box is None:\n return jsonify(error=\"Draw something first\")\n\n # Expand bounding box, in order to center the image a bit more.\n expansion_coef = (-20, -20, 20, 20)\n enlarged_boundary = [sum(x) for x in zip(bounding_box, expansion_coef)]\n drawn_input = drawn_input.crop(enlarged_boundary)\n drawn_input = drawn_input.resize((28, 28), Image.ANTIALIAS)\n\n # For visualizing the crop and rescale.\n buffered = BytesIO()\n drawn_input.save(buffered, format=\"PNG\")\n data64 = base64.b64encode(buffered.getvalue())\n img_str = u'data:img/png;base64,'+ data64.decode('utf-8')\n\n pixels = list(drawn_input.getdata())\n # For the PNG the color is in the alpha channel. Normalize to [0,1].\n b_w = list(map(lambda rgba: rgba[3]/255.0, pixels))\n data = np.array(b_w)\n data.shape = (28, 28)\n data = data.tolist()\n # Prepare to send to ML Instance.\n req = {\"instances\": [{\"x\": data}]}\n try:\n prediction = cloud_utils.mnist_prediction(service, req)\n top_3 = sorted(enumerate(prediction['predictions'][0]['probabilities']),\n key=lambda x: x[1],\n reverse=True)[0:3]\n return jsonify(prediction=top_3, img_uri=img_str)\n except:\n return jsonify(error=\"Error in the ML Instance, Please try again.\")\n return render_template('digits.html')\n","sub_path":"flask/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"497514715","text":"#most star python project on github \nimport requests\nimport pygal\nfrom pygal.style import LightColorizedStyle as lcs, LightenStyle as ls\n\nurl='https://api.github.com/search/repositories?q=language:Perl&sort=stars'\nr=requests.get(url)\n\nprint('Status code :',r.status_code)\n\nresponse_dict=r.json() \nprint('total_respons_sum :',response_dict['total_count'])\n\nrepo_dicts=response_dict['items']\nprint('itesms sum :',len(repo_dicts))\n\n#repo_dict=repo_dicts[0]\n#print('\\nKeys_sum :',len(repo_dict))\n#print('name: ',repo_dict['name'])\n#print('selected information about each repository')\n#for repo_dict in repo_dicts:\n#\tprint('\\nname :',repo_dict['name'])\n#\tprint('owner :',repo_dict['owner']['login'])\n#\tprint('repository :',repo_dict['html_url'])\n#\tprint('description :',repo_dict['description'])\n\t\nnames,plot_dicts = [],[]\nfor repo_dict in repo_dicts:\n\tnames.append(repo_dict['name'])\n\tplot_dict={\n\t\t'value':repo_dict['stargazers_count'],\n\t\t'label':repo_dict['description'],\n\t\t'xlink':repo_dict['html_url'],\n\t}\n\tplot_dicts.append(plot_dict)\n\t\nmy_style=ls('#336699',base_style=lcs)\nmy_config=pygal.Config()\nmy_config.x_label_rotation=45\nmy_config.show_legend=False\nmy_config.title_font_size=30\nmy_config.label_font_size=20\nmy_config.major_label_font_size=30\nmy_config.truncate_label=15\nmy_config.show_y_guides=False\nmy_config.width=1260\n\nchart = pygal.Bar(my_config,style=my_style)\nchart.title='Most-Star Perl Projects on Github'\nchart.x_labels=names\n\nchart.add('',plot_dicts)\nchart.render_to_file('python_github_Perl.svg')\n","sub_path":"python_repos.py","file_name":"python_repos.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"424609121","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/6/15 23:31\n# @Author : flyhawk\n# @Email : flyhawksz@163.com\n# @File : singleton.py\n# @Software: PyCharm\n\n\nclass Singleton(object):\n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, 'instance'):\n cls.instance = super(Singleton, cls).__new__(cls)\n return cls.instance\n\n\ndef main():\n m1 = Singleton()\n m2 = Singleton()\n print(m1)\n print(m2)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"1.Create Model/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"349636688","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 2 13:11:09 2020\r\n\r\n@author: Kat\r\n\"\"\"\r\n#RUNNING MANY GAMES TO SEE THE BETTER COMPUTER METHOD\r\n\r\ndef main():\r\n import numpy as np\r\n import random\r\n \r\n #game board, 8 x 8\r\n def start_board(): #Create the board set-up with pieces in starting position\r\n board = []\r\n for i in range(8):\r\n board.append([' '] * 8) # 8 rows made of 8 empty strings. \r\n board[0][1] = 'O'\r\n board[0][3] = 'O'\r\n board[0][5] = 'O'\r\n board[0][7] = 'O'\r\n board[1][0] = 'O'\r\n board[1][2] = 'O'\r\n board[1][4] = 'O'\r\n board[1][6] = 'O'\r\n board[2][1] = 'O'\r\n board[2][3] = 'O'\r\n board[2][5] = 'O'\r\n board[2][7] = 'O'\r\n board[5][0] = 'X'\r\n board[5][2] = 'X'\r\n board[5][4] = 'X'\r\n board[5][6] = 'X'\r\n board[6][1] = 'X'\r\n board[6][3] = 'X'\r\n board[6][5] = 'X'\r\n board[6][7] = 'X'\r\n board[7][0] = 'X'\r\n board[7][2] = 'X'\r\n board[7][4] = 'X'\r\n board[7][6] = 'X'\r\n return board\r\n\r\n #this contains the \"rules\" for what kinds of moves are allowed. \r\n def valid_move_rules(board, marker, marker_king, xstart, ystart): \r\n #is a move to this space by this player legal? \r\n #should return true is this move can be made\r\n #define player's and opponent's pieces\r\n if marker == 'X': \r\n opponent = 'O'\r\n opponent_king = '0'\r\n player = 1\r\n elif marker == 'O':\r\n opponent = 'X'\r\n opponent_king = 'K'\r\n player = 2 \r\n piece = board[xstart][ystart] #assigns a marker to the piece\r\n #start move scenario to check if there are legal moves to make\r\n moves_to_make = [] \r\n mandatory_moves_to_make = []\r\n piece_to_capture = [] #if a move to make jumps over and captures a piece, the piece captured will be the move -1 in the direction it came grom and that space will be changed to empty\r\n if player == 1:\r\n if piece == marker: #piece is a pawn, can only move forward (up for X's)\r\n for xdirection, ydirection in [ [-1,-1], [-1,1]]: #the 2 ways this piece could moveS\r\n x = xstart\r\n y = ystart\r\n x = x + xdirection #check for diag moves\r\n y = y + ydirection\r\n if is_on_board(x, y) and board[x][y] == ' ':\r\n moves_to_make.append([x, y])\r\n elif is_on_board(x, y) and board[x][y] in (opponent, opponent_king): #move one more space in that direction to see if the piece can be jumped\r\n xjump = x + xdirection \r\n yjump = y + ydirection\r\n if is_on_board(xjump, yjump) and board[xjump][yjump] == ' ':\r\n moves_to_make.append([xjump, yjump])\r\n mandatory_moves_to_make.append([xjump, yjump])\r\n piece_to_capture.append([x, y]) #this stores the location of the capturable piece so it can be changed to ' ' later\r\n elif piece == marker_king: #piece is a king and can move in diag. forwards and back. \r\n for xdirection, ydirection in [ [-1,-1], [1,-1], [-1,1], [1,1]]: #the 4 ways this piece could moveS\r\n x = xstart\r\n y = ystart\r\n x = x + xdirection #check for diag moves\r\n y = y + ydirection\r\n if is_on_board(x, y) and board[x][y] == ' ':\r\n moves_to_make.append([x, y])\r\n elif is_on_board(x, y) and board[x][y] in (opponent, opponent_king):\r\n xjump = x + xdirection \r\n yjump = y + ydirection\r\n if is_on_board(xjump, yjump) and board[xjump][yjump] == ' ':\r\n moves_to_make.append([xjump, yjump])\r\n mandatory_moves_to_make.append([xjump, yjump])\r\n piece_to_capture.append([x, y])\r\n #player 2 can only move down the board so the x and y directions must be changed. 
\r\n elif player == 2:\r\n if piece == marker: \r\n for xdirection, ydirection in [ [1,-1], [1,1]]:\r\n x = xstart\r\n y = ystart\r\n x = x + xdirection \r\n y = y + ydirection\r\n if is_on_board(x, y) and board[x][y] == ' ':\r\n moves_to_make.append([x, y])\r\n elif is_on_board(x, y) and board[x][y] in (opponent, opponent_king):\r\n xjump = x + xdirection \r\n yjump = y + ydirection\r\n if is_on_board(xjump, yjump) and board[xjump][yjump] == ' ':\r\n moves_to_make.append([xjump, yjump])\r\n mandatory_moves_to_make.append([xjump, yjump])\r\n piece_to_capture.append([x, y]) \r\n elif piece == marker_king: #piece is a king, can move forwards and backwards \r\n for xdirection, ydirection in [ [-1,-1], [1,-1], [-1,1], [1,1]]: \r\n x = xstart\r\n y = ystart\r\n x = x + xdirection \r\n y = y + ydirection\r\n if is_on_board(x, y) and board[x][y] == ' ':\r\n moves_to_make.append([x, y])\r\n elif is_on_board(x, y) and board[x][y] in (opponent, opponent_king):\r\n xjump = x + xdirection \r\n yjump = y + ydirection\r\n if is_on_board(xjump, yjump) and board[xjump][yjump] == ' ':\r\n moves_to_make.append([xjump, yjump])\r\n mandatory_moves_to_make.append([xjump, yjump])\r\n piece_to_capture.append([x, y])\r\n if len(moves_to_make) == 0:\r\n return False #there were no valid moves\r\n return (piece_to_capture, moves_to_make, mandatory_moves_to_make)\r\n \r\n def is_move_valid(board, marker, marker_king, xstart, ystart): \r\n if valid_move_rules(board, marker, marker_king, xstart, ystart) != False:\r\n piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, xstart, ystart)\r\n if len(moves_to_make) == 0:\r\n return False\r\n else:\r\n return moves_to_make\r\n else:\r\n return False\r\n \r\n def get_possible_moves(board, marker, marker_king):\r\n #creates a list of ALL the possible moves this player can make \r\n #need to show only jump moves in the event of a jump opportunity\r\n possible_moves = []\r\n mandatory_moves = []\r\n for x in range(8):\r\n for y in range(8):\r\n if is_move_valid(board, marker, marker_king, x, y) != False: #this will look at every space on the board check if a move to this location would be a legal using the is move valid function. \r\n #if false, then there is no valid move to that space. \r\n piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, x,y)\r\n possible_moves = possible_moves + moves_to_make #this creates a list of all the possible moves. \r\n mandatory_moves = mandatory_moves + mandatory_moves_to_make\r\n if len(mandatory_moves) != 0:\r\n possible_moves = mandatory_moves\r\n return possible_moves\r\n \r\n def get_movable_pieces(board, marker, marker_king):\r\n #creates a list of all the possible moves this player can make \r\n #need to show only jump moves in the event of a jump opportunity\r\n movable_pieces = []\r\n mandatory_pieces = []\r\n for x in range(8):\r\n for y in range(8):\r\n if is_move_valid(board, marker, marker_king, x, y) != False: #this will look at every space on the board check if a move to this location would be a legal using the is move valid function. \r\n #if false, then there is no valid move to that space. \r\n movable_pieces.append([x,y]) #this creates a list of all the possible moves. 
\r\n                piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, x,y)\r\n                if len(mandatory_moves_to_make) != 0:\r\n                    mandatory_pieces.append([x,y])\r\n        #jump moves are mandatory: if any piece has a capture available, only those pieces may move\r\n        if len(mandatory_pieces) != 0:\r\n            movable_pieces = mandatory_pieces\r\n        return movable_pieces\r\n    \r\n    def make_move(board, marker, marker_king, xstart, ystart, xnew, ynew): \r\n        if board[xstart][ystart] == marker:\r\n            board[xnew][ynew] = marker\r\n            board[xstart][ystart] = ' '\r\n        if board[xstart][ystart] == marker_king:\r\n            board[xnew][ynew] = marker_king\r\n            board[xstart][ystart] = ' '\r\n    \r\n    def capture_piece(board, marker, marker_king, xstart, ystart, xnew, ynew):\r\n        #give location of piece that was captured\r\n        if xnew == (xstart + 2):\r\n            if ynew == (ystart + 2):\r\n                board[(xstart+1)][(ystart+1)] = ' '\r\n            if ynew == (ystart - 2):\r\n                board[(xstart+1)][(ystart-1)] = ' '\r\n        elif xnew == (xstart - 2):\r\n            if ynew == (ystart + 2):\r\n                board[(xstart-1)][(ystart+1)] = ' '\r\n            if ynew == (ystart - 2):\r\n                board[(xstart-1)][(ystart-1)] = ' '\r\n        #this will be part of updating the board during gameplay\r\n    \r\n    def opposite_end(marker, xnew, ynew):\r\n        #has the player's piece reached the opposite end of the board? true for p1 if in row 0 and for p2 if in row 7. This is part 1 of determining if the piece will become a king\r\n        #note*limiting the piece to one that is not already a king prevents the computer from just moving one king piece back and forth the whole time\r\n        if marker == 'X':\r\n            if xnew == 0:\r\n                return True\r\n            else:\r\n                return False\r\n        if marker == 'O':\r\n            if xnew == 7:\r\n                return True\r\n            else:\r\n                return False\r\n    \r\n    def piece_becomes_king(board, marker, marker_king, xstart, ystart, xnew, ynew):\r\n        #if the piece has reached the end of the board, it will become a king. If this is true, then in the gameplay, we will create an if loop where if this is true, then marker becomes marker_king. \r\n        if board[xstart][ystart] == marker_king:\r\n            return False\r\n        if opposite_end(marker, xnew, ynew) == True:\r\n            board[xnew][ynew] = marker_king\r\n            return True #keeps pieces that are already kings from being marked as becoming a king \r\n    \r\n    def get_computer_move_1(board, marker, marker_king):\r\n        pieces = get_movable_pieces(board, marker, marker_king) \r\n        move_info = [] #this will contain the position of both the piece start and end location for a move. \r\n        king_move_info = [] #groups moves that would give the player a king piece\r\n        for x, y in pieces:\r\n            xstart = x\r\n            ystart = y\r\n            piece = [xstart, ystart]\r\n            if valid_move_rules(board, marker, marker_king, xstart, ystart) == False:\r\n                break\r\n            piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, xstart, ystart)\r\n            if len(mandatory_moves_to_make) != 0:\r\n                moves_to_make = mandatory_moves_to_make\r\n            for x, y in moves_to_make:\r\n                xnew = x\r\n                ynew = y\r\n                move = [xnew, ynew]\r\n                move_info.append([piece, move]) #many pieces will have two moves that they can make. 
This way, if a piece can make 2 moves, there will be a [piece,move] for each\r\n if piece_becomes_king(board, marker, marker_king, xstart, ystart, xnew, ynew) == True:\r\n king_move_info.append([piece, move])\r\n if len(king_move_info) != 0:\r\n move_info = king_move_info #A move that will get the computer a king piece will always be chosen if available and legal to make.\r\n [xstart, ystart], [xnew, ynew] = random.choice(move_info) #this will pick a random move in the list of move_info. A move that will get the computer a king piece. It will also redefine and select the computer's x/y start and end ('new') values\r\n return xstart, ystart, xnew, ynew\r\n \r\n def get_computer_move_2(board, marker, marker_king):\r\n pieces = get_movable_pieces(board, marker, marker_king) \r\n move_info = [] \r\n end_space = []\r\n #move pieces toward the middle and help the computer control the center of the board\r\n for x, y in pieces:\r\n xstart = x\r\n ystart = y\r\n piece = [xstart, ystart]\r\n piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, xstart, ystart)\r\n if len(mandatory_moves_to_make) != 0:\r\n moves_to_make = mandatory_moves_to_make\r\n for x, y in moves_to_make:\r\n xnew = x\r\n ynew = y\r\n move = [xnew, ynew]\r\n move_info.append([piece, move])\r\n for x in range(len(move_info)):\r\n end_space.append(move_info[x][1])\r\n #find which move will put the player closest to the center \r\n center = [3,4] #approx center of board\r\n end = np.array(end_space)#make a copy of end_space\r\n end = np.abs(end - center) #get list of closeness to center\r\n end = np.min(end, axis=1)\r\n find = np.min(end, axis=0) #which one is closest\r\n val = end.tolist().index(find) #pull location of closest end space in list\r\n [xstart, ystart], [xnew, ynew] = move_info[val] #use location to get closet ending move\r\n return xstart, ystart, xnew, ynew\r\n \r\n def get_computer_move(comp_level, board, marker, marker_king):\r\n if comp_level == 1:\r\n xstart, ystart, xnew, ynew = get_computer_move_1(board, marker, marker_king)\r\n if comp_level == 2:\r\n xstart, ystart, xnew, ynew = get_computer_move_2(board, marker, marker_king)\r\n return xstart, ystart, xnew, ynew\r\n \r\n def get_winner(board):\r\n #how many pieces are left on the board...\r\n player1_pieces = 0\r\n player2_pieces = 0 \r\n for x in range (8):\r\n for y in range(8):\r\n if board[x][y] == 'X':\r\n player1_pieces = player1_pieces + 1\r\n if board[x][y] == 'K':\r\n player1_pieces = player1_pieces + 2\r\n #this is because in checkers, a 'king' piece is 2 stacked regular pieces. so you essentiall win back a piece\r\n if board[x][y] == 'O':\r\n player2_pieces = player2_pieces + 1\r\n if board[x][y] == '0':\r\n player2_pieces = player2_pieces + 2\r\n if player2_pieces > player1_pieces:\r\n winner = 'Player 2 wins!'\r\n if player2_pieces < player1_pieces:\r\n winner = 'Player 1 wins!'\r\n elif player2_pieces == player1_pieces:\r\n winner = \"It's a tie!\"\r\n return winner\r\n \r\n \r\n ###################################################################\r\n \r\n player1_type = 'comp'\r\n p1comp_level = 1\r\n player2_type = 'comp'\r\n p2comp_level = 2\r\n \r\n player1_marker = 'X'\r\n player2_marker = 'O'\r\n player1_marker_king = 'K'\r\n player2_marker_king = '0'\r\n\r\n turn = 1 \r\n game_board = start_board()\r\n game_over = False \r\n \r\n while not game_over:\r\n if (turn % 2) != 0: #first turn is 1, which is odd. 
\r\n possible_moves = get_possible_moves(game_board, player1_marker, player1_marker_king)\r\n if len(possible_moves) == 0:\r\n game_over = True\r\n break \r\n if player1_type == 'human': #get the human's move\r\n pass\r\n if player1_type == 'comp':\r\n xstart, ystart, xnew, ynew = get_computer_move(p1comp_level, game_board, player1_marker, player1_marker_king)\r\n make_move(game_board, player1_marker, player1_marker_king, xstart, ystart, xnew, ynew)\r\n capture_piece(game_board, player1_marker, player1_marker_king, xstart, ystart, xnew, ynew)\r\n piece_becomes_king(game_board, player1_marker, player1_marker_king, xstart, ystart, xnew, ynew)\r\n turn = turn + 1\r\n if (turn % 2) == 0:\r\n possible_moves = get_possible_moves(game_board, player2_marker, player2_marker_king)\r\n if len(possible_moves) == 0:\r\n game_over = True\r\n break\r\n if player2_type == 'human':\r\n pass\r\n if player2_type == 'comp': \r\n xstart, ystart, xnew, ynew = get_computer_move(p2comp_level, game_board, player2_marker, player2_marker_king)\r\n make_move(game_board, player2_marker, player2_marker_king, xstart, ystart, xnew, ynew)\r\n capture_piece(game_board, player2_marker, player2_marker_king, xstart, ystart, xnew, ynew)\r\n piece_becomes_king(game_board, player2_marker, player2_marker_king, xstart, ystart, xnew, ynew)\r\n turn = turn + 1 \r\n winner = get_winner(game_board)\r\n return winner\r\n\r\np1wins = 0\r\np2wins = 0\r\ntie = 0\r\n\r\nfor i in range(100):\r\n winner = main()\r\n gamenum = i + 1\r\n if winner == 'Player 1 wins!':\r\n p1wins = p1wins + 1\r\n if winner == 'Player 2 wins!':\r\n p2wins = p2wins + 1\r\n else:\r\n tie = tie + 1\r\n\r\nprint(gamenum, 'games. p1 won', p1wins, 'p2 won', p2wins)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":18410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
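The jump-detection pass that this simulation's `get_movable_pieces` originally attempted (comparing `x` against `x+1`, which can never be true) can be expressed directly on (start, end) pairs. A minimal standalone sketch, not part of the original file; the move-pair format here is assumed for illustration:

```python
# Classify checkers moves as jumps by row displacement: a simple move shifts
# the row by 1, a jump by 2 (the captured piece sits on the square between).
def split_moves(move_pairs):
    """move_pairs: list of ((xstart, ystart), (xnew, ynew)) tuples."""
    simple, jumps = [], []
    for (xs, ys), (xn, yn) in move_pairs:
        if abs(xn - xs) == 2 and abs(yn - ys) == 2:
            jumps.append(((xs, ys), (xn, yn)))
        else:
            simple.append(((xs, ys), (xn, yn)))
    # rule used by the simulation: jumps are mandatory when any exist
    return jumps if jumps else simple

if __name__ == "__main__":
    moves = [((5, 2), (4, 3)), ((5, 4), (3, 6))]
    print(split_moves(moves))  # only the jump ((5, 4), (3, 6)) survives
```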
+{"seq_id":"121513791","text":"import string\nimport matplotlib.pyplot as plt\n\nf = open(\"../../../stats.txt\")\nfrequency = []\nprobability = []\n\n# get just the words frequency\nfor line in f:\n data = string.split(line, \" \")\n\n # ignore the last character (\\n)\n frequency.append((int)(data[1]))\n probability.append((float)(data[2][:-1]))\n\n# y = f(x) -> prob = f(freq)\nplt.plot(frequency, probability, linestyle='---', marker='o', color='r')\n# plt.scatter(frequency, probability)\nplt.gca().invert_xaxis()\nplt.ylabel(\"Probability\")\nplt.xlabel(\"Frequency\")\n\n# plt.show()\nplt.savefig('graph.png', dpi=200)\n","sub_path":"src/week1/plotting/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"441340484","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport os\nimport cv2\nimport random\nimport os.path as osp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import Axes3D\nplt.switch_backend('agg')\nplt.ioff()\n\nimport h5py\nfrom tqdm import trange\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--seq_num', type=int, default=1, help='Specify the number of sequences to render')\nparser.add_argument('--save_dir', type=str, default=\"../vis/\", help='Specify the directory the save the visualization')\nparser.add_argument('--in_filename', type=str, default= \"../data/h36m_valid_pred_3d.h5\", help=\"Speicfy the dataset to load from\")\nargs = parser.parse_args()\nseq_num = args.seq_num \nsave_dir = args.save_dir\nin_filename = args.in_filename\nos.makedirs(save_dir, exist_ok=True)\n\nv3d_to_ours = [3, 2, 1, 4, 5, 6, 16, 15, 14, 11, 12, 13, 8, 0, 7, 9, 10]\npairs = [(0, 1), (1, 2), (2, 13), (3, 13), (3, 4), (4, 5), (6, 7), (7, 8), (8, 12), (9, 10),(9, 12), (10, 11),(12, 14), (12, 15), (13, 14), (15, 16)]\npairs_left = [(3, 13), (3, 4), (4, 5), (9, 10), (9, 12), (10, 11)]\npairs_right = [(0, 1), (1, 2), (2, 13), (6, 7), (7, 8), (8, 12)]\n\ncolors = {\n 'pink': np.array([197, 27, 125]), # L lower leg\n 'light_pink': np.array([233, 163, 201]), # L upper leg\n 'light_green': np.array([161, 215, 106]), # L lower arm\n 'green': np.array([77, 146, 33]), # L upper arm\n 'red': np.array([215, 48, 39]), # head\n 'light_red': np.array([252, 146, 114]), # head\n 'light_orange': np.array([252, 141, 89]), # chest\n 'purple': np.array([118, 42, 131]), # R lower leg\n 'light_purple': np.array([175, 141, 195]), # R upper\n 'light_blue': np.array([145, 191, 219]), # R lower arm\n 'blue': np.array([69, 117, 180]), # R upper arm\n 'gray': np.array([130, 130, 130]), #\n 'white': np.array([255, 255, 255]), #\n}\njcolors = [\n 'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink',\n 'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue',\n 'purple', 'purple', 'red', 'green', 'green', 'white', 'white'\n]\necolors = {\n 0: 'light_pink',\n 1: 'light_pink',\n 2: 'light_pink',\n 3: 'pink',\n 4: 'pink',\n 5: 'pink',\n 6: 'light_blue',\n 7: 'light_blue',\n 8: 'light_blue',\n 9: 'blue',\n 10: 'blue',\n 11: 'blue',\n 12: 'purple',\n 13: 'light_green',\n 14: 'light_green',\n 15: 'purple'\n}\n\nroot = \"/yzbdata/MeshTrack/Data/HMR/Human/Subject/\"\nimage_root = osp.join(root, \"datapre_all\")\n\nin_filename = \"../data/h36m_valid_pred_3d4118.h5\"\nin_filename_ssadv = \"../data/h36m_valid_pred_3dssadv.h5\"\n\nprint(\"Read from\", in_filename)\nf = h5py.File(in_filename, \"r\")\nimagenames = [name.decode() for name in f['imagename'][:]]\n# 2d joints in the order of v3d convention\n# poses2d = np.array(f['joint_2d_gt'])[:, v3d_to_ours]\nposes2d = np.array(f['joint_2d_gt'])\nposes3d = np.array(f['joint_3d_pre'])\nposes3d_gt = np.array(f['joint_3d_gt'])\nposes3d_gt = poses3d_gt - poses3d_gt[:, 13:14]\nf.close()\n\nf = h5py.File(in_filename_ssadv, \"r\")\nposes3d_ssadv = np.array(f['joint_3d_pre'])\nf.close()\n\nt = trange(0, len(imagenames))\nprocessed_video_names = []\n\ndef plot_skeleton_2d(all_frames, joints_2d): \n out_frames = []\n radius = max(4, (np.mean(all_frames[0].shape[:2]) * 0.01).astype(int))\n for idx in range(len(all_frames)): \n for pair in pairs: \n i, j = pair \n pt1, pt2 = joints_2d[idx, i], joints_2d[idx, j] \n x11, y11 = pt1 \n x22, y22 = pt2 \n if pair in pairs_left: 
\n color = (205, 0, 0)\n elif pair in pairs_right: \n color = (0, 205, 0)\n else: \n color = (0, 165, 255)\n cv2.line(all_frames[idx], (int(x11), int(y11)), (int(x22), int(y22)), color, radius-2)\n \ndef get_xxyys(names): \n xxyys = []\n # should be subject, action, camera\n splits = names[0].split('/')\n video_name = '/'.join(splits[:-1])\n part_label_path = osp.join(root, splits[0], 'MySegmentsMat', 'PartLabels',\n splits[1] + (\"cam\" + splits[2]).replace('cam0', '.54138969').replace('cam2','.58860488').replace('cam1', '.55011271').replace('cam3', '.60457274') + \".mat\")\n f = h5py.File(part_label_path, \"r\")\n for idx, name in enumerate(names): \n partmask = f[f['Feat'][idx*30, 0]][()].T \n yp, xp = np.where(partmask != 0)\n xmin, xmax = np.min(xp), np.max(xp) + 1 \n ymin, ymax = np.min(yp), np.max(yp) + 1 \n xxyys.append((xmin, xmax, ymin, ymax))\n f.close()\n return xxyys\n\ndef crop_image(all_frames, xxyys, scale_factor=0.25): \n out_frames = []\n for frame, xxyy in zip(all_frames, xxyys): \n h, w = frame.shape[:2]\n xmin, xmax, ymin, ymax = xxyy \n xc, yc = (xmin + xmax) / 2, (ymin + ymax) / 2\n l = max(xmax - xmin, ymax - ymin)\n xmin, xmax = max(0, xc - l/2), min(w, xc + l / 2)\n ymin, ymax = max(0, yc - l/2), min(h, yc + l / 2)\n xmin, xmax = int(xmin), int(xmax)\n ymin, ymax = int(ymin), int(ymax)\n frame = frame[ymin:ymax, xmin:xmax, :].copy()\n frame = cv2.resize(frame, (int(scale_factor * w), int(scale_factor * h)))\n frame = frame[::-1, :, ::-1] / 255\n out_frames.append(frame)\n return out_frames\n\nfor imageid in t:\n name = imagenames[imageid]\n splits = name.split('/')\n video_name = '/'.join(splits[:3])\n if len(processed_video_names) == seq_num: \n print(\"Finished! Rendered {} sequences, saved to {}\".format(seq_num, save_dir))\n break\n if video_name in processed_video_names:\n continue \n else:\n processed_video_names.append(video_name)\n print(video_name)\n recs = [(idx, name) for idx, name in enumerate(imagenames) if video_name in name]\n # downsample \n recs = recs[::30]\n # cand_list = [x*5 for x in [440, 565, 770]]\n # cand_list = [200, 250, 300, 350, 400, 450, 500, 520, 550, 590, 620, 660, 700, 740, 770, 800, 830, 845]\n # recs = list(filter(lambda x: x[0] in cand_list, recs))\n # recs = list(filter(lambda x: x[0] in [65*5, 100*5, 905*5, 1160*5], recs))\n recs = sorted(recs, key=lambda x: int(x[1].split('/')[-1]))\n names_in_video = [rec[1] for rec in recs]\n indices_in_video = [rec[0] for rec in recs]\n path_format = osp.join(image_root, splits[0], splits[1].replace(' ', '_'), \"cam\" + splits[2], \"{:06d}.jpg\")\n poses3d_in_video = poses3d[indices_in_video]\n poses2d_in_video = poses2d[indices_in_video]\n poses3d_ssadv_in_video = poses3d_ssadv[indices_in_video]\n poses3d_gt_in_video = poses3d_gt[indices_in_video]\n all_frames = [cv2.imread(path_format.format(int(name.split('/')[-1])+1)) for name in names_in_video]\n print(\"Ploting 2d skeleton...\")\n plot_skeleton_2d(all_frames, poses2d_in_video)\n # scale_factor = 0.25\n # all_frames = [cv2.resize(frame, (int(scale_factor * frame.shape[1]), int(scale_factor * frame.shape[0])))[::-1, :, ::-1] / 255 for frame in all_frames]\n print(\"Getting bounding boxes...\")\n xxyys = get_xxyys(names_in_video)\n print(\"Cropping images...\")\n all_frames = crop_image(all_frames, xxyys, scale_factor=0.2)\n print(\"Generating gifs...\")\n\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(elev=10., azim=45.)\n lines_3d, lines_3d_gt = [], []\n lines_3d_ssadv = []\n radius = 
0.75 \n initialized = False\n num_render = len(names_in_video)\n print(num_render, \" frames to plot\")\n\n def update_video(frame_idx):\n global initialized, lines_3d, lines_3d_gt, lines_3d_ssadv\n print(\"{}/{} \".format(frame_idx, num_render), end='\\r')\n pose2d = poses2d_in_video[frame_idx]\n pose3d = poses3d_in_video[frame_idx]\n pose3d_ssadv = poses3d_ssadv_in_video[frame_idx]\n pose3d_gt = poses3d_gt_in_video[frame_idx]\n if not initialized:\n for idx, pair in enumerate(pairs):\n i, j = pair\n if pair in pairs_left: \n color = \"blue\"\n elif pair in pairs_right: \n color = \"green\"\n else: \n color = \"darkorange\"\n # pt1, pt2 = pose3d[i], pose3d[j]\n # x11, y11, z11 = pt1\n # x22, y22, z22 = pt2\n # lines_3d.append(ax.plot([z11, z22], [x11, x22], [-y11, -y22], c='red', linewidth=3, label=\"pre\"))\n pt1, pt2 = pose3d_gt[i], pose3d_gt[j]\n x11, y11, z11 = pt1 \n x22, y22, z22 = pt2 \n lines_3d_gt.append(ax.plot([z11, z22], [x11, x22], [-y11, -y22], c=color, linewidth=3, label=\"gt\"))\n # pt1, pt2 = pose3d_ssadv[i], pose3d_ssadv[j]\n # x11, y11, z11 = pt1 \n # x22, y22, z22 = pt2\n # lines_3d_ssadv.append(ax.plot([z11, z22], [x11, x22], [-y11, -y22], c=\"red\", linewidth=3, label=\"ssadv\"))\n initialized = True\n else:\n for idx, pair in enumerate(pairs):\n i, j = pair\n # pt1, pt2 = pose3d[i], pose3d[j]\n # x11, y11, z11 = pt1\n # x22, y22, z22 = pt2\n # lines_3d[idx][0].set_xdata([z11, z22])\n # lines_3d[idx][0].set_ydata([x11, x22])\n # lines_3d[idx][0].set_3d_properties([-y11, -y22])\n pt1, pt2 = pose3d_gt[i], pose3d_gt[j]\n x11, y11, z11 = pt1\n x22, y22, z22 = pt2\n lines_3d_gt[idx][0].set_xdata([z11, z22])\n lines_3d_gt[idx][0].set_ydata([x11, x22])\n lines_3d_gt[idx][0].set_3d_properties([-y11, -y22])\n # pt1, pt2 = pose3d_ssadv[i], pose3d_ssadv[j]\n # x11, y11, z11 = pt1\n # x22, y22, z22 = pt2\n # lines_3d_ssadv[idx][0].set_xdata([z11, z22])\n # lines_3d_ssadv[idx][0].set_ydata([x11, x22])\n # lines_3d_ssadv[idx][0].set_3d_properties([-y11, -y22])\n\n xroot, yroot, zroot = pose3d_gt[13, 0], -pose3d_gt[13, 1], pose3d_gt[13, 2]\n ax.set_ylim3d([-radius+xroot, radius+xroot])\n ax.set_zlim3d([-radius+yroot, radius+yroot])\n ax.set_xlim3d([-2.5 * radius+zroot, radius+zroot])\n ax.get_xaxis().set_ticklabels([])\n ax.get_yaxis().set_ticklabels([])\n ax.set_zticklabels([])\n\n white = (1.0, 1.0, 1.0, 0.0)\n ax.w_xaxis.set_pane_color(white)\n ax.w_yaxis.set_pane_color(white)\n\n ax.w_xaxis.line.set_color(white)\n ax.w_yaxis.line.set_color(white)\n ax.w_zaxis.line.set_color(white)\n\n r = 0.95\n # radius = max(4, (np.mean(all_frames[0].shape[:2]) * 0.01).astype(int))\n xx = np.linspace(-r * radius + xroot, r * radius + xroot, all_frames[frame_idx].shape[1])\n yy = np.linspace(-r * radius + yroot, r * radius + yroot, all_frames[frame_idx].shape[0])\n xx, yy = np.meshgrid(xx, yy)\n zz = np.ones_like(xx) * (-3.2* radius + zroot)\n ax.set_xlabel('Z', fontsize=13)\n ax.set_ylabel(\"X\", fontsize=13)\n ax.set_zlabel(\"Y\", fontsize=13)\n ax.plot_surface(zz, xx, yy, rstride=1, cstride=1, facecolors=all_frames[frame_idx], shade=False)\n plt.savefig(osp.join(save_dir, f\"{video_name.replace('/', '_')}_{frame_idx}.png\"))\n\n for idx in range(len(names_in_video)): \n update_video(idx)\n ani = animation.FuncAnimation(fig, update_video, range(len(names_in_video)), interval=20)\n save_name = name.replace('/', '_')\n ani.save(osp.join(save_dir, f\"{save_name}.gif\"), writer='imagemagick', fps=20)\n 
t.set_postfix(index=int(imageid))\n","sub_path":"body/human_pose/ambiguity_aware/scripts/plot1.py","file_name":"plot1.py","file_ext":"py","file_size_in_byte":11678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
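The record's `plot_skeleton_2d` reduces to drawing one `cv2.line` per bone pair. A self-contained sketch with made-up joint coordinates; the two-pair skeleton and the color are illustrative, not the file's full 17-joint convention:

```python
# Draw bone segments between 2D joints on a blank canvas with OpenCV.
import numpy as np
import cv2

pairs = [(0, 1), (1, 2)]                                  # joint-index pairs
joints = np.array([[50, 50], [80, 100], [60, 150]], dtype=float)
canvas = np.zeros((200, 200, 3), dtype=np.uint8)

for i, j in pairs:
    pt1 = tuple(int(v) for v in joints[i])                # cv2 wants int pixels
    pt2 = tuple(int(v) for v in joints[j])
    cv2.line(canvas, pt1, pt2, (0, 205, 0), 2)            # BGR color, thickness

cv2.imwrite("skeleton.png", canvas)
```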
+{"seq_id":"412681508","text":"from django.http import HttpResponse\nfrom django.test import TestCase, RequestFactory\nfrom django.contrib.sessions.backends.db import SessionStore as DatabaseSession\n\nfrom unittest import TestSuite\n\nfrom experiments import conf\nfrom experiments.experiment_counters import ExperimentCounter\nfrom experiments.middleware import ExperimentsRetentionMiddleware\nfrom experiments.signal_handlers import transfer_enrollments_to_user\nfrom experiments.utils import DummyUser, SessionUser, AuthenticatedUser, participant\nfrom experiments.models import Experiment, ENABLED_STATE, Enrollment\n\nfrom django.contrib.auth import get_user_model\n\nTEST_ALTERNATIVE = 'blue'\nEXPERIMENT_NAME = 'backgroundcolor'\n\n\nclass WebUserIncorporateTestCase(object):\n def __init__(self, *args, **kwargs):\n super(WebUserIncorporateTestCase, self).__init__(*args, **kwargs)\n self.experiment_counter = ExperimentCounter()\n\n def test_can_incorporate(self):\n self.incorporating.incorporate(self.incorporated)\n\n def test_incorporates_enrollment_from_other(self):\n if not self._has_data():\n return\n\n try:\n experiment = Experiment.objects.create(name=EXPERIMENT_NAME, state=ENABLED_STATE)\n self.incorporated.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)\n self.incorporating.incorporate(self.incorporated)\n self.assertEqual(self.incorporating.get_alternative(EXPERIMENT_NAME), TEST_ALTERNATIVE)\n finally:\n self.experiment_counter.delete(experiment)\n\n def _has_data(self):\n return not isinstance(self.incorporated, DummyUser) and not isinstance(self.incorporating, DummyUser)\n\n\ndef dummy(incorporating):\n return DummyUser()\n\n\ndef anonymous(incorporating):\n return SessionUser(session=DatabaseSession())\n\n\ndef authenticated(incorporating):\n User = get_user_model()\n return AuthenticatedUser(user=User.objects.create(username=['incorporating_user', 'incorporated_user'][incorporating]))\n\n\nclass Dummy2DummyIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Dummy2DummyIncorporateTestCase, self).setUp()\n self.incorporating = dummy(True)\n self.incorporated = dummy(False)\n\n\nclass Dummy2AnonymousIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Dummy2AnonymousIncorporateTestCase, self).setUp()\n self.incorporating = dummy(True)\n self.incorporated = anonymous(False)\n\n\nclass Dummy2AuthenticatedIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Dummy2AuthenticatedIncorporateTestCase, self).setUp()\n self.incorporating = dummy(True)\n self.incorporated = authenticated(False)\n\n\nclass Anonymous2DummyIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Anonymous2DummyIncorporateTestCase, self).setUp()\n self.incorporating = anonymous(True)\n self.incorporated = dummy(False)\n\n\nclass Anonymous2AnonymousIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Anonymous2AnonymousIncorporateTestCase, self).setUp()\n self.incorporating = anonymous(True)\n self.incorporated = anonymous(False)\n\n\nclass Anonymous2AuthenticatedIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Anonymous2AuthenticatedIncorporateTestCase, self).setUp()\n self.incorporating = anonymous(True)\n self.incorporated = authenticated(False)\n\n\nclass Authenticated2DummyIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Authenticated2DummyIncorporateTestCase, 
self).setUp()\n self.incorporating = authenticated(True)\n self.incorporated = dummy(False)\n\n\nclass Authenticated2AnonymousIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Authenticated2AnonymousIncorporateTestCase, self).setUp()\n self.incorporating = authenticated(True)\n self.incorporated = anonymous(False)\n\n\nclass Authenticated2AuthenticatedIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Authenticated2AuthenticatedIncorporateTestCase, self).setUp()\n self.incorporating = authenticated(True)\n self.incorporated = authenticated(False)\n\n\nclass IncorporateTestCase(TestCase):\n def setUp(self):\n self.experiment = Experiment.objects.create(name=EXPERIMENT_NAME, state=ENABLED_STATE)\n self.experiment_counter = ExperimentCounter()\n\n User = get_user_model()\n self.user = User.objects.create(username='incorporate_user')\n self.user.is_confirmed_human = True\n\n request_factory = RequestFactory()\n self.request = request_factory.get('/')\n self.request.session = DatabaseSession()\n participant(self.request).confirm_human()\n\n def tearDown(self):\n self.experiment_counter.delete(self.experiment)\n\n def _login(self):\n self.request.user = self.user\n transfer_enrollments_to_user(None, self.request, self.user)\n\n def test_visit_incorporate(self):\n alternative = participant(self.request).enroll(self.experiment.name, ['alternative'])\n\n ExperimentsRetentionMiddleware().process_response(self.request, HttpResponse())\n\n self.assertEqual(\n dict(self.experiment_counter.participant_goal_frequencies(self.experiment,\n alternative,\n participant(self.request)._participant_identifier()))[conf.VISIT_NOT_PRESENT_COUNT_GOAL],\n 1\n )\n\n self.assertFalse(Enrollment.objects.all().exists())\n self._login()\n\n self.assertTrue(Enrollment.objects.all().exists())\n self.assertIsNotNone(Enrollment.objects.all()[0].last_seen)\n self.assertEqual(\n dict(self.experiment_counter.participant_goal_frequencies(self.experiment,\n alternative,\n participant(self.request)._participant_identifier()))[conf.VISIT_NOT_PRESENT_COUNT_GOAL],\n 1\n )\n self.assertEqual(self.experiment_counter.goal_count(self.experiment, alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL), 1)\n self.assertEqual(self.experiment_counter.participant_count(self.experiment, alternative), 1)\n\n","sub_path":"experiments/tests/test_webuser_incorporate.py","file_name":"test_webuser_incorporate.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
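The test module above relies on a mixin pattern: shared test methods live in a plain class, and each concrete `TestCase` pairs one "incorporating" user type with one "incorporated" type in `setUp`. A minimal stand-in with plain `unittest` and a hypothetical `FakeUser` (not the django-experiments API) to show why only the concrete subclasses are collected:

```python
import unittest

class IncorporateMixin(object):
    # shared test body; not collected on its own because the mixin
    # does not inherit from unittest.TestCase
    def test_incorporates_enrollment(self):
        self.incorporating.incorporate(self.incorporated)
        self.assertEqual(self.incorporating.enrollments, {"exp": "blue"})

class FakeUser(object):
    def __init__(self, enrollments=None):
        self.enrollments = dict(enrollments or {})
    def incorporate(self, other):
        self.enrollments.update(other.enrollments)

class Fake2FakeTestCase(IncorporateMixin, unittest.TestCase):
    def setUp(self):
        self.incorporating = FakeUser()
        self.incorporated = FakeUser({"exp": "blue"})

if __name__ == "__main__":
    unittest.main()
```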
+{"seq_id":"272251381","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 17:43:22 2019\n\n@author: ellen\n\"\"\"\n\nfrom SimPEG import Mesh, Utils\nfrom discretize.utils import mkvc, refine_tree_xyz\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nstyle_list = ['default', 'classic'] + sorted(\n style for style in plt.style.available if style != 'classic')\n\nplt.close('all')\n\n# sphinx_gallery_thumbnail_number = 4\n\n###############################################\n# Basic Example\n# -------------\n#\n# Here we demonstrate the basic two step process for creating a 2D tree mesh\n# (QuadTree mesh). The region of highest discretization if defined within a\n# rectangular box. We use the keyword argument *octree_levels* to define the\n# rate of cell width increase outside the box.\n#\n\n#size dh x nbc(base2)\n#Dimensoes na horizontal\ndh = 40 # minimum cell width (base mesh cell width)\n\n#Dimensao eixo vertical( base 2)\nnbcx =128# number of base mesh cells in x\nnbcy =128\nnbcz =128\n# Define base mesh (domain and finest discretization)\nhx = dh*np.ones(nbcx)\nhy = dh*np.ones(nbcy)\nhz = dh*np.ones(nbcz)\n\nM = Mesh.TreeMesh([hx,hy])\n\n#definir a camada \n\nxp, yp = np.meshgrid( [0., 5120.], [1000., 999.]) #layer\nxy = np.c_[mkvc(xp), mkvc(yp)] # mkvc creates vectors\n\n# Discretize to finest cell size within rectangular box\nM = refine_tree_xyz(\n M, xy, octree_levels=[1, 1], method='box', finalize=False\n )\n\n\n\n# Define objeto\nxp, yp = np.meshgrid( [2400., 2600.], [2000, 1999.]) #goal\nxy = np.c_[mkvc(xp), mkvc(yp)] # mkvc creates vectors\n\n# Discretize to finest cell size within rectangular box\nM = refine_tree_xyz(\n M, xy, octree_levels=[4, 4], method='radial', finalize=False\n )\n\n\n#=========================================\n#Criando mais uma área de dricretização\n#=========================================\n\n\n\n\nM.finalize() # Must finalize tree mesh before use\nx=M\nM.plotGrid(showIt=True)\nax = plt.gca()\nax.invert_yaxis()\nplt.show()\n\nnC = M.nC\nprint(nC)\n#print(\"Aqui!\")\n#mesh.plotGrid(showIt=True)","sub_path":"temp/malha2D.py","file_name":"malha2D.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"424300456","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport json\nimport os\nimport uuid\n\nimport random\nfrom jinja2 import Environment, FileSystemLoader\nfrom random_generator_libs import specific_generator\nimport shutil\nimport uuid\n\nx=sys.argv[1]\ndata=json.loads(x)\n\nif not \"filename\" in data:\n\texit()\n\nif not \"templates\" in data:\n\texit()\n\nif data['templates'] == None:\n\texit()\n\nbase_directory_path = os.path.dirname(os.path.abspath(__file__))\ntemplate_directory_path = \"{}/random_generator_libs/template\".format(base_directory_path)\nzip_directory_path = base_directory_path.replace('scripts', 'storage')\n\nenv = Environment(loader=FileSystemLoader(template_directory_path, encoding='utf8'))\ntpl = env.get_template('outline.tpl.html')\n\nu4 = data['filename'] #str(uuid.uuid4()) \nwork_dir_path = '{}/{}'.format(base_directory_path, u4);\nif os.path.exists(work_dir_path):\n\tshutil.rmtree(work_dir_path)\n\nshutil.copytree('{}/htmls'.format(base_directory_path), work_dir_path)\n\ntext = \"\"\nfor template in data['templates']:\n\ttext += specific_generator.generate_html(template)\n\nhtml = tpl.render({'text': text})\n\nwith open('{}/index.html'.format(work_dir_path), mode='w', encoding='utf-8') as f:\n\tf.write(html)\n\nshutil.make_archive('{}/app/public/{}'.format(zip_directory_path, u4), 'zip', root_dir=work_dir_path)\nshutil.rmtree(work_dir_path)\nprint(u4)\n","sub_path":"scripts/run3.py","file_name":"run3.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"160570034","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pyttsx,time,pyaudio,time,wave,sys\nfrom definitions import *\nimport speech_recognition as sr\n\n\nr = sr.Recognizer()\n\ndef log(d):\n\tt = '[' + time.strftime(\"%Y-%m-%d %H:%M:%S\") + '] :: '\n\tfileLocation = \"C:\\wamp\\www\\Piyu-UI\\interface.txt\"\n\t_file = open(fileLocation,\"a\")\n\t_file.write(t + d+\"\\n\")\n\t_file.close()\n\tprint(t + d)\n\ndef say(word):\n\tengine = pyttsx.init()\n\tengine.setProperty('rate', RATE)\n\tengine.say(word)\n\tengine.runAndWait()\n\n\ndef speak(a_name,lang):\t\n\tCHUNK = 1024\n\tif(lang=='ta'):\n\t\tBASE_DIR\t=\t'voices\\\\tamil'\n\telse:\n\t\tBASE_DIR\t=\t'voices'\n\taudioFileName = BASE_DIR + '\\\\' + a_name + WAV\n\ttry:\n\t\twf = wave.open(audioFileName, 'rb')\n\texcept FileNotFoundError:\n\t\twf = wave.open('voices\\\\beep'+ WAV, 'rb')\n\tp = pyaudio.PyAudio()\n\t_log = '' + '[[Audio]]==>' + a_name + WAV + '\\t\\t' + ''\n\tprint('PIYU CORE v.1.0 - '+ _log)\n\tlog(_log)\n\tstream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n\t channels=wf.getnchannels(),\n\t rate=wf.getframerate(),\n\t output=True)\n\tdata = wf.readframes(CHUNK)\n\n\twhile data != '':\n\t stream.write(data)\n\t data = wf.readframes(CHUNK)\n\n\tstream.stop_stream()\n\tstream.close()\n\tp.terminate()\n\t\n\n\ndef action(source):\n audio = r.listen(source)\n try:\n \tr.recognize(audio)\n \ttxtData = r.recognize(audio)\n \tprint(txtData)\n \t#mapText(txtData); \n except (KeyboardInterrupt, SystemExit):\n raise\n except LookupError:\n \tspeak(VOICE.repeat)\n \tsleepTime = getSleepTime(VOICE.repeat)\n \tprint(\"Sleeping....\")\n \tprint(sleepTime)\n \ttime.sleep(.1)\n \tprint(\"Wokeup...\")\n \n\ndef listen():\t\n\twith sr.Microphone() as source: # use the default microphone as the audio source\n\t\twhile(1):\n\t\t\tprint(\"Listening....\")\n\t\t\taction(source) # listen for the first phrase and extract it into audio data\n\t\t\t\n\ndef mapText(data):\n txts = TXT();\n txts = [txts for txts in dir(txts) \n if not txts.startswith('__')]\n\n var1 = TXT();\n obj_found = False\n for txt in txts:\n #print(var1.__getitem__(txt))\n if(data in var1.__getitem__(txt)):\n obj_found = True\n return str(txt)\n\n if(obj_found == False):\n return 'repeat'\n\ndef mapTextTamil(data):\n txts = TXTtamil();\n txts = [txts for txts in dir(txts) \n if not txts.startswith('__')]\n\n var1 = TXTtamil();\n obj_found = False\n for txt in txts:\n #print(var1.__getitem__(txt))\n if(data in var1.__getitem__(txt)):\n obj_found = True\n return str(txt)\n\n if(obj_found == False):\n return 'repeat'\n\n\nclass pre:\n def time():\n speak(getattr(TAMIL,'time'),'ta')\n speak(getattr(TAMIL,'_'+ time.strftime(\"%H\")),'ta')\n speak(getattr(TAMIL,'hour'),'ta')\n speak(getattr(TAMIL,'_'+ time.strftime(\"%M\")),'ta')\n speak(getattr(TAMIL,'minute'),'ta')\n\n def date():\n speak(getattr(TAMIL,'today'),'ta')\n speak(getattr(TAMIL,'twothousand'),'ta')\n speak(getattr(TAMIL,'_'+ time.strftime(\"%Y\").strip('20')),'ta')\n speak(getattr(TAMIL,'aam'),'ta')\n speak(getattr(TAMIL,'year'),'ta')\n speak(getattr(TAMIL,time.strftime(\"%B\").lower()),'ta')\n speak(getattr(TAMIL,'month'),'ta')\n speak(getattr(TAMIL,'_'+str(int(time.strftime(\"%d\")))),'ta')\n speak(getattr(TAMIL,'aam'),'ta')\n speak(getattr(TAMIL,'day'),'ta')\n\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"126744004","text":"# importing our library of hashing algorithms\nimport hashlib\nfrom hashlib import md5\n\nwhile True:\n # asking for user input regarding which file we want to hash\n file_to_hash = raw_input(\n \"Enter the name of the file you wish to hash [Type 'exit' to exit]...\\n\")\n\n # If user types 'exit' exit\n if file_to_hash == 'exit':\n break\n\n # opening our target file in binary format\n with open('suspicious_files/' + file_to_hash, 'rb') as afile:\n # read the selected file\n buf = afile.read()\n\n # assigning the hash value to a new varible\n hash = md5(buf).hexdigest()\n\n # printing out the resulting hash value to the screen\n print(hash + \"\\n\" + \"Done!\")\n","sub_path":"hash_machine.py","file_name":"hash_machine.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"312690040","text":"# Averaging spatially resolved strain in reconstructed \n# objects of nominally zero strain to determine approximate \n# strain resolution of BCDI measurement.\n\nimport numpy as np\nimport scipy.io as sio\n\nfrom argparse import Namespace\nfrom scipy.ndimage.morphology import binary_erosion\n\n\ndata = Namespace( \n **sio.loadmat( \n '/home/smaddali/ANL/Manuscripts/HEBCDI/data/estimatedStrains_correctScale.mat' \n ) \n)\n\n##########################################################\n\nrho = data.rho_stdSample\nstrain = data.strain_stdSample\nlabel=r'Au nanoparticle'\nnumErosions = 3\n\n#rho = data.rho_111_A\n#strain = data.strain_111_A\n#label = r'Grain $111$' \n#numErosions = 5\n\n#rho = data.rho_111_B\n#strain = data.strain_111_B\n#label=r'Grain $\\bar{1}\\bar{1}\\bar{1}$'\n#numErosions = 5\n\nhistbins = 50\n\n##########################################################\n\nsup = ( np.absolute( rho ) > 0. ).astype( float )\nfor n in list( range( numErosions ) ):\n sup = binary_erosion( sup )\n\nstraindata = strain[ np.where( sup > 0.5 ) ]\n\n\n#plt.clf()\nplt.hist( \n straindata, \n bins=np.linspace( straindata.min(), straindata.max(), histbins ), \n histtype='step' ,\n linewidth=2, \n label=label\n)\n\n\n\n","sub_path":"Python/strainResolution.py","file_name":"strainResolution.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"601912505","text":"import collections\nimport sys\nimport string\n\"\"\"\n1. Expression evaluation\na. Infix Notation: Operators are written between the operands they operate on, e.g. 3 + 4 including\nbrackets\n\nb. postflix or Reverse Poland calculator: Operators are written after operands.\n\nc. Given reverse polish expression create a tree\n\n Expression tree is a binary tree in which each internal node corresponds to operator and each leaf node \n corresponds to operand\n\"\"\"\n\ndef qn1a(input):\n\tarr = input.split(\" \")\n\toperand_stack = []\n\toperator_stack = []\n\toperators = [\"+\", \"-\", \"/\", \"*\"]\n\ti = 0\t\n\twhile i < len(arr):\n\t\tc = arr[i]\n\n\t\tif c not in operators and c not in [\"(\" ,\")\"]:\n\t\t\toperand_stack.append(c)\n\n\t\telif c in [\"*\" , \"/\", \"(\"]:\n\t\t\toperator_stack.append(c)\n\n\t\telif c in [\"+\", \"-\"]:\n\t\t\twhile len(operator_stack) > 0 and operator_stack[-1] in [\"*\" , \"/\"]:\n\t\t\t\tfirst_operand = operand_stack.pop()\n\t\t\t\tsecond_operand = operand_stack.pop()\n\t\t\t\toperator = operator_stack.pop()\t\t\t\n\t\t\t\tevaluation = eval(first_operand + operator + second_operand)\n\t\t\t\toperand_stack.append(str(evaluation))\n\t\t\toperator_stack.append(c)\n\n\t\telif c == \")\":\n\t\t\twhile operator_stack[-1] != \"(\":\n\t\t\t\tfirst_operand = operand_stack.pop()\n\t\t\t\tsecond_operand = operand_stack.pop()\n\t\t\t\toperator = operator_stack.pop()\n\t\t\t\tevaluation = eval(first_operand + operator + second_operand)\n\t\t\t\toperand_stack.append(str(evaluation))\n\t\t\toperator_stack.pop()\n\t\ti += 1\n\n\twhile len(operand_stack) > 1:\n\t\tfirst_operand = operand_stack.pop()\n\t\tsecond_operand = operand_stack.pop()\n\t\toperator = operator_stack.pop()\n\t\tevaluation = eval(first_operand+operator+second_operand)\n\t\toperand_stack.append(str(evaluation))\n\n\treturn operand_stack[-1]\n\"\"\"\n Test Cases: \n \"10 + 2 * 6\" ---> 22 \n \"100 * 2 + 12\" ---> 212 \n \"100 * ( 2 + 12 )\" ---> 1400 \n \"100 * ( 2 + 12 ) / 14 ---> 100 \n\"\"\"\n#print(qn1a(\"10 + 2 * 6\"))\n#print(qn1a(\"100 * 2 + 12\"))\n#print(qn1a(\"100 * ( 2 + 12 )\"))\n#print(qn1a(\"100 * ( 2 + 12 ) / 14\"))\n\ndef qn1b(s):\n\n\tmy_stack = []\n\tsplit_ = s.split(\" \")\n\toperators = [\"+\", \"-\", \"/\", \"*\"]\n\n\tfor i in range(len(split_)):\n\t\tif split_[i] not in operators :\n\t\t\tmy_stack.append(split_[i])\n\t\telse:\n\t\t\tnumber_1 = my_stack.pop()\n\t\t\tnumber_2 = my_stack.pop()\n\t\t\teval_ = eval(number_1 + \" \" + split_[i] + \" \" + number_2)\n\t\t\tmy_stack.append(str(eval_))\n\n\treturn my_stack.pop()\n\n\n\"\"\"\n2. \na. input: [] [[]] [][][] ][, ]], [[[ ]]]\n \nwrite a function to return T/F given the input is a valid (syntax correct bracket order)\n\nb.Given a compressed string in which a number followed by [] indicate how many times those characters occur, \ndecompress the string\nEg. 
: a3[b2[c1[d]]]e will be decompressed as a bcdcd bcdcd bcdcd e.\nAssume the string is well formed and number will always be followed by a [].\n\"\"\"\ndef qn2(input):\n\tif len(input) == 0:\n\t\traise ValueError(\"length must be more than zero\")\n\n\tif len(input) % 2 == 1:\n\t\treturn False\n\tstack_ = collections.deque()\n\n\tfor i in range(len(input)):\n\t\tif input[i] == \"[\":\n\t\t\tstack_.append(\"[\")\n\n\t\telif input[i] == \"]\":\n\t\t\tif len(stack_) == 0: #a closing bracket with nothing open is invalid\n\t\t\t\treturn False\n\t\t\tstack_.pop()\n\t\telse:\n\t\t\traise ValueError(\"only allow [ or ]\")\n\n\tif len(stack_) > 0:\n\t\treturn False\n\telse:\n\t\treturn True\n\n\n\n\"\"\"\n3. Design a stack that allows popping the min in O(1): \n\n   a. O(1) time and O(n) extra space => have another stack and keep a duplicate min in this stack\n   b. Design a stack that supports getMin() in O(1) time and O(1) extra space\n\n\"\"\"\nclass Qn3Stacka:\n\tdef __init__(self):\n\t\tself.min_stack = []\n\t\tself.stack = []\n\n\tdef push(self, value):\n\t\tself.stack.append(value)\n\t\tif len(self.min_stack) == 0:\n\t\t\tself.min_stack.append(value)\n\t\telse:\n\t\t\tcurrent_min = min(value, self.min_stack[-1]) #self. was missing here, which raised a NameError\n\t\t\tself.min_stack.append(current_min)\n\n\tdef pop(self): #pop takes no value argument\n\t\tself.min_stack.pop()\n\t\treturn self.stack.pop()\n\n\tdef get_min(self):\n\t\treturn self.min_stack[-1]\n\nclass Qn3Stackb:\n\tdef __init__(self):\n\t\tself.min = -sys.maxsize\n\t\tself.stack = []\n\n\tdef push(self, value):\n\t\tif len(self.stack) == 0:\n\t\t\tself.stack.append(value)\n\t\t\tself.min = value #initialize the minimum on the first push\n\t\telif value >= self.min:\n\t\t\tself.stack.append(value)\n\t\telse:\n\t\t\tself.stack.append(2*value - self.min) #encoded marker, always < value\n\t\t\tself.min = value\n\n\tdef pop(self):\n\t\ty = self.stack.pop()\n\t\tif y < self.min:\n\t\t\t#y is an encoded marker: recover the previous minimum before overwriting y\n\t\t\treal_value = self.min\n\t\t\tself.min = 2 * self.min - y\n\t\t\ty = real_value\n\t\treturn y\n\n\tdef get_min(self):\n\t\treturn self.min\n\n\n\"\"\"\n4. implement a queue using 2 stacks\n\"\"\"\n\nclass MyQueue:\n\tdef __init__(self):\n\t\t#plain lists serve as the two stacks (the original referenced an undefined stack.Stack)\n\t\tself.pop_stack = []\n\t\tself.push_stack = []\n\n\tdef push(self, value):\n\t\twhile len(self.pop_stack) > 0:\n\t\t\tself.push_stack.append(self.pop_stack.pop())\n\t\tself.push_stack.append(value)\n\n\tdef pop(self):\n\t\twhile len(self.push_stack) > 0:\n\t\t\tself.pop_stack.append(self.push_stack.pop())\n\t\treturn self.pop_stack.pop()\n\n\tdef peek(self):\n\t\twhile len(self.push_stack) > 0:\n\t\t\tself.pop_stack.append(self.push_stack.pop())\n\t\treturn self.pop_stack[-1]\n\n\tdef isEmpty(self):\n\t\treturn len(self.push_stack) == 0 and len(self.pop_stack) == 0\n\n\n\n\"\"\"\n7. BST with no parent pointer\n\na. Implement an iterator over a binary search tree (BST). \nYour iterator will be initialized with the root node of a BST.\n\nThe first call to next() will return the smallest number in BST. \nCalling next() again will return the next smallest number in the BST, and so on.\n\n Note: next() and hasNext() should run in average O(1) time and use O(h) memory, where h is the height of the tree.\nTry to optimize the additional space complexity apart from the amortized time complexity. \n\nsimilar: Find if there is a triplet in a Balanced BST that adds to zero => similar to a\n\n\nb. 
Given a binary search tree T, where each node contains a positive integer, and an integer K, \nyou have to find whether or not there exist two different nodes A and B such that A.value + B.value = K.\nReturn 1 to denote that two such nodes exist. Return 0, otherwise.\nNotes\nYour solution should run in linear time and not take memory more than O(height of T).\nAssume all values in BST are distinct.\nno parent pointer\n\nInput 1: \n\nT :       10\n         /  \\\n        9    20\n\nK = 19\nReturn: 1\nInput 2: \n\nT:        10\n         /  \\\n        9    20\nK = 40\nReturn: 0\n\nc. check if 2 BSTs are similar in O(n) time and O(h) space. Similar means the nodes are the same but the structure is different\n\n\nd. Kth Smallest Element In Tree\nGiven a binary search tree, write a function to find the kth smallest element in the tree.\n\nInput : \n  2\n / \\\n1   3\n\nand k = 2\n\nReturn : 2\n\nAs 2 is the second smallest element in the tree.\nYou may assume 1 <= k <= Total number of nodes in BST \n\ne. we have a list of trees (roots are given), check whether all of them are similar.\n   Whether two trees are similar depends on the leaves from left to right.\n\ne.g. \n    1 \n  2   3\n4   5   6\n\nIf we print leaves from left to right we will get 4, 5, 6\n\n     1\n  2     6 \n     3  \n  4    5\n\nIf we print leaves from left to right we will get 4, 5, 6\n\nTherefore, the two trees above are similar.\n\"\"\"\n\nclass Node:\n\tdef __init__(self, x):\n\t\tself.value = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass qn7a:\n\t# @param root, a binary search tree's root node\n\tdef __init__(self, root):\n\t\tself.stack = []\n\t\tself.root = root\n\t\tself.populate_min()\n\n\t# @return a boolean, whether we have a next smallest number\n\tdef hasNext(self):\n\t\treturn self.stack != []\n\n\t# @return the node holding the next smallest number\n\tdef next(self):\n\t\tif self.hasNext():\n\t\t\tvalue_to_return = self.stack.pop()\n\n\t\t\tif value_to_return.right != None:\n\t\t\t\tnext_stack_element = value_to_return.right\n\t\t\t\twhile next_stack_element != None:\n\t\t\t\t\tself.stack.append(next_stack_element)\n\t\t\t\t\tnext_stack_element = next_stack_element.left\n\n\t\t\treturn value_to_return\n\n\t\treturn -1\n\n\tdef populate_min(self):\n\t\tcurrent = self.root\n\t\twhile current != None:\n\t\t\tself.stack.append(current)\n\t\t\tcurrent = current.left\n\n\n\ndef qn7b(root, k):\n\tmin_stack = []\n\tmax_stack = []\n\n\telement_of_min_stack = root #start from the root itself, otherwise the smallest node is skipped when the root has no left child\n\twhile element_of_min_stack != None:\n\t\tmin_stack.append(element_of_min_stack)\n\t\telement_of_min_stack = element_of_min_stack.left\n\n\telement_of_max_stack = root\n\twhile element_of_max_stack != None:\n\t\tmax_stack.append(element_of_max_stack)\n\t\telement_of_max_stack = element_of_max_stack.right\n\n\n\twhile len(min_stack) > 0 and len(max_stack) > 0:\n\t\tif min_stack[-1] is max_stack[-1]: #the ascending and descending walks met, so no pair of different nodes remains\n\t\t\treturn False\n\t\tif min_stack[-1].value + max_stack[-1].value == k:\n\t\t\treturn True\n\n\t\telif min_stack[-1].value + max_stack[-1].value < k:\n\t\t\t# pop min stack\n\t\t\tcurrent = min_stack.pop()\n\t\t\telement_to_add = current.right\n\t\t\twhile element_to_add != None:\n\t\t\t\tmin_stack.append(element_to_add)\n\t\t\t\telement_to_add = element_to_add.left\n\n\t\telse:\n\t\t\tcurrent = max_stack.pop()\n\t\t\telement_to_add = current.left\n\t\t\twhile element_to_add != None:\n\t\t\t\tmax_stack.append(element_to_add)\n\t\t\t\telement_to_add = element_to_add.right\n\n\n\treturn False\n\n\ndef qn7c(root1, root2):\n\tstack1 = []\n\tstack2 = []\n\n\twhile root1 != None:\n\t\tstack1.append(root1)\n\t\troot1 = root1.left\n\n\twhile root2 != None:\n\t\tstack2.append(root2)\n\t\troot2 = root2.left\n\n\twhile len(stack1) > 0 and len(stack2) > 0:\n\t\tif stack1[-1].value != stack2[-1].value:\n\t\t\treturn False\n\n\t\tcurrent1 = stack1.pop()\n\t\tnext1 = current1.right\n\t\twhile next1 != None:\n\t\t\tstack1.append(next1)\n\t\t\tnext1 = next1.left\n\n\t\tcurrent2 = stack2.pop()\n\t\tnext2 = current2.right\n\t\twhile next2 != None:\n\t\t\tstack2.append(next2)\n\t\t\tnext2 = next2.left\n\n\treturn len(stack1) == 0 and len(stack2) == 0\n\ndef qn7d_stack(root, k):\n\tcurrent = root\n\tstack = []\n\twhile current != None:\n\t\tstack.append(current)\n\t\tcurrent = current.left\n\n\twhile len(stack) > 0 and k > 0:\n\t\tcurrent = stack.pop()\n\t\tk -= 1\n\t\tif current.right != None:\n\t\t\tnext_node = current.right\n\t\t\twhile next_node != None:\n\t\t\t\tstack.append(next_node)\n\t\t\t\tnext_node = next_node.left\n\n\treturn current.value\n\ndef qn7d_morris(root, k):\n\tcurrent = root\n\n\twhile current != None:\n\t\tif current.left == None:\n\t\t\tk -= 1\n\t\t\tif k == 0:\n\t\t\t\treturn current.value #return before stepping past the kth node\n\t\t\tcurrent = current.right\n\t\telse:\n\t\t\tpre = current.left #the inorder predecessor lives in the LEFT subtree, not the right\n\t\t\twhile pre.right != None and pre.right != current:\n\t\t\t\tpre = pre.right\n\n\t\t\tif pre.right == None:\n\t\t\t\tpre.right = current # thread the predecessor back to current (assignment, not comparison)\n\t\t\t\tcurrent = current.left\n\n\t\t\telse:\n\t\t\t\tpre.right = None # remove the thread\n\t\t\t\tk -= 1\n\t\t\t\tif k == 0:\n\t\t\t\t\treturn current.value\n\t\t\t\tcurrent = current.right\n\n\treturn -1 #k was larger than the number of nodes\n\n\n\n\n\n\"\"\"\n8.\nIn a party of N people, only one person is known to everyone (celebrity). That celebrity doesn't know anyone in the party. Find the celebrity if such a person exists.\n\nInput Format\nThe first line of the input will contain N ( the number of people) and K. Each of the next K lines will contain 2 space separated integers i and j stating that person i knows person j.\n\nOutput Format\nThe person who is a celebrity.\n\nSample Input\n\n4 \n\n1 3\n2 3\n4 3\n2 1\n1 4\n\nCelebrity is 3 \n\n=> this solution uses a stack but it is possible to use 2 pointers\n\"\"\"\n#relationship =[(1,3),(2,3)...]\ndef qn8(n, relationship):\n\tstorage = [[0] * n for i in range(n)]\n\n\tfor (i, j) in relationship: #do not reuse n as the loop variable, it shadows the number of people\n\t\tstorage[i-1][j-1] = 1\n\n\tstack = list(range(n))\n\n\twhile len(stack) > 1:\n\t\tfirst = stack.pop()\n\t\tsecond = stack.pop()\n\t\tif storage[first][second] == 1 and storage[second][first] == 0:\n\t\t\tstack.append(second) # first is definitely not the celebrity\n\t\telif storage[first][second] == 0 and storage[second][first] == 1:\n\t\t\tstack.append(first)\n\n\treturn stack[-1] + 1\n\nn = 4\nrelationship =[(1,3),(2,3), (4,3), (2 ,1),(1,4)]\n#print(qn8(n, relationship))\n\n\n\n\"\"\"\n9. /**\n * Write a program to sort a stack such that the smallest items are on the top\n . You may use at most one additional\n * stack to hold items, but you may not copy the elements into\n * any other data structure (such as an array). The stack supports\n * the following operations: push, pop, peek, and isEmpty.\n */\n\"\"\"\n\ndef qn9(input_stack): #assumes a Stack class exposing push/pop/peek/isEmpty\n\ttemp_stack = Stack()\n\ttemp_stack.push(input_stack.pop())\n\n\twhile input_stack.isEmpty() == False:\n\t\tif input_stack.peek() >= temp_stack.peek():\n\t\t\ttemp_stack.push(input_stack.pop())\n\t\telse:\n\t\t\ttemp = input_stack.pop()\n\t\t\twhile temp_stack.isEmpty() == False and temp < temp_stack.peek(): #peek() must be called, and the stack checked for emptiness\n\t\t\t\tinput_stack.push(temp_stack.pop())\n\t\t\ttemp_stack.push(temp)\n\n\twhile temp_stack.isEmpty() == False:\n\t\tinput_stack.push(temp_stack.pop())\n\n\treturn input_stack\n\n\"\"\"\n10. \n\na. 
Next Greater Element Given an array of integers, replace every number with the next higher number to its right.\nIf a number can't be replaced, we leave it as-is. For example, 5, 2, 1, 4, 6, 7 \nneeds to be changed to 6, 4, 4, 6, 7, 7.\n\n\nb. Largest Rectangular Area in a Histogram \nFind the largest rectangular area possible in a given histogram where the largest rectangle \ncan be made of a number of contiguous bars. For simplicity, assume that all bars have the same \nwidth and the width is 1 unit.\n\n\nc.\nSliding Window Maximum\nGiven an array nums, there is a sliding window of size k which is moving from the very left of\n the array to the very right. You can only see the k numbers in the window. \n Each time the sliding window moves right by one position. Return the max sliding window.\n\nExample:\n\nInput: nums = [1,3,-1,-3,5,3,6,7], and k = 3\nOutput: [3,3,5,5,6,7] \nExplanation: \n\nWindow position                Max\n---------------               -----\n[1  3  -1] -3  5  3  6  7       3\n 1 [3  -1  -3] 5  3  6  7       3\n 1  3 [-1  -3  5] 3  6  7       5\n 1  3  -1 [-3  5  3] 6  7       5\n 1  3  -1  -3 [5  3  6] 7       6\n 1  3  -1  -3  5 [3  6  7]      7\nNote: \nYou may assume k is always valid, 1 ≤ k ≤ input array's size for non-empty array.\n\nFollow up:\nCould you solve it in linear time?\n\"\"\"\n\ndef qn10a(arr):\n\tstack = []\n\n\tfor i in range(len(arr)):\n\t\tif len(stack) == 0 or arr[i] <= arr[stack[-1]]:\n\t\t\tstack.append(i)\n\t\telse:\n\t\t\twhile len(stack) > 0 and arr[i] > arr[stack[-1]]:\n\t\t\t\tcurrent_top_of_stack_index = stack.pop()\n\t\t\t\tarr[current_top_of_stack_index] = arr[i]\n\t\t\tstack.append(i)\n\treturn arr\n\n#print(qn10a([5, 2, 1, 4, 6, 7 ]))\n\n\ndef qn10b(arr):\n\tstack = []\n\tout = 0\n\tfor i in range(len(arr)):\n\t\tif len(stack) == 0 or arr[i] >= arr[stack[-1]]: \n\t\t\tstack.append(i)\n\t\telse:\n\t\t\t#keep popping while the top is greater than the current bar\n\t\t\twhile len(stack) > 0 and arr[i] < arr[stack[-1]]:\n\t\t\t\tcurrent_index = stack.pop()\n\t\t\t\tlength = i if len(stack) == 0 else i - stack[-1] - 1 # use the previous stack index for everything to the left\n\t\t\t\t\n\t\t\t\tcurrent_area = arr[current_index] * length\n\t\t\t\tout = max(out, current_area)\n\t\t\tstack.append(i)\n\n\tlast_index = len(arr)\n\twhile len(stack) > 0: \n\t\tcurrent_index = stack.pop()\n\t\tlength = last_index if len(stack) == 0 else last_index - stack[-1] - 1\n\t\tcurrent_area = arr[current_index] * length\n\t\tout = max(out, current_area)\n\treturn out\n\n#print(qn10b([6, 2, 5, 4, 5, 1, 6]))\n#print(qn10b([1,2,5,6,3]))\n\n\n\ndef qn10c(arr, k):\n\tif len(arr) == 0:\n\t\treturn 0\n\n\tdouble_queue = collections.deque()\n\tfor i in range(k):\n\t\twhile len(double_queue) > 0 and arr[double_queue[-1]] < arr[i]:\n\t\t\tdouble_queue.pop()\n\n\t\tdouble_queue.append(i)\n\n\tres = []\n\tfor i in range(k, len(arr)):\n\t\tres.append(arr[double_queue[0]])\n\t\t\n\t\twhile len(double_queue) > 0 and arr[double_queue[-1]] < arr[i]:\n\t\t\tdouble_queue.pop()\n\n\t\tdouble_queue.append(i)\n\n\t\twhile i - double_queue[0]>=k:\n\t\t\tdouble_queue.popleft()\n\n\tres.append(arr[double_queue[0]])\n\treturn res\n\narr = [1,3,-1,-3,5,3,6,7]\nk = 3\nprint(qn10c(arr,k)) #the function is named qn10c; the original called an undefined qn16\n\narr = [5,0,1,4]\nk = 3\nprint(qn10c(arr,k))\n\n\n\"\"\"\n11. Given an encoded string, return its decoded string.\n\nThe encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. 
Note that k is guaranteed to be a positive integer.\n\nYou may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.\n\nFurthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. For example, there won't be input like 3a or 2[4].\n\nExamples:\n\ns = \"3[a]2[bc]\", return \"aaabcbc\".\ns = \"3[a2[c]]\", return \"accaccacc\".\ns = \"2[abc]3[cd]ef\", return \"abcabccdcdcdef\".\n\"\"\"\n\ndef qn11(str):\n\tstack = []\n\tres = \"\"\n\tfor c in str:\n\t\tif len(stack) == 0 or c != \"]\":\n\t\t\tstack.append(c)\n\t\telse:\n\t\t\tcur = \"\"\n\t\t\twhile stack[-1] != \"[\":\n\t\t\t\tcur = stack.pop() + cur\n\t\t\t#pop \"[\"\n\t\t\tstack.pop()\n\t\t\tcounter = \"\"\n\t\t\twhile len(stack) >0 and stack[-1] in string.digits:\n\t\t\t\tcounter = stack.pop() + counter\n\t\t\tcur = cur * int(counter)\n\t\t\tstack.append(cur)\n\tfor c in stack:\n\t\tres += c\n\treturn res\ns = \"3[a]2[bc]\"\nprint(qn11(s))\n\ns = \"3[a2[c]]\"\nprint(qn11(s))\n\ns = \"2[abc]3[cd]ef\"\nprint(qn11(s))\ns = \"100[a]\"\nprint(qn11(s))","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":16371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
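Question 3b's O(1)-extra-space trick deserves a worked illustration: when a new minimum v arrives, the stack stores the encoded value 2*v - old_min, which is strictly less than v, so on pop any entry below the tracked minimum signals "decode here" and the previous minimum is recovered as 2*min - encoded. A compact standalone sketch:

```python
class MinStack:
    """Stack with O(1) get-min and O(1) extra space via encoded markers."""
    def __init__(self):
        self.stack = []
        self.min = None

    def push(self, value):
        if not self.stack:
            self.stack.append(value)
            self.min = value
        elif value >= self.min:
            self.stack.append(value)
        else:                               # new minimum: store encoded marker
            self.stack.append(2 * value - self.min)
            self.min = value

    def pop(self):
        top = self.stack.pop()
        if self.min is not None and top < self.min:   # encoded marker
            top, self.min = self.min, 2 * self.min - top
        if not self.stack:
            self.min = None
        return top

s = MinStack()
for v in (3, 5, 2, 1):
    s.push(v)
assert s.pop() == 1 and s.min == 2      # popping 1 restores min to 2
```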
+{"seq_id":"422530185","text":"#!/usr/bin/env python3\n# encoding: utf-8\n# Public Domain\n\n\ndef quick_sort(t):\n\tif len(t) < 2:\n\t\treturn t\n\t\n\t# copy the list (don't modify it)\n\tt = t[:]\n\t\n\t# pick the middle element as the pivot\n\tpivot = t.pop((len(t) - 1)//2)\n\t\n\tleft = list(filter(lambda x: x < pivot, t))\n\tright = list(filter(lambda x: x >= pivot, t))\n\t\n\treturn quick_sort(left) + [pivot] + quick_sort(right)\n\n\nif __name__ == '__main__':\n\timport random\n\tnums = [random.randrange(100) for _ in range(20)]\n\t\n\tprint(quick_sort(nums))","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"278048501","text":"from tensorforce import Agent, Environment\r\nfrom tensorforce.agents import PPOAgent\r\nfrom tensorforce.environments import OpenAIGym\r\n\r\n# Pre-defined or custom environment\r\n# environment = Environment.create(\r\n# environment='gym', level='CartPole', max_episode_timesteps=500\r\n# )\r\n\r\n# Network as list of layers\r\nnetwork_spec = [\r\n dict(type='dense', size=32, activation='tanh'),\r\n dict(type='dense', size=32, activation='tanh')\r\n]\r\n\r\nenvironment = OpenAIGym('CartPole-v0', visualize=True, max_episode_steps=500)\r\n\r\n\r\n# Instantiate a Tensorforce agent\r\n# agent = Agent.create(\r\n# agent='tensorforce',\r\n# environment=environment, # alternatively: states, actions, (max_episode_timesteps)\r\n# memory=10000,\r\n# update=dict(unit='timesteps', batch_size=64),\r\n# optimizer=dict(type='adam', learning_rate=3e-4),\r\n# policy=dict(network='auto'),\r\n# objective='policy_gradient',\r\n# reward_estimation=dict(horizon=20)\r\n# )\r\n\r\nagent = Agent.create(\r\n agent='ppo', environment=environment, batch_size=10, learning_rate=1e-3\r\n)\r\n\r\n# agent = PPOAgent(\r\n# states_spec=environment.states,\r\n# actions_spec=environment.actions,\r\n# network_spec=network_spec,\r\n# batch_size=4096,\r\n# # BatchAgent\r\n# keep_last_timestep=True,\r\n# # PPOAgent\r\n# step_optimizer=dict(\r\n# type='adam',\r\n# learning_rate=1e-3\r\n# ),\r\n# optimization_steps=10,\r\n# # Model\r\n# scope='ppo',\r\n# discount=0.99,\r\n# # DistributionModel\r\n# distributions_spec=None,\r\n# entropy_regularization=0.01,\r\n# # PGModel\r\n# baseline_mode=None,\r\n# baseline=None,\r\n# baseline_optimizer=None,\r\n# gae_lambda=None,\r\n# # PGLRModel\r\n# likelihood_ratio_clipping=0.2,\r\n# # summary_spec=None,\r\n# # distributed_spec=None\r\n# )\r\n\r\n# Train for 300 episodes\r\nfor _ in range(300):\r\n\r\n # Initialize episode\r\n states = environment.reset()\r\n terminal = False\r\n\r\n while not terminal:\r\n # Episode timestep\r\n actions = agent.act(states=states)\r\n states, terminal, reward = environment.execute(actions=actions)\r\n agent.observe(terminal=terminal, reward=reward)\r\n\r\nagent.close()\r\nenvironment.close()\r\n","sub_path":"Tensorforce/tf_main.py","file_name":"tf_main.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"77788996","text":"import numpy as np\nimport os\nimport warnings\nfrom matplotlib.patches import Polygon, Wedge\nfrom matplotlib.collections import PatchCollection\nimport matplotlib as mpl\n\nfrom pleiades import (Current, CurrentGroup, Magnet, Component, ZSymmCoilSet,\n MagnetGroup, CurrentArray, Configuration)\n\nclass TREXcoil(CurrentGroup):\n def __init__(self,**kwargs):\n z0 = float(kwargs.pop(\"z0\",0))\n self._z0 = z0\n Rarr = 2.00467 + np.linspace(0, .067083, 6)\n Zarr = np.linspace(-.105469,.105469, 16)\n rz_pts = []\n for i, r in enumerate(Rarr):\n if i == 5:\n rz_pts.extend([(r, z0 + z) for z in (Zarr[0::2]+Zarr[1::2])/2])\n else:\n rz_pts.extend([(r, z0 + z) for z in Zarr])\n# for i, z in enumerate(Zarr):\n# if np.mod(i, 2) == 0:\n# rz_pts.extend([(r, z0 + z) for r in Rarr])\n# else:\n# rz_pts.extend([(r, z0 + z) for r in Rarr[0:5]])\n rz_pts = np.array(rz_pts)\n super_kwargs = {\"rz_pts\":rz_pts,\"patchcls\":Polygon,\"fc\":\".35\",\"ec\":\"k\"}\n super_kwargs.update(kwargs)\n super(TREXcoil,self).__init__(**super_kwargs)\n\n @property\n def z0(self):\n return self._z0\n\n @z0.setter\n def z0(self,new_z0):\n dz = new_z0 - self._z0\n super(TREXcoil,self).translate((0,dz))\n self._z0 = new_z0\n\n def build_patchargs(self,**kwargs):\n z0 = self._z0\n left,right = 2.00467, 2.00467 + .067083\n bottom, top = z0 - .105469, z0 + .105469\n return (np.array([[left,bottom],[left,top],[right,top],[right,bottom]]),)\n\nclass TREXCoils(Component):\n def __init__(self,**kwargs):\n ###### Build TREX coils\n super(TREXCoils,self).__init__()\n z0 = float(kwargs.pop(\"z0\",1.1757))\n labels = kwargs.pop(\"labels\",[\"Ncoil\",\"Scoil\"])\n currents = np.array(kwargs.pop(\"currents\",(1,1)),dtype=\"float\")\n nprocs = kwargs.pop(\"nprocs\",[4,4])\n patch_mask = kwargs.pop(\"patch_mask\",[0,0])\n grid = kwargs.pop(\"grid\",None)\n Scoil = TREXcoil(z0=-z0,**kwargs)\n Ncoil = TREXcoil(z0=z0,**kwargs)\n self.groups = [Ncoil,Scoil]\n self.labels = labels\n self.currents = currents\n self.nprocs = nprocs\n self.patch_mask = [0,0]\n\nclass LTRXCoils(ZSymmCoilSet):\n def __init__(self,**kwargs):\n dr,dz = 0.010583333,0.01031667\n nr,nz = 10,13\n r0,z0 = 0.185725,1.6367\n super_kwargs = {\"r0\":r0,\"z0\":z0,\"dr\":dr,\"dz\":dz,\"labels\":[\"Scoil\",\"Ncoil\"],\n \"patchcls\":Polygon,\"fc\":\".35\",\"ec\":\"k\"}\n super_kwargs.update(kwargs)\n super(LTRXCoils,self).__init__(**super_kwargs)\n\nclass VesselMagnets(Component):\n def __init__(self,**kwargs):\n super(VesselMagnets,self).__init__()\n labels = kwargs.pop(\"labels\",[\"Npole\",\"bulk\",\"Spole\"])\n nprocs = kwargs.pop(\"nprocs\",[1,12,1])\n currents = kwargs.pop(\"currents\",[2710.68,2710.68,2710.68])\n patch_mask = kwargs.pop(\"patch_mask\",[0,0,0])\n height = 1 * .0254\n width = 1.5 * .0254\n # first group\n z = 1.5117\n r = .0768\n kwargs.update({\"fc\":\"b\"})\n m1 = MagnetGroup(rz_pts=[(r,z)],mu_hats=[0],height=height,width=width,**kwargs)\n # second group\n R = 1.514475\n Theta = np.linspace(7.5, 172.5, 34)\n rpts,zpts = R*np.sin(np.deg2rad(Theta)),R*np.cos(np.deg2rad(Theta))\n rz_pts = np.vstack((rpts,zpts)).T\n mu_hats = Theta + np.mod(np.arange(1, 35), 2) * 180\n m2 = MagnetGroup(rz_pts=rz_pts,mu_hats=mu_hats,height=height,width=width,**kwargs)\n for m_obj in m2.obj_list[::2]:\n m_obj.patchkwargs[\"fc\"]=\"r\"\n # third group\n z = -1.5117\n r = .0768\n kwargs.update({\"fc\":\"r\"})\n m3 = MagnetGroup(rz_pts=[(r,z)],mu_hats=[0],height=height,width=width,**kwargs)\n self.groups = [m1,m2,m3]\n self.labels = labels\n self.nprocs 
= nprocs\n self.patch_mask = patch_mask\n self.currents = currents\n self.update_patches()\n\n @Component.patches.getter\n def patches(self):\n plist = [group.patches for group,mask in zip(self._groups,self._patch_mask) if not mask]\n return [p for sublist in plist for p in sublist]\n\nclass Dipole(Component):\n \"\"\"Internal dipole Magnet comprised of 2 cylindrical SmCo magnets.\n\n Attributes:\n magnets (list): list of Magnet objects comprising this instance\n patches (list of matplotlib.patches.Polygon instances): patches representing the vessel magnets\n \"\"\"\n\n def __init__(self, **kwargs):\n super(Dipole,self).__init__()\n r0,z0 = kwargs.pop(\"loc\",(0,0))\n muhat = kwargs.pop(\"muhat\",0)\n labels = kwargs.pop(\"labels\",[\"dipole\"])\n nprocs = kwargs.pop(\"nprocs\",[1])\n currents = kwargs.pop(\"currents\",[2901.0])\n patch_mask = kwargs.pop(\"patch_mask\",[0])\n # Build internal dipole magnets\n width = (2.75 / 2 - .125) * .0254\n height = 2.5 / 2 * .0254\n delta = (1.25 / 2 + .125) * .0254\n r1 = r0 + .125 * .0254 + width / 2.0 # + delta*np.sin(np.pi*mu_hat/180.0)\n r2 = r1 # rho0 - delta*np.sin(np.pi*mu_hat/180.0)\n z1 = z0 + delta # *np.cos(np.pi*mu_hat/180.0)\n z2 = z0 - delta # *np.cos(np.pi*mu_hat/180.0)\n m1 = MagnetGroup(rz_pts=[(r1,z1),(r2,z2)],mu_hats=[muhat,muhat],height=height,width=width,current=currents[0],**kwargs)\n self.groups = [m1]\n self.labels = labels\n self.nprocs = nprocs\n self.patch_mask = patch_mask\n self.currents = currents\n self.update_patches()\n \n @Component.patches.getter\n def patches(self):\n plist = [group.patches for group,mask in zip(self._groups,self._patch_mask) if not mask]\n return [p for sublist in plist for p in sublist]\n\nclass BRB(Configuration):\n def __init__(self,**kwargs):\n super(BRB,self).__init__()\n self.add_component(TREXCoils(),\"trex\")\n self.add_component(LTRXCoils(),\"ltrx\")\n self.add_component(VesselMagnets(),\"vessel_mags\")\n self.grid = kwargs.pop(\"grid\",None)\n self.artists = [Wedge((0,0),1.556,0,360,width=.032,fc=\".35\",ec=\"k\",zorder=100)]\n\n def add_cathode(self):\n raise NotImplementedError(\"Can't add cathodes to BRB yet\")\n\n def add_anode(self):\n raise NotImplementedError(\"Can't add anode to BRB yet\")\n\n def add_sweep(self,center,r,theta1,theta2,width=None,**kwargs):\n self.patches.append(Wedge(center,r,theta1,theta2,width=width,**kwargs))\n\nclass LTRX(Configuration):\n def __init__(self,**kwargs):\n super(LTRX,self).__init__()\n zc = 2.811\n self.add_component(CoilPack(r0=.286,z0=1.173-zc,nr=16,nz=16,dr=0.0135,dz=0.0135,fc=\".35\",ec=\"k\"),\"coil_1\")\n for i in range(2,8):\n z_i = 1.173+.278 + (i-2)*.214 - zc\n coil_i = CoilPack(r0=.381,z0=z_i,nr=12,nz=8,dr=0.0127,dz=0.0127,fc=\".35\",ec=\"k\")\n self.add_component(coil_i,\"coil_{0}\".format(i))\n self.add_component(CoilPack(r0=.286,z0=2.811-zc,nr=16,nz=16,dr=0.0135,dz=0.0135,fc=\".35\",ec=\"k\"),\"coil_8\")\n# self.add_component(CoilPack(r0=.53,z0=3.3,nr=3,nz=6,dr=0.01,dz=0.01,fc=\".35\",ec=\"k\"),\"coil_9\")\n# self.add_component(CoilPack(r0=.53,z0=5.3,nr=3,nz=6,dr=0.01,dz=0.01,fc=\".35\",ec=\"k\"),\"coil_10\")\n\n self.grid = kwargs.pop(\"grid\",None)\n\n\n\nclass PCX_HH(object):\n \"\"\"PCX Helmholtz coil set\n\n Attributes:\n t_current (double): current through top HH coil\n b_current (double): current through bottom HH coil\n fc (str): facecolor for patch\n top_coil (CurrentArray): CurrentArray object for top coil\n bot_coil (CurrentArray): CurrentArray object for bottom coil\n current_objs (list): list of Current objects 
comprising this instance\n patches (list of matplotlib.patches.Polygon instances): patches representing the PCX HH coils\n \"\"\"\n\n def __init__(self, t_current, b_current, fc='0.35'):\n ## Build PCX HH coils\n N=89. #number of windings (guess...)\n self.t_current = t_current * N\n self.b_current = b_current * N\n self.fc = fc\n R = 75.8825/100.\n ztop = 38.03142/100.\n zbot = -37.85616/100.\n w = 10.795/100.\n h = 10.16/100.\n self.top_coil = Current((R, ztop), self.t_current, frame=\"rhoz\", units=\"m\")\n self.bot_coil = Current((R, zbot), self.b_current, frame=\"rhoz\", units=\"m\")\n self.current_objs = [self.top_coil, self.bot_coil]\n top_coil_patch = Polygon([(R-w/2.,ztop-h/2.),(R-w/2., ztop+h/2.), (R+w/2., ztop+h/2.), (R+w/2., ztop-h/2.)],\n closed=True, fc=self.fc, ec='k')\n bot_coil_patch = Polygon([(R-w/2.,zbot-h/2.),(R-w/2., zbot+h/2.), (R+w/2., zbot+h/2.), (R+w/2., zbot-h/2.)],\n closed=True, fc=self.fc, ec='k')\n self.patches = [top_coil_patch, bot_coil_patch]\n\n def get_current_objs(self):\n return self.current_objs\n\n def get_current_tuples(self, frame='rhoz', units='m'):\n assert frame.lower() in ['polar', 'rhoz'], \"Invalid frame choice: {0}\".format(frame)\n assert units.lower() in ['m', 'cm'], \"Invalid units choice: {0}\".format(units)\n return [c_obj.get_current_tuples(frame=frame, units=units)[0] for c_obj in self.current_objs]\n\n\nclass PCX_magCage(object):\n \"\"\"Represent an array of dipole magnets that comprise the PCX magnet cage.\n\n Attributes:\n magnets (list): list of Magnet objects comprising this instance\n patches (list of matplotlib.patches.Polygon instances): patches representing the cage magnets\n \"\"\"\n\n def __init__(self, current_mags=None):\n ### Build the magnet array ###\n height = 1.905 # cm\n width = 1.905 # cm\n # all positions relative to origin of the vessel, ref: gdrive sheet\n # [TS1,TS2,TS3,TS4,TS5,TS6,TS7,TS8,TA,S14,S13,S12,S11,S10,S9,S8,S7,\n # S6,S5,S4,S3,S2,S1,BA,BS8,BS7,BS6,BS5,BS4,BS3,BS2,BS1]\n R = np.array([4.1275, 9.8425, 15.5575, 21.2725, 26.9875, 32.7025, 38.4175, 44.1325, 46.6598, 46.25975, 46.25975,\n 46.25975,46.25975, 46.25975, 46.25975, 46.25975, 46.25975, 46.25975, 46.25975, 46.25975, 46.25975,\n 46.25975,46.25975, 46.6598, 44.1325, 38.4175, 32.7025, 26.9875, 21.2725, 15.5575, 9.8425, 4.1275])\n Z = np.array([50.2335, 50.2335, 50.2335, 50.2335, 50.2335, 50.2335, 50.2335, 50.2335,49.0512,46.2645,40.0959,\n 33.9273,27.7587,21.5901,15.4215,9.2529,3.0843,-3.0843,-9.2529,-15.4215,-21.5901,-27.7587,-33.9273,\n -36.7139,-37.8965,-37.8965,-37.8965,-37.8965,-37.8965,-37.8965,-37.8965,-37.8965])\n muHats = np.array([180.,0.,180.,0.,180.,0.,180.,0.,135.,270.,90.,270.,90.,270.,90.,270.,90.,270.,90.,270.,90.,270.,\n 90.,225.,0.,180.,0.,180.,0.,180.,0.,180.])\n if current_mags == None:\n current_mags = np.ones(10)\n n = len(current_mags) / (height / 100.0)\n ## MPDX strengths\n current_mags *= .4 / (4 * np.pi * 10 ** -7 * n)\n current_mags *= 3.3527\n self.magnets = []\n self.patches = []\n for i, (r, z, h) in enumerate(zip(R, Z, muHats)):\n if np.mod(i,2):\n fc = \"b\"\n else:\n fc = \"r\"\n m = Magnet((r, z), current_mags=current_mags, width=width, height=height, frame='rhoz', units=\"cm\",\n mu_hat=h, fc= fc)\n self.magnets.append(m)\n self.patches.append(m.patch)\n\n def set_strength(self, current_mags):\n \"\"\"Set strength of each magnet with 1D array current_mags\"\"\"\n for m in self.magnets:\n m.set_currents(current_mags)\n\n def get_current_tuples(self, frame='rhoz', units='m'):\n \"\"\"Return computationally relevant 
info: list of (rho, z, current) tuples for instance.\"\"\"\n assert frame.lower() in [\"polar\", \"rhoz\"], \"Invalid frame choice: {0}\".format(frame)\n assert units.lower() in [\"m\", \"cm\"], \"Invalid units choice: {0}\".format(units)\n return [c_obj.get_current_tuples(frame=frame, units=units)[0] for c_obj in self.magnets]\n\n def set_magnets(self, current_mags):\n self.magnets = []\n self.patches = []\n self.current_mags = current_mags\n for i, (r, zz, h) in enumerate(zip(self.r, self.z, self.muHats)):\n if np.mod(i, 2):\n fc = \"r\"\n else:\n fc = \"b\"\n m = Magnet((r, zz), current_mags=current_mags, width=width, height=height, frame='rhoz',\n units='cm', mu_hat=h, fc=fc)\n self.magnets.append(m)\n self.patches.append(m.patch)\n\n def set_strength(self, current_mags):\n \"\"\"Set strength of each magnet with 1D array current_mags\"\"\"\n self.current_mags = current_mags\n for m in self.magnets:\n m.set_currents(current_mags)\n\n\nclass PhilipsMRI(object):\n def __init__(self, loc, current):\n rho0, z0 = loc\n delta_rho = .01\n delta_z = .01\n nz = 1.5 // delta_z\n nrho = .05 // delta_rho\n z1 = nz / 2 * delta_z\n ## dimensions of cryostats\n inner_r = .930 / 2.0\n outer_r = 1.88 / 2.0\n length = 1.618\n verts = np.array([[inner_r,z0+length/2.0],[outer_r,z0+length/2.0],[outer_r,z0-length/2.0],[inner_r,z0-length/2.0]])\n self.cryo = Polygon(verts,closed=True,fc=\"None\",ec=\"k\",lw=2,joinstyle=\"round\")\n self.coil = CurrentArray((rho0,z0-z1),nrho,nz,delta_rho,delta_z,current,fc=\".45\",units=\"m\")\n self.patches = [self.cryo,self.coil.patch]\n\n def get_current_tuples(self,frame=\"rhoz\",units=\"m\"):\n return self.coil.get_current_tuples(frame=frame,units=units)\n\n\ndef build_pcx(vessel=True, HH=(False, 0, 0)):\n \"\"\"\"Return field objects and patches representing PCX (modified copy of\n build_wipal).\n\n Parameters\n ----------\n vessel : bool\n Boolean to include vessel magnets or not, default True\n HH : tuple\n Tuple of (bool,float,float) representing whether or not to include\n helmholtz coil set and, if so, how much current goes into the upper and\n lower coil, respectively default (False,0,0)\n \"\"\"\n patches = []\n current_objs = []\n if vessel:\n vessel_magnets = PCX_magCage()\n current_objs.extend(vessel_magnets.magnets)\n patches += vessel_magnets.patches\n if HH[0]:\n top_current = HH[1]\n bot_current = HH[2]\n hh_coils = PCX_HH(top_current, bot_current)\n current_objs.extend(hh_coils.get_current_objs())\n patches.extend(hh_coils.patches)\n return current_objs, patches\n\ndef build_gdt():\n rho0 = 40\n n_rho = 20\n n_z = 100\n delta_rho = 1\n delta_z = 1\n coil_1 = CurrentArray((rho0, -325), n_rho, n_z, delta_rho, delta_z, 1500, units=\"cm\")\n coil_2 = CurrentArray((rho0, -215), n_rho, n_z, delta_rho, delta_z, 500, units=\"cm\")\n coil_3 = CurrentArray((rho0, -105), n_rho, n_z, delta_rho, delta_z, 500, units=\"cm\")\n coil_4 = CurrentArray((rho0, 5), n_rho, n_z, delta_rho, delta_z, 500, units=\"cm\")\n coil_5 = CurrentArray((rho0, 115), n_rho, n_z, delta_rho, delta_z, 500, units=\"cm\")\n coil_6 = CurrentArray((rho0, 225), n_rho, n_z, delta_rho, delta_z, 1500, units=\"cm\")\n coil_7 = CurrentArray((rho0 / 2.0, -395), 10, 50, delta_rho, delta_z, 30000, units=\"cm\")\n coil_8 = CurrentArray((rho0 / 2.0, 345), 10, 50, delta_rho, delta_z, 30000, units=\"cm\")\n current_objs = [coil_1, coil_2, coil_3, coil_4, coil_5, coil_6, coil_7, coil_8]\n patches = [coil_1.patch, coil_2.patch, coil_3.patch, coil_4.patch, coil_5.patch, coil_6.patch, coil_7.patch,\n 
coil_8.patch]\n\n return current_objs, patches\n","sub_path":"pleiades/wipplsystems.py","file_name":"wipplsystems.py","file_ext":"py","file_size_in_byte":15856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
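Every class above ultimately reduces a coil or magnet to a set of (r, z) turns carrying a current. A pleiades-independent sanity check is the analytic on-axis field of a circular loop, B_z(z) = mu0*I*R^2 / (2*(R^2 + (z - z0)^2)^(3/2)), summed over a coil's turns; the two-turn geometry below is a rough stand-in for a TREX-like coil, not taken from the file.

# Sketch (not part of pleiades): analytic on-axis B_z of circular turns.
import numpy as np

MU0 = 4e-7 * np.pi  # vacuum permeability, T*m/A

def on_axis_bz(rz_pts, current, z):
    """Sum the on-axis field of each (r0, z0) turn carrying `current` amperes."""
    z = np.asarray(z, dtype=float)
    bz = np.zeros_like(z)
    for r0, z0 in rz_pts:
        bz += MU0 * current * r0**2 / (2.0 * (r0**2 + (z - z0)**2) ** 1.5)
    return bz

# Hypothetical coil: two turns near r = 2 m, centered at z0 = 1.1757 m.
turns = [(2.005, 1.1757), (2.072, 1.1757)]
print(on_axis_bz(turns, 1e4, np.linspace(-2.0, 2.0, 5)))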
+{"seq_id":"206700785","text":"import cvxpy as cp\nimport numpy as np\nfrom .util import missing2mask\nfrom .convergence import *\nimport sys\n\n\nclass GLRM:\n def __init__(\n self, A, loss_list, k, regX=None, regY=None, missing_list=None, scale=True\n ):\n self.scale = scale\n self.k = k\n self.A = A\n self.loss_list = loss_list\n self.missing_list = missing_list\n self.regX = regX\n self.regY = regY\n self.converged = Convergence()\n self.vals = []\n self.niter = 0\n if missing_list is not None:\n self.mask = missing2mask(A.shape, missing_list)\n else:\n self.mask = np.ones_like(A, dtype=np.bool)\n if self.scale:\n self.calc_scaling()\n else:\n self.mu = ones(A.shape[1])\n self.sigma = ones(A.shape[1])\n self._initialize_probs()\n\n def calc_scaling(self):\n self.mu = np.zeros(self.A.shape[1])\n self.sigma = np.zeros(self.A.shape[1])\n for columns, loss_fxn in self.loss_list:\n for col in columns:\n elems = self.A[:, col][self.mask[:, col]]\n alpha = cp.Variable()\n prob = cp.Problem(cp.Minimize(loss_fxn(elems, alpha)))\n self.sigma[col] = prob.solve() / len(\n elems\n ) # len(elems)-1 per the paper?\n self.mu[col] = alpha.value\n\n def _initialize_probs(self):\n m = self.A.shape[0]\n n = self.A.shape[1]\n\n self.Xp = cp.Parameter((m, self.k))\n self.Xv = cp.Variable((m, self.k))\n\n self.Yp = cp.Parameter((self.k, n))\n self.Yv = cp.Variable((self.k, n))\n\n # Random Intialization\n self.Xv.value = np.random.rand(m, self.k)\n self.Xp.value = np.random.rand(m, self.k)\n\n self.Yp.value = np.random.rand(self.k, n)\n self.Yv.value = np.random.rand(self.k, n)\n self._initialize_XY()\n self.objX = 0\n self.objY = 0\n Zx = self.Xv @ self.Yp\n Zy = self.Xp @ self.Yv\n for col, loss_fxn in self.loss_list:\n Acol = self.A[:, col][self.mask[:, col]]\n Zxcol = Zx[:, col][self.mask[:, col]]\n Zycol = Zy[:, col][self.mask[:, col]]\n\n # Acol\n # print(col)\n # print((Acol,Zx[:,col]+self.mu[col].shape)\n self.objX += loss_fxn(Acol, Zxcol + self.mu[col]) / self.sigma[col]\n self.objY += loss_fxn(Acol, Zycol + self.mu[col]) / self.sigma[col]\n\n if self.regX is not None:\n self.objX += self.regX(self.Xv)\n if self.regY is not None:\n self.objY += self.regY(self.Yv)\n self.probX = cp.Problem(cp.Minimize(self.objX))\n self.probY = cp.Problem(cp.Minimize(self.objY))\n\n def _initialize_XY(self):\n B = (self.A - self.mu) / self.sigma\n B[~self.mask] = 0\n\n U, s, Vh = np.linalg.svd(B, full_matrices=False)\n S = np.diag(s)\n\n X0 = (U @ S)[:, : self.k]\n Y0 = (S @ Vh)[: self.k, :]\n\n self.Xv.value = np.copy(X0)\n self.Xp.value = np.copy(X0)\n\n self.Yv.value = np.copy(Y0)\n self.Yp.value = np.copy(Y0)\n\n def fit(self, solver=cp.ECOS, verboseX=False, verboseY=False, verbose=False):\n if verbose:\n verboseX = True\n verboseY = True\n print(\"iter \\t objY\")\n while not self.converged:\n\n self.converged.objX.append(self.probX.solve(solver, verbose=verbose))\n self.Xp.value = np.copy(self.Xv.value)\n\n self.converged.objY.append(self.probY.solve(solver, verbose=verbose))\n self.Yp.value = np.copy(self.Yv.value)\n self.vals.append(self.objY.value)\n sys.stdout.write(\n f\"\\r {self.niter} \\t {np.round(self.converged.objY[-1],2)}\"\n )\n sys.stdout.flush()\n self.niter += 1\n\n return self.Xp.value, self.Yp.value\n\n def predict(self):\n return (self.Xp @ self.Yp).value + self.mu\n\n def plot_convergence(self, **kwargs):\n 
self.converged.plot(**kwargs)\n","sub_path":"glrm/glrm.py","file_name":"glrm.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
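GLRM.fit alternates two convex solves: minimize over Xv with Yp held as a Parameter, then over Yv with Xp held. For a plain quadratic loss with no mask, offsets, or regularizers, this alternation collapses to alternating least squares; a minimal numpy sketch of that special case (not the cvxpy path above):

import numpy as np

def als(A, k, iters=50, seed=0):
    """Alternating least squares for min ||A - XY||_F^2 with rank k."""
    rng = np.random.default_rng(seed)
    m, n = A.shape
    X = rng.standard_normal((m, k))
    Y = rng.standard_normal((k, n))
    for _ in range(iters):
        X = np.linalg.lstsq(Y.T, A.T, rcond=None)[0].T  # Y fixed, solve for X
        Y = np.linalg.lstsq(X, A, rcond=None)[0]        # X fixed, solve for Y
    return X, Y

A = np.random.default_rng(1).standard_normal((20, 8))
X, Y = als(A, k=3)
print(np.linalg.norm(A - X @ Y))  # shrinks toward the best rank-3 residual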
+{"seq_id":"32657297","text":"from django.test import TestCase\n\n\nclass MyTest(TestCase):\n\n def test_loading_index(self):\n response = self.client.get('/', follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed('index.html')\n \n def test_loading_about_us(self):\n response = self.client.get('/about_us/', follow_redirects=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed('about.html')\n","sub_path":"home/tests_views.py","file_name":"tests_views.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"465379608","text":"from numpy import *\nfrom csv import *\nfrom pandas import *\n\n########store id and label into label[id,label]#######################################\n \nwith open('truth_train.csv') as trf:\n label = dict(reader(trf));\n\n################## pandas #############################################################\n\ntrainset = open('kddtrain_v3.csv', 'w');\ntrainset.write(\"enrollment_id,ndate,study_days,label\\n\");\n\ndf = DataFrame.from_csv('new_log_train.csv',index_col=False)\nidx_dict = df['enrollment_id'].value_counts().to_dict()\n\nfor key in idx_dict:\n selt = df[df['enrollment_id']==key]\n event_stat = selt['event'].value_counts().to_dict();\n datedict = selt.date.value_counts().to_dict(); \n datelist = datedict.keys();\n dhour = 0;\n for datekey in datelist:\n perday = df[(df.enrollment_id==key) & (df.date==datekey)];\n delta = max(to_datetime(perday.time)) - min(to_datetime(perday.time));\n dhour = dhour + round(delta/np.timedelta64(1,'D'),3)\n \n ndate = selt['date'].nunique();\n lab = label.get(str(key)); \n trainset.write(str(key)+\",\"+str(ndate)+\",\"+str(dhour)+\",\"+str(lab)+\"\\n\") \n","sub_path":"Kdd2015/Feature_extractor/extractor_v3.py","file_name":"extractor_v3.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"317740097","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2019-08-12 16:32\n@Author : 比尔丶盖子\n@Email : 914138410@qq.com\n\"\"\"\nimport tensorflow as tf\nfrom util.mnist import load_mnist\n\n\"\"\"\nacc = 96.81%\n\"\"\"\ntf.random.set_seed(1)\nmodel = tf.keras.Sequential([tf.keras.layers.Dense(784, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation='softmax')])\nmodel.compile(optimizer=tf.keras.optimizers.Adam(0.001),\n loss=tf.keras.losses.categorical_crossentropy,\n metrics=[tf.keras.metrics.categorical_accuracy])\n\ntrain_image, train_label, test_image, test_label = load_mnist()\n\nmodel.fit(train_image, train_label, epochs=1)\ntest_loss, test_acc = model.evaluate(test_image, test_label)\nprint(test_acc)\n","sub_path":"attempt/dense.py","file_name":"dense.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"311976954","text":"from features_extraction import *\r\nfrom build_crystal_lattice import *\r\nfrom scipy.spatial import Voronoi\r\n#import matplotlib.pyplot as plt\r\nfrom Data_save_load import *\r\n\r\ndef q_l6m_tilt_fun(q_l6_m):#input list for 1 paritcle\r\n q_l6_m = np.array(q_l6_m)\r\n s = np.sum(pow(abs(q_l6_m), 2))\r\n q_l6_m_tile = q_l6_m / math.sqrt(s)\r\n return q_l6_m_tile.tolist()\r\n#test\r\n#a=[1,2,3]\r\n#b = q_l6m_tilt_fun(a)\r\n#print(np.array(a) / math.sqrt(14))\r\n\r\n\r\ndef disorder_fun(ref_P_ql6m_tilt, NN_index, particle_index, ql6m_tilt):#list\r\n NN_ql6m = []\r\n ref_P_ql6m_tilt = np.array(ref_P_ql6m_tilt)\r\n ql6m_tilt = np.array(ql6m_tilt)\r\n\r\n for item in NN_index:\r\n NN_ql6m.append(ql6m_tilt[particle_index.index(item)])\r\n\r\n s = 0\r\n for item in NN_ql6m:\r\n s = s + np.dot(ref_P_ql6m_tilt, np.conj(item))\r\n s = s / float(len(NN_index))\r\n return np.around(s, 2)\r\n\r\ndef disorder_filter(particle, cache_NN, cache_ql6m, features): #input complete info list particle [(index, [coordiante]),...], cahce_NN [[indices],...], chace_ql6m[[],...], features [[],...]\r\n ql6m_tilt = [q_l6m_tilt_fun(item) for item in cache_ql6m]\r\n unfilterable_particle = []\r\n filtered_particle = []\r\n filtered_features = []\r\n disorders = []\r\n\r\n index, particle_position = zip(*particle)\r\n for i in range(len(cache_NN)):\r\n if set(cache_NN[i]) < set(index):\r\n s = disorder_fun(cache_ql6m[i], cache_NN[i], index, ql6m_tilt)\r\n filtered_particle.append(particle[i])\r\n filtered_features.append(features[i])\r\n disorders.append(np.around(s, 3))\r\n else:\r\n unfilterable_particle.append([particle[i], features[i]])\r\n\r\n return filtered_particle, filtered_features, disorders, unfilterable_particle # filtered_particle list [(index,[coordinate]),...], filtered_features list[[],...], disorders list [...], unfilterable_particle list [[(index, [coordinate]),[feature]],...]\r\n\r\n\r\ndef pre_processing(crystal_lattice):\r\n particle = []\r\n particle_eliminated = []\r\n features = []\r\n cache_NN_index = []\r\n cache_q_l6_m = []\r\n\r\n lattice_voronoi = Voronoi(crystal_lattice)\r\n\r\n points = lattice_voronoi.points\r\n point_region = lattice_voronoi.point_region\r\n regions = lattice_voronoi.regions\r\n vertices = lattice_voronoi.vertices\r\n ridge_points = lattice_voronoi.ridge_points\r\n ridge_vertices = lattice_voronoi.ridge_vertices\r\n\r\n for i in range(len(points)):\r\n region_index = point_region[i]\r\n cell = regions[region_index]\r\n if -1 in cell:\r\n particle_eliminated.append((i, points[i].tolist()))\r\n else:\r\n particle.append((i, points[i].tolist()))\r\n h_dis, h_angle, q_w, minkowski_eig, NN_count, cache_NN_index_temp, cache_q_l6_m_temp = features_extract(i, ridge_points, ridge_vertices, points, vertices)\r\n feature_temp = output_features(h_dis, h_angle, q_w, minkowski_eig, NN_count)\r\n features.append(feature_temp)\r\n cache_NN_index.append(cache_NN_index_temp)\r\n cache_q_l6_m.append(cache_q_l6_m_temp)\r\n\r\n filtered_particle, filtered_features, disorders, unfilterable_particle = disorder_filter(particle, cache_NN_index, cache_q_l6_m, features)\r\n return filtered_particle, filtered_features, disorders, unfilterable_particle, particle_eliminated # filtered_particle/eliminated list [(particle_index, particle coordinate)...], filter_features list [[]...], disorders list [],unfilterable_particle list[[(index, coordinate),[feature]],...]\r\n\r\n\r\n#test\r\ncrystal_lattice = crystal_lattice_bcc(5)\r\nfiltered_particle, filtered_features, 
disorders, unfilterable_particle, particle_eliminated = pre_processing(crystal_lattice)\r\nprint(len(filtered_particle))\r\nprint(len(unfilterable_particle))\r\nprint(len(particle_eliminated))\r\n#inner = [item[1] for item in particle]\r\n#x,y,z = zip(*inner)\r\n#xx,yy,zz = zip(*crystal_lattice)\r\n#fig = plt.figure()\r\n#ax = fig.gca(projection='3d')\r\n#ax.scatter(x,y,z,color='black', marker='o')\r\n#ax.scatter(xx,yy,zz,color='green', marker='*')\r\n#plt.show()\r\n\r\ndata_save(crystal_lattice, filtered_particle, filtered_features, disorders, unfilterable_particle, particle_eliminated)\r\n\r\n","sub_path":"pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
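pre_processing keys its boundary filter on two scipy.spatial.Voronoi attributes: point_region maps each input point to its region, and a -1 vertex index marks an unbounded cell. The filter in isolation, on random points:

import numpy as np
from scipy.spatial import Voronoi

pts = np.random.default_rng(0).uniform(0.0, 1.0, size=(50, 3))
vor = Voronoi(pts)
# A point is an interior particle only if its Voronoi cell is bounded.
interior = [i for i in range(len(pts))
            if -1 not in vor.regions[vor.point_region[i]]]
print(f"{len(interior)} of {len(pts)} points have bounded cells")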
+{"seq_id":"613327896","text":"import emoji\nimport datetime\nimport numpy as np\nimport six\nimport matplotlib.pyplot as plt\n\n# Teeb täpitähed nähtavaks.\ndef parseString(str):\n t = str.encode('latin1').decode('utf8')\n e = \"\"\n i = 0\n while i < (len(t)):\n if t[i] in emoji.UNICODE_EMOJI:\n e += t[i] + \" \"\n t = t[:i] + ' ' + t[i + 1:]\n i -= 1\n i += 1\n symbols = [\n \",\", \".\", \";\", \":\", \"*\", \"'\", \"\\\"\",\n \"-\", \"_\", \"<\", \">\", \"!\", \"@\", \"#\",\n \"£\", \"¤\", \"$\", \"%\", \"&\", \"/\", \"{\", \"}\",\n \"(\", \")\", \"[\", \"]\", \"=\", \"?\", \"+\", \"´\", \"´´\", \"ˇ\", \"|\"\n ]\n for sy in symbols:\n t = t.replace(sy, \" \")\n return t.strip(), e.strip()\n\n\n# Dictionary väärtuste summa.\ndef dictSum(myDict):\n sum = 0\n for i in myDict:\n sum = sum + myDict[i]\n return sum\n\n\ndef on_contenti(msg):\n try:\n c = msg[\"content\"]\n\n return True\n except:\n return False\n\n\ndef plotLisa(largest):\n if largest < 10:\n return 2\n if largest < 100:\n return 20\n if largest < 10000:\n return largest * 0.1\n if largest < 15000:\n return largest * 0.2\n if largest < 35000:\n return largest * 0.2\n if largest < 70000:\n return largest * 0.15\n if largest < 100000:\n return largest * 0.08\n return round(largest * 0.14)\n\n\ndef kuupäev(ts):\n return datetime.datetime.fromtimestamp(ts / 1000.0).date()\n\n\ndef yearmonth(ts):\n kp = kuupäev(ts)\n return (str(kp.year) + str(kp.strftime('%h')))\n\n\ndef algus(messages):\n ts = messages[0][\"timestamp_ms\"]\n return kuupäev(ts)\n\n\ndef lõpp(messages):\n ts = messages[len(messages) - 1][\"timestamp_ms\"]\n return kuupäev(ts)\n\n\ndef save_df(data, filename, row_height=0.625, font_size=14,\n header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',\n bbox=[0, 0, 1, 1], header_columns=0,\n ax=None, **kwargs):\n longest = 0\n for row in data.itertuples():\n for colname in data.columns:\n n = (data.at[row.Index, colname])\n if len(n) > longest:\n longest = len(n)\n if len(colname) > longest:\n longest = len(colname)\n\n tegur = 0.1715\n if longest < 5:\n tegur = 0.24\n if longest < 10:\n tegur = 0.2\n if longest < 31:\n tegur = 0.18\n\n col_width = longest * tegur\n if ax is None:\n size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])\n fig, ax = plt.subplots(figsize=size)\n ax.axis('off')\n\n mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)\n\n mpl_table.auto_set_font_size(False)\n mpl_table.set_fontsize(font_size)\n\n for k, cell in six.iteritems(mpl_table._cells):\n cell.set_edgecolor(edge_color)\n if k[0] == 0 or k[1] < header_columns:\n cell.set_text_props(weight='bold', color='w')\n cell.set_facecolor(header_color)\n else:\n cell.set_facecolor(row_colors[k[0] % len(row_colors)])\n ax.get_figure().savefig(\"results\\\\plots\\\\\" + filename)\n plt.close()\n\n\ndef kellaaeg(timestamp):\n return datetime.datetime.fromtimestamp(timestamp / 1000.0).time()\n","sub_path":"abistajad.py","file_name":"abistajad.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"317119696","text":"# pylint: disable=W0611\n'''Regression model trained on different types of drift accuracy on original model.'''\nimport numpy as np\n\nfrom joblib import load, dump\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nfrom train_model.text_preprocessing import prepare, _extract_message_len, _text_process\nfrom train_model.util import load_data, DATASET_DIR, DATA_DRIFT_DIR\nfrom deploy_model.util import load_best_clf\n\nclass RegressionModel():\n '''Class containing the Regression Model training methods.'''\n datasets: list\n drift_detector: any\n classifier: any\n preprocessor: any\n\n def __init__(self) -> None:\n self.set_datasets()\n self.drift_detector = SVR()\n self.classifier, _ = load_best_clf()\n self.preprocessor = load('output/preprocessor.joblib')\n\n def set_datasets(self):\n '''Set the datasets to train model on.'''\n self.datasets = [DATASET_DIR + 'SMSSpamCollection',\n DATA_DRIFT_DIR + 'drift_flip.txt',\n DATA_DRIFT_DIR + 'drift_random_0.5.txt',\n DATA_DRIFT_DIR + 'drift_mutation.txt',\n DATA_DRIFT_DIR + 'drift_concept.txt',\n DATA_DRIFT_DIR + 'drift_ham_only.txt',\n DATA_DRIFT_DIR + 'drift_spam_only.txt']\n\n def train_regression_model(self):\n '''Trains the regression model on all supplied datasets.'''\n percentiles_stats = []\n scores = []\n\n for index, data_set in enumerate(self.datasets):\n raw_data = load_data(data_set)\n for batch in range(25):\n print(f\"Train logistic drift detector epoch {batch}, dataset {index}\")\n\n x_sample, _ = train_test_split(raw_data, test_size=0.3, random_state=batch)\n y_sample = x_sample['label']\n x_sample = self.preprocessor.transform(x_sample['message'])\n\n classifier_stats = [x[0] for x in self.classifier.predict_proba(x_sample)]\n classifier_res = self.classifier.predict(x_sample)\n print(accuracy_score(classifier_res, y_sample))\n\n percentiles_stats += [\n [np.percentile(classifier_stats, i) for i in range(0, 101, 5)]]\n scores += [accuracy_score(classifier_res, y_sample)]\n\n self.drift_detector.fit(percentiles_stats, scores)\n dump(self.drift_detector, 'output/regression/regression_model.joblib')\n\n\nif __name__ == \"__main__\":\n RegressionModel().train_regression_model()\n","sub_path":"train_model/regression_model.py","file_name":"regression_model.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"496140979","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 29 12:10:19 2020\n\n@author: Daniel.Feeney\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# Read in files\n# only read .asc files for this work\nfPath = 'C:/Users/kate.harrison/Dropbox (Boa)/EndurancePerformance/NewBalanceRoadRacing_Jan2020/PressureData/'\nfileExt = r\".mva\"\nentries = [fName for fName in os.listdir(fPath) if fName.endswith(fileExt)]\n\n# Define constants and options\nfThresh = 30 #below this value will be set to 0.\nstepLen = 45 #Set value to look forward \n# list of functions \n# finding landings on the force plate once the filtered force exceeds the force threshold\ndef findLandings(force):\n lic = []\n for step in range(len(force)-1):\n if force[step] == 0 and force[step + 1] >= fThresh:\n lic.append(step)\n return lic\n\n#Find takeoff from FP when force goes from above thresh to 0\ndef findTakeoffs(force):\n lto = []\n for step in range(len(force)-1):\n if force[step] >= fThresh and force[step + 1] == 0:\n lto.append(step + 1)\n return lto\n\n### File Structure: Dorsal Forefoot, Metatarsals (.1), Midfoot (.2), plantar mets (.3), plantar toes (.4), plantar heel (.5)\nsdFF = []\nmeanFF = []\nsdMF = []\nmeanMF = []\nsdMets = []\nmeanMets = []\nmaxMF = []\nmaxFF = []\nmaxMets = []\n\nsdPlantMetP = []\nmeanPlantMetP = []\nsdToeP = []\nmeanToeP = []\nsdHeelP = []\nmeanHeelP = []\nmaxPlantMetP = []\nmaxToeP = []\nmaxHeelP = []\n\nHeelPMidStance = []\nHeelRateDecay = []\ntrial = []\nSubject = []\nCondition = []\nConfig = []\n\n#first columns (FF, Mets, and MF) all relate to dorsal values. Once you get to PlantMetsForce it is plantar metatarsal force \n#and everything to the right of that column is plantar side. Each location (e.g. FF, MF, etc.) 
has force, max Pressure, Mean Pressure, and pct\n\nfor file in entries:\n try:\n \n fName = file #Load one file at a time\n \n subName = fName.split(sep = \"_\")[0]\n ConditionTmp = fName.split(sep=\"_\")[1]\n ConfigTmp = fName.split(sep=\"_\")[2]\n \n dat = pd.read_csv(fPath+fName,sep='\\t', skiprows = 15, header = 0)\n \n dat.columns = ['Time','FFForce', 'FFMaxP', 'FFMeanP', 'FFpct', 'MetsForce', 'MetsMaxP', 'MetsMeanP','Metspct', \n 'MFForce','MFMaxP', 'MFMeanP', 'MFpct', 'PlantMetsForce','PlantMetsMaxP', 'PlantMetsMeanP', 'PlantMetsPct',\n 'ToesForce','ToesMaxP','ToesMeanP','ToesPct','HeelForce', 'HeelMaxP', 'HeelMeanP','HeelPct']\n dat['Force'] = dat.HeelForce + dat.ToesForce + dat.PlantMetsForce\n # filtering force to find landings/takeoffs \n forceTot = dat.Force\n forceTot[forceTot go.Scatter:\n\n data = line.load(start, end)\n mode = ''\n linestyle = None\n marker = None\n if line.linestyle:\n mode = 'lines'\n dash_dict = {'-': 'solid', ':': 'dot', '.': 'dot', '--': 'dash', '-.': 'dashdot'}\n linestyle = {'color': line.color, 'dash': dash_dict[line.linestyle], 'width': line.linewidth}\n if line.marker:\n mode = 'lines+markers' if mode else 'markers'\n symboldict = {'o': 'circle', 'x': 'x-thin', ',': 'line-ns', '+': 'cross-thin', '*': 'asterisk', '.': 'circle'}\n if line.marker in symbols:\n symbol = line.marker\n else:\n symbol = symboldict.get(line.marker, 'circle')\n\n marker = {'color': line.color, 'symbol': symbol}\n\n return go.Scatter(x=data.index, y=data, mode=mode, line=linestyle, marker=marker, name=line.name)\n\n\ndef _make_figure(plot: Plot) -> go.Figure:\n rows = -(-len(plot.subplots) // plot.columns)\n fig = make_subplots(rows, plot.columns, shared_xaxes=True)\n subplot_positions = sum(([i] * len(sp.lines) for i, sp in enumerate(plot.subplots)), [])\n rows = [1 + i // plot.columns for i in subplot_positions]\n cols = [1 + i % plot.columns for i in subplot_positions]\n for i, sp in enumerate(plot.subplots):\n row, col = 1 + i // plot.columns, 1 + i % plot.columns\n if sp.ylim:\n fig.update_yaxes(range=list(sp.ylim), row=row, col=col)\n\n fig.update_yaxes()\n fig.add_traces(\n [\n _draw_line(l, plot.start, plot.end)\n for l in plot.lines()\n ],\n rows=rows,\n cols=cols\n )\n\n fig.update_yaxes()\n fig.update_layout(width=plot.size[0], height=plot.size[1], template='none')\n return fig\n\n\ndef to_image(plot: Plot, format: str) -> bytes:\n \"\"\"\n Draws the plot and returns a byte string containing the image\n \"\"\"\n fig = _make_figure(plot)\n return fig.to_image(format=format)\n\n\ndef to_html(plot: Plot)->bytes:\n \"\"\"\n Draws the plot to include into an html page, here as svg.\n Alternative could be as an element with base64 data\n \"\"\"\n fig = _make_figure(plot)\n return fig.to_html(include_plotlyjs='cdn').encode('utf-8')\n\n\nsymbols = [\n \"circle\", \"circle-open\", \"circle-dot\", \"circle-open-dot\",\n \"square\", \"square-open\", \"square-dot\", \"square-open-dot\",\n \"diamond\", \"diamond-open\", \"diamond-dot\", \"diamond-open-dot\",\n \"cross\", \"cross-open\", \"cross-dot\", \"cross-open-dot\", \"x\",\n \"x-open\", \"x-dot\", \"x-open-dot\", \"triangle-up\",\n \"triangle-up-open\", \"triangle-up-dot\", \"triangle-up-open-dot\",\n \"triangle-down\", \"triangle-down-open\", \"triangle-down-dot\",\n \"triangle-down-open-dot\", \"triangle-left\", \"triangle-left-open\",\n \"triangle-left-dot\", \"triangle-left-open-dot\", \"triangle-right\",\n \"triangle-right-open\", \"triangle-right-dot\", \"triangle-right-open-dot\",\n \"triangle-ne\", 
\"triangle-ne-open\", \"triangle-ne-dot\",\n \"triangle-ne-open-dot\", \"triangle-se\", \"triangle-se-open\",\n \"triangle-se-dot\", \"triangle-se-open-dot\", \"triangle-sw\", \"triangle-sw-open\", \"triangle-sw-dot\", \"triangle-sw-open-dot\" ,\n \"triangle-nw\", \"triangle-nw-open\", \"triangle-nw-dot\" ,\n \"triangle-nw-open-dot\", \"pentagon\", \"pentagon-open\", \"pentagon-dot\",\n \"pentagon-open-dot\", \"hexagon\", \"hexagon-open\", \"hexagon-dot\", \"hexagon-open-dot\", \"hexagon2\", \"hexagon2-open\", \"hexagon2-dot\", \"hexagon2-open-dot\", \"octagon\", \"octagon-open\",\n \"octagon-dot\", \"octagon-open-dot\", \"star\", \"star-open\",\n \"star-dot\", \"star-open-dot\", \"hexagram\", \"hexagram-open\",\n \"hexagram-dot\", \"hexagram-open-dot\", \"star-triangle-up\", \"star-triangle-up-open\", \"star-triangle-up-dot\", \"star-triangle-up-open-dot\" ,\n \"star-triangle-down\", \"star-triangle-down-open\", \"star-triangle-down-dot\",\n \"star-triangle-down-open-dot\", \"star-square\", \"star-square-open\", \"star-square-dot\", \"star-square-open-dot\", \"star-diamond\" ,\n \"star-diamond-open\", \"star-diamond-dot\", \"star-diamond-open-dot\" ,\n \"diamond-tall\", \"diamond-tall-open\", \"diamond-tall-dot\" ,\n \"diamond-tall-open-dot\", \"diamond-wide\", \"diamond-wide-open\" ,\n \"diamond-wide-dot\", \"diamond-wide-open-dot\", \"hourglass\" ,\n \"hourglass-open\", \"bowtie\", \"bowtie-open\", \"circle-cross\" ,\n \"circle-cross-open\", \"circle-x\", \"circle-x-open\", \"square-cross\" ,\n \"square-cross-open\", \"square-x\", \"square-x-open\", \"diamond-cross\",\n \"diamond-cross-open\", \"diamond-x\", \"diamond-x-open\",\n \"cross-thin\", \"cross-thin-open\", \"x-thin\", \"x-thin-open\",\n \"asterisk\", \"asterisk-open\", \"hash\", \"hash-open\",\n \"hash-dot\", \"hash-open-dot\", \"y-up\", \"y-up-open\", \"y-down\",\n \"y-down-open\", \"y-left\", \"y-left-open\", \"y-right\", \"y-right-open\", \"line-ew\", \"line-ew-open\", \"line-ns\", \"line-ns-open\", \"line-ne\", \"line-ne-open\", \"line-nw\" ,\n \"line-nw-open\", \"arrow-up\", \"arrow-up-open\", \"arrow-down\" ,\n \"arrow-down-open\", \"arrow-left\", \"arrow-left-open\", \"arrow-right\" ,\n \"arrow-right-open\", \"arrow-bar-up\", \"arrow-bar-up-open\" ,\n \"arrow-bar-down\", \"arrow-bar-down-open\", \"arrow-bar-left\" ,\n \"arrow-bar-left-open\", \"arrow-bar-right\", \"arrow-bar-right-open\"\n]\n","sub_path":"odmf/plot/draw_plotly.py","file_name":"draw_plotly.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"188795432","text":"from django.conf.urls import patterns, include, url\nfrom comics.feeds import LatestEntriesFeed\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'threepanel.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', 'comics.views.home', name='home'),\n url(r'^manage', 'comics.views.manage', name='manage'),\n url(r'^dashboard/', include('dashboard.urls')),\n url(r'^comics/', include('comics.urls')),\n url(r'^subscribe/', include('publish.urls')),\n url(r'^pages/', include('pages.urls')),\n url(r'^subscribe$', 'publish.views.subscribe'),\n url(r'rss.xml', LatestEntriesFeed())\n)\n","sub_path":"threepanel/threepanel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"129479998","text":"import re\nimport requests\nfrom urllib import request\nresp = requests.get(url='http://mp.weixin.qq.com/s/Z6OeRHUfiUyKIV-KG7Eb8w')\nregx = \"https://mmbiz.qpic.cn/mmbiz_jpg/.*?wx_fmt=jpeg\"\npic = re.findall(regx,resp.text)\nstorePATH = \"/Users/fangdongliang/Desktop/pic/\"\ncount = 0\nfor item in pic:\n\tcount += 1\n\tfilename = storePATH+str(count)+\".jpeg\" \n\twith request.urlopen(item) as stream:\n\t\tpic_stream = stream.read()\n\twith open(filename,\"wb\") as outfile:\n\t\toutfile.write(pic_stream)\n\t\t","sub_path":"Others/wx_pic_spider.py","file_name":"wx_pic_spider.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"399167891","text":"from flask import Flask, render_template\nimport pandas as pd\nimport time\n\napp = Flask(__name__)\ndata = pd.read_csv(\"data/data.csv\")\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index(chart1ID='chart1_ID', chart1_type='bar',\n chart2ID='chart2_ID', chart2_type='bar',\n chart3ID='chart3_ID', chart3_type='column',\n chart4ID='chart4_ID',\n chart5ID='chart5_ID', chart5_type='bar',\n chart6ID='chart6_ID', chart6_type='column'):\n # Chart1\n maleCount = data[data['Gender'] == 'M'][\"Gender\"].count()\n femaleCount = data[data['Gender'] == 'F'][\"Gender\"].count()\n total = maleCount + femaleCount\n malePercentage = ((maleCount / total) * 100).round(2)\n femalePercentage = ((femaleCount / total) * 100).round(2)\n\n chart1 = {\"renderTo\": chart1ID, \"type\": chart1_type}\n series1 = [{\"name\": 'Male', \"data\": [malePercentage], \"color\": \"#00FFFF\"},\n {\"name\": 'Female', \"data\": [femalePercentage], \"color\": \"#DC143C\"}]\n title1 = {\"text\": 'Male vs Female Ratio'}\n xAxis1 = {\"categories\": ['Gender']}\n yAxis1 = {\"title\": {\"text\": 'Percentage (%)'}}\n plotOptions1 = {\"bar\": {\"stacking\": \"normal\", 'dataLabels': {'enabled': 'true'}}}\n tooltip1 = {\"borderRadius\": \"20\"}\n credits = {\"text\": 'Made by VARUN NAGRARE', \"href\": 'https://www.facebook.com/Wolvarun9295',\n \"style\": {\"fontSize\": '10px', 'color': \"#FF0000\"}}\n\n ###################################################################################################################\n # Chart2\n python = []\n automation = []\n nodejs = []\n java = []\n ios = []\n devops = []\n de = []\n ml = []\n\n python.append(data[data['Technology'] == 'Python'][\"Technology\"].count())\n automation.append(data[data['Technology'] == 'Automation'][\"Technology\"].count())\n nodejs.append(data[data['Technology'] == 'NodeJs'][\"Technology\"].count())\n java.append(data[data['Technology'] == 'Java'][\"Technology\"].count())\n ios.append(data[data['Technology'] == 'IOS'][\"Technology\"].count())\n devops.append(data[data['Technology'] == 'DevOps'][\"Technology\"].count())\n de.append(data[data['Technology'] == 'Data Engineering'][\"Technology\"].count())\n ml.append(data[data['Technology'] == 'Machine Learning'][\"Technology\"].count())\n\n chart2 = {\"renderTo\": chart2ID, \"type\": chart2_type}\n series2 = [\n {\"name\": 'Automation', \"data\": automation, \"color\": \"#FF8000\"},\n {\"name\": 'Data Engineering', \"data\": de, \"color\": \"#CD00FF\"},\n {\"name\": 'DevOps', \"data\": devops, \"color\": \"#0027FF\"},\n {\"name\": 'IOS', \"data\": ios, \"color\": \"#00FFFF\"},\n {\"name\": 'Java', \"data\": java, \"color\": \"#74FF00\"},\n {\"name\": 'Machine Learning', \"data\": ml, \"color\": \"#FF00A2\"},\n {\"name\": 'NodeJS', \"data\": nodejs, \"color\": \"#FFF300\"},\n {\"name\": 'Python', \"data\": python, \"color\": \"#FF0000\"},\n ]\n title2 = {\"text\": 'Techwise Bar Chart'}\n xAxis2 = {\"categories\": ['Technology']}\n yAxis2 = {\"title\": {\"text\": 'No. 
of People'}}\n plotOptions2 = {\"bar\": {'dataLabels': {'enabled': 'true'}}}\n\n ###################################################################################################################\n # Chart3\n labX = data[data['Lab'] == 'Bangalore']['Lab'].count()\n labY = data[data['Lab'] == 'Mumbai']['Lab'].count()\n chart3 = {\"renderTo\": chart3ID, \"type\": chart3_type, \"polar\": \"true\", \"inverted\": \"true\"}\n series3 = [{\"name\": 'Bangalore', \"data\": [labX], \"color\": \"#B500FE\"},\n {\"name\": 'Mumbai', \"data\": [labY], \"color\": \"#00FE7B\"}]\n title3 = {\"text\": 'Distribution of People in Lab X and Y'}\n xAxis3 = {\"categories\": ['LAB']}\n yAxis3 = {\"crosshair\": {\"enabled\": \"true\", \"color\": \"#333\"}}\n plotOptions3 = {\"column\": {'dataLabels': {'enabled': 'true'}}}\n pane3 = {\"size\": \"85%\", \"innerSize\": \"20%\", \"endAngle\": \"300\"}\n\n ###################################################################################################################\n # Chart4\n technology = []\n people = []\n for i in data.Technology.unique():\n technology.append(i)\n people.append(data[data['Technology'] == i][\"Technology\"].count())\n chart4 = {\"renderTo\": chart4ID}\n series4 = [{\"type\": \"areaspline\", \"name\": 'Technologies (Areaspline Chart)', \"data\": people, \"color\": \"#FF1493\"},\n {\"type\": \"column\", \"name\": 'Technologies (Column Chart)', \"data\": people, \"color\": \"#006CFE\"}]\n title4 = {\"text\": 'Techwise Distribution Chart'}\n xAxis4 = {\"categories\": technology}\n yAxis4 = {\"title\": {\"text\": 'No. of People'}}\n plotOptions4 = {\"areaspline\": {'dataLabels': {'enabled': 'true'}}, \"column\": {'dataLabels': {'enabled': 'true'}}}\n\n ###################################################################################################################\n # Chart5\n company = []\n for i in data.Company.unique():\n company.append(i)\n\n company.sort()\n\n dataFrame = data\n dataFrame.rename(columns={'Unnamed: 0': 'id'}, inplace=True)\n dataFrame = dataFrame[['Technology', 'Company', 'id']]\n techCount = dataFrame.groupby([\"Technology\", \"Company\"])[\"id\"].count().unstack(fill_value=0).stack().reset_index(\n name=\"count\")\n techList = techCount[\"Technology\"].unique().tolist()\n\n finalList = []\n for tech in techList:\n tech = techCount[techCount[\"Technology\"].str.contains(tech)]\n finalList.append(tech)\n\n automation = finalList[0]['count'].tolist()\n de = finalList[1]['count'].tolist()\n devops = finalList[2]['count'].tolist()\n ios = finalList[3]['count'].tolist()\n java = finalList[4]['count'].tolist()\n ml = finalList[5]['count'].tolist()\n nodejs = finalList[6]['count'].tolist()\n python = finalList[7]['count'].tolist()\n\n chart5 = {\"renderTo\": chart5ID, \"type\": chart5_type}\n series5 = [\n {\"name\": 'Automation', \"data\": automation, \"color\": \"#CC00FE\"},\n {\"name\": 'Data Engineering', \"data\": de, \"color\": \"#006CFE\"},\n {\"name\": 'DevOps', \"data\": devops, \"color\": \"#FE0068\"},\n {\"name\": 'IOS', \"data\": ios, \"color\": \"#FE00BD\"},\n {\"name\": 'Java', \"data\": java, \"color\": \"#00FE45\"},\n {\"name\": 'Machine Learning', \"data\": ml, \"color\": \"#FEEB00\"},\n {\"name\": 'NodeJS', \"data\": nodejs, \"color\": \"#FE8300\"},\n {\"name\": 'Python', \"data\": python, \"color\": \"#FE0000\"}\n ]\n title5 = {\"text\": 'Company vs Technologies Stacked Bar Chart'}\n xAxis5 = {\"categories\": company, \"title\": {\"text\": \"Companies\"}}\n yAxis5 = {\"title\": {\"text\": \"No. 
of People\"}}\n plotOptions5 = {\"bar\": {\"stacking\": \"normal\"}}\n tooltip2 = {\"borderRadius\": \"20\", \"shared\": \"true\"}\n\n ###################################################################################################################\n # Chart6\n lab = []\n for i in data.Lab.unique():\n lab.append(i)\n\n dataFrame = data\n dataFrame.rename(columns={'Unnamed: 0': 'id'}, inplace=True)\n dataFrame = dataFrame[['Gender', 'Lab', 'id']]\n sexCount = dataFrame.groupby([\"Gender\", \"Lab\"])[\"id\"].count().unstack(fill_value=0).stack().reset_index(\n name=\"count\")\n genderList = sexCount[\"Gender\"].unique().tolist()\n\n finalList = []\n for sex in genderList:\n sex = sexCount[sexCount[\"Gender\"].str.contains(sex)]\n finalList.append(sex)\n\n female = finalList[0]['count'].tolist()\n male = finalList[1]['count'].tolist()\n\n chart6 = {\"renderTo\": chart6ID, \"type\": chart6_type, \"polar\": \"true\", \"inverted\": \"true\"}\n series6 = [{\"name\": 'Male', \"data\": male, \"color\": \"#00FFFF\"},\n {\"name\": 'Female', \"data\": female, \"color\": \"#DC143C\"}]\n title6 = {\"text\": \"Distribution of Males and Females in Lab X and Y\"}\n xAxis6 = {\"categories\": lab}\n yAxis6 = {\"crosshair\": {\"enabled\": \"true\", \"color\": \"#333\"}}\n plotOptions6 = {\"column\": {'dataLabels': {'enabled': 'true'}}}\n pane6 = {\"size\": \"85%\", \"innerSize\": \"20%\", \"endAngle\": \"300\"}\n\n return render_template('index.html',\n chart1ID=chart1ID, chart1=chart1, series1=series1, title1=title1,\n xAxis1=xAxis1, yAxis1=yAxis1, plotOptions1=plotOptions1, tooltip1=tooltip1, credits=credits,\n chart2ID=chart2ID, chart2=chart2, series2=series2, title2=title2, xAxis2=xAxis2,\n yAxis2=yAxis2, plotOptions2=plotOptions2, tooltip2=tooltip2,\n chart3ID=chart3ID, chart3=chart3, series3=series3, title3=title3, xAxis3=xAxis3,\n yAxis3=yAxis3, plotOptions3=plotOptions3, pane3=pane3,\n chart4ID=chart4ID, chart4=chart4, series4=series4, title4=title4, xAxis4=xAxis4,\n yAxis4=yAxis4, plotOptions4=plotOptions4,\n chart5ID=chart5ID, chart5=chart5, series5=series5, title5=title5, xAxis5=xAxis5,\n yAxis5=yAxis5, plotOptions5=plotOptions5,\n chart6ID=chart6ID, chart6=chart6, series6=series6, title6=title6, xAxis6=xAxis6,\n yAxis6=yAxis6, plotOptions6=plotOptions6, pane6=pane6,\n reload=time.time())\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"378184703","text":"#Importing libraries\r\nfrom detecto import core, utils\r\nfrom detecto.visualize import show_labeled_image, plot_prediction_grid\r\nfrom torchvision import transforms\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#Image augmentation\r\ncustom_transforms = transforms.Compose([\r\ntransforms.ToPILImage(),\r\ntransforms.Resize(900),\r\ntransforms.RandomHorizontalFlip(0.5),\r\ntransforms.ColorJitter(saturation=0.2),\r\ntransforms.ToTensor(),\r\nutils.normalize_transform(),\r\n])\r\n\r\n#Model training\r\nTrain_dataset = core.Dataset('images/', transform=custom_transforms)#L1\r\nTest_dataset = core.Dataset('images/')#L2\r\nloader = core.DataLoader(Train_dataset, batch_size=2, shuffle=True)#L3\r\nmodel = core.Model(['dog'])#L4\r\nlosses = model.fit(loader, Test_dataset, epochs=25, lr_step_size=5, learning_rate=0.001, verbose=True)#L5\r\n\r\n#Model saving\r\nmodel.save('custom_model_weights.pth')\r\n\r\n\r\n#Inputting test image\r\nimage = utils.read_image('images/dog/n02085620_574.jpg')\r\n\r\n#Initializes the trained model\r\nmodel = core.Model.load('custom_model_weights.pth', ['dog'])\r\n\r\n#Evaluating model\r\nlabels, boxes, scores = model.predict_top(image)\r\n\r\n#Outputting predictions\r\nshow_labeled_image(image, boxes, labels)\r\nprint(labels, boxes, scores)\r\nplt.plot(losses)\r\nplt.show()\r\n","sub_path":"AI & ML/pytorch detecto training and prediction tutorial.py","file_name":"pytorch detecto training and prediction tutorial.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"260598914","text":"import unittest\nfrom unittest import mock\n\nfrom Week6B.book import Book\n\n\nclass BookTestSuite(unittest.TestCase):\n def test_init_correctParameters_setAllNecessaryAttributes(self):\n b = Book(\"132350882\", \"Clean Code\", \"Robert C. Martin\")\n self.assertEqual(b.isbn, \"132350882\", \"-0.25: ISBN is not set!\")\n self.assertEqual(b.title, \"Clean Code\", \"-0.25: title is not set!\")\n self.assertEqual(b.author, \"Robert C. Martin\", \"-0.25: author is not set!\")\n self.assertIsNone(b.price, \"-0.25: price is not set!\")\n\n def test_set_price_10_setThePriceTo10(self):\n b = Book(\"132350882\", \"Clean Code\", \"Robert C. Martin\")\n expected_price = 10\n b.set_price(expected_price)\n self.assertEqual(expected_price, b.price)\n\n def test_open_noArgs_printCorrectText(self):\n title = \"Clean Code\"\n author = \"Robert C. Martin\"\n b = Book(\"132350882\", title, author)\n print_mock = mock.MagicMock()\n\n with mock.patch(\"builtins.print\", print_mock):\n b.open()\n\n print_mock.assert_called_with(\"The {} written by {} is opened.\".format(title, author))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Week6B/book_tests.py","file_name":"book_tests.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"68650605","text":"import logging\nimport math\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense\nimport tensorflow.keras.initializers as initializers\n\n\nlogger = logging.getLogger()\n\n\nclass SimpleModel(Model):\n \"\"\"Feed Forward Neural Network that represents a stochastic policy\n for continuous action spaces. Mu and sigma are calculated using\n the same internal layers.\n \"\"\"\n\n def __init__(self, model_path: Path, layer_sizes: List[int], learning_rate: float,\n actions_size: int, hidden_activation: str = \"relu\", mu_activation: str = \"tanh\",\n sigma_activation: str = \"softplus\",\n start_mu: float = 0.0, start_sigma: float = 1.0):\n \"\"\"Creates a new FFNN model to represent a policy. Implements all needed\n methods from tf.keras.Model.\n\n Args:\n model_path: Where to save the model and other training info\n layer_sizes: A list with the number of neurons on each hidden layer\n learning_rate: The training step size\n actions_size: The number of possible actions\n hidden_activation: Activation function for hidden layer neurons\n mu_activation: Activation function for mu\n sigma_activation: Activation function for sigma\n start_mu: The starting Mu value\n start_sigma: The starting Sigma value\n \"\"\"\n\n super(SimpleModel, self).__init__()\n self.model_path = model_path\n self.layer_sizes = layer_sizes\n self.output_size = actions_size\n self.learning_rate = learning_rate\n self.hidden_activation = hidden_activation\n self.mu_activation = mu_activation\n self.sigma_activation = sigma_activation\n self.start_mu = start_mu\n self.start_sigma = start_sigma\n\n self.hidden_layers = []\n for i in self.layer_sizes:\n self.hidden_layers.append(Dense(i, activation=self.hidden_activation,\n name=f\"hidden_{len(self.hidden_layers)}\"))\n\n self.mu = Dense(self.output_size, activation=self.mu_activation, name=\"dense_mu\",\n kernel_initializer=initializers.Constant(self.start_mu),\n bias_initializer=initializers.Zeros())\n self.sigma = Dense(self.output_size, activation=self.sigma_activation, name=\"dense_sigma\",\n kernel_initializer=initializers.Constant(self.start_sigma),\n bias_initializer=initializers.Zeros())\n\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n self.train_log_dir = Path(model_path, \"train_log\")\n self.summary_writer = tf.summary.create_file_writer(str(self.train_log_dir))\n\n def get_config(self):\n \"\"\"Used by tf.keras to load a saved model.\"\"\"\n return {\"layer_sizes\": self.layer_sizes,\n \"learning_rate\": self.learning_rate,\n \"output_size\": self.output_size,\n \"hidden_activation\": self.hidden_activation,\n \"mu_activation\": self.mu_activation,\n \"sigma_activation\": self.sigma_activation}\n\n @tf.function\n def call(self, inputs: tf.Tensor):\n \"\"\"See base Class.\"\"\"\n\n logger.info(\"[Retrace] call\")\n x = inputs\n for layer in self.hidden_layers:\n x = layer(x)\n mu = self.mu(x)\n sigma = self.sigma(x)\n\n return mu, sigma\n\n @tf.function\n def train_step(self, states: tf.Tensor, actions: tf.Tensor,\n weights: tf.Tensor) -> (Tuple[tf.Tensor], tf.Tensor, tf.Tensor):\n \"\"\"See base Class.\"\"\"\n\n logger.info(\"[Retrace] train_step\")\n with tf.GradientTape() as tape:\n mu, sigma = self(states)\n log_probabilities = self._get_log_probabilities(mu, sigma, actions)\n loss = -tf.reduce_mean(weights * log_probabilities)\n\n 
gradients = tape.gradient(loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n return (mu, sigma), loss, log_probabilities, gradients\n\n @tf.function\n def _get_log_probabilities(self, mu: tf.Tensor, sigma: tf.Tensor, actions: tf.Tensor) -> tf.Tensor:\n \"\"\"Gets the logarithmic probabilities of each action for each set of logits.\n\n Args:\n mu: The mean value for each action for each step\n sigma: The variance value for each action for each step\n actions: The actual actions used in each step\n\n Returns:\n The logarithmic probabilities for the actions\n \"\"\"\n\n logger.info(\"[Retrace] get_log_probabilities\")\n\n x1 = actions - mu\n x2 = x1 ** 2\n sigma2 = sigma ** 2\n x3 = x2 / sigma2\n logsigma = tf.math.log(sigma)\n x4 = x3 + (2 * logsigma)\n actions_sum = tf.reduce_sum(x4, axis=-1)\n x5 = actions_sum + self.output_size * tf.math.log(2 * math.pi)\n x6 = - x5 * 0.5\n log_probabilities = x6\n return log_probabilities\n\n @tf.function\n def produce_actions(self, states: tf.Tensor) -> tf.Tensor:\n \"\"\"Get a sample from the action probability distribution produced\n by the model, for each passed state.\n\n Args:\n states: The list of states representations\n\n Returns:\n The sampled action for each state\n \"\"\"\n\n logger.info(\"[Retrace] produce_actions\")\n mu, sigma = self(states)\n actions = tfp.distributions.Normal(mu, sigma).sample([1])\n return actions\n\n\ndef test():\n tf.config.run_functions_eagerly(True)\n tf.random.set_seed(0)\n model = SimpleModel(model_path=Path(\"experiments/tests\"),\n layer_sizes=[],\n learning_rate=0.1,\n actions_size=1,\n hidden_activation=\"tanh\",\n mu_activation=\"tanh\",\n sigma_activation=\"softplus\")\n\n state = np.array([[1.], [1.], [1.]])\n reward = np.array([0.5, 1., 0.2])\n\n actions = model.produce_actions(state)\n print(f\"actions train= {actions}\")\n\n (mu, sigma), loss, log_probabilities, gradients = model.train_step(state, actions, reward)\n print(f\"Mu = {mu}\")\n print(f\"Sigma = {sigma}\")\n print(f\"loss = {loss}\")\n print(f\"log_probabilities train= {log_probabilities}\")\n print(f\"gradients train= {gradients}\")\n pass\n\n\nif __name__ == '__main__':\n\n test()\n","sub_path":"models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
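_get_log_probabilities hand-rolls the diagonal-Gaussian log-likelihood, which should equal the sum of independent Normal log-pdfs. A numeric cross-check against TensorFlow Probability with made-up values:

import numpy as np
import tensorflow_probability as tfp

mu, sigma = np.array([[0.0, 1.0]]), np.array([[1.0, 0.5]])
actions = np.array([[0.3, 0.8]])

# Manual form used by the model: -0.5 * [sum((x-mu)^2/sigma^2 + 2*log(sigma)) + k*log(2*pi)]
manual = -0.5 * np.sum((actions - mu) ** 2 / sigma**2 + 2 * np.log(sigma)
                       + np.log(2 * np.pi), axis=-1)
tfp_val = tfp.distributions.Normal(mu, sigma).log_prob(actions).numpy().sum(axis=-1)
print(manual, tfp_val)  # the two should agree to float precision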
+{"seq_id":"301414878","text":"import numpy as np \nfrom os import listdir\nfrom os.path import isfile, join\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport torch\nimport seaborn as sns\n\nmypath = '../weights_sin2Reg/cifar10/'\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\nlayers = []\nfor each in sorted(onlyfiles):\n layers.append(np.load(mypath+each).ravel())\n\n#layers.append(np.load(mypath+'/svhn_q/svhn_features0_quantized_wrpn.npy').ravel())\n#layers.append(np.load(mypath+'/svhn_q/svhn_classifier0_quantized_wrpn.npy').ravel())\n#layers.append(mypath+'/cifar10_conv2_quantized_wrpn.npy')\nprint(onlyfiles)\n\n# plot\nf, axes = plt.subplots(1, 6, figsize=(25, 5), sharex=False)\ncolor = \"b\"\n#sns.set()\n#sns.set(style=\"white\", palette=\"bright\", color_codes=True)\nsns.set(palette=\"bright\", color_codes=True)\n\n#plt.ylabel('counts')\n#sns.distplot( layers[0] , ax=axes[0], color=color, bins=100, kde=False, axlabel='epoch#')\nleft = -0.35\nright = 0.35\nplt.subplot(1, 6, 1)\n_ = plt.hist(layers[0], 50)\n#plt.xlim((left,right))\nplt.subplot(1, 6, 2)\n_ = plt.hist(layers[1], 50)\n#plt.xlim((left,right))\nplt.subplot(1, 6, 3)\n_ = plt.hist(layers[2], 50)\n#plt.xlim((left,right))\nplt.subplot(1, 6, 4)\n_ = plt.hist(layers[3], 50)\n#plt.xlim((left,right))\nplt.subplot(1, 6, 5)\n_ = plt.hist(layers[4], 50)\n#plt.xlim((left,right))\n#plt.xlim((left,right))\n#plt.xlim((-0.5, 0.5))\n#plt.savefig('examples/classifier_compression/figs/fig_sin2_bits-44444_cf_'+str(cf)+'_lr_'+str(lr)+'_TMP.png')\nplt.savefig('cifar10_sinreq-learn.png')\n","sub_path":"examples/classifier_compression/plotting/hist_plot_weights_3.py","file_name":"hist_plot_weights_3.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"544981789","text":"import sys\n\nfrom PyQt6.QtCore import QTimer, QDateTime\nfrom PyQt6.QtWidgets import QWidget, QListWidget, QLabel, QPushButton, QGridLayout, QApplication\n\n\nclass WinForm(QWidget):\n def __init__(self, parent=None):\n super(WinForm, self).__init__(parent)\n self.setWindowTitle(\"QTimer demo\")\n self.listFile = QListWidget()\n self.label = QLabel(\"显示当前时间\")\n self.startButton = QPushButton(\"开始\")\n self.endButton = QPushButton(\"结束\")\n layout = QGridLayout(self)\n\n # 初始化定时器\n self.timer = QTimer(self)\n # 显示时间\n self.timer.timeout.connect(self.showTime) # timeout 信号连接到特定的槽,当定时器超时,发出 timeout 信号\n\n layout.addWidget(self.label, 0, 0, 1, 2)\n layout.addWidget(self.startButton, 1, 0)\n layout.addWidget(self.endButton, 1, 1)\n\n self.startButton.clicked.connect(self.start_timer)\n self.endButton.clicked.connect(self.end_timer)\n\n self.setLayout(layout)\n\n def showTime(self):\n # 获取当前系统时间\n time = QDateTime.currentDateTime()\n # 设置时间格式\n timeDisplay = time.toString(\"yyyy-MM-dd hh:mm:ss dddd\")\n self.label.setText(timeDisplay)\n\n def start_timer(self):\n # 设置时间间隔并启动定时器\n self.timer.start(1000) # start 内设置时间间隔,启动或重新启动计时器,如果计时器在运行,则重启\n self.startButton.setEnabled(False)\n self.endButton.setEnabled(True)\n\n def end_timer(self):\n self.timer.stop() # 停止计时器\n self.startButton.setEnabled(True)\n self.endButton.setEnabled(False)\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n form = WinForm()\n form.show()\n sys.exit(app.exec())\n","sub_path":"src/pyside_demo/qtimer_demo.py","file_name":"qtimer_demo.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"598949673","text":"#!/usr/bin/python3.6.9\nfrom pymongo import MongoClient\nimport sys\n\nglobal dOut\n\n\ndef connectDB(myDB):\n try:\n connection = MongoClient(myDB)\n return connection\n except:\n sendResult(\"Connect Error\")\n\n\ndef closeConnect(connection):\n try:\n connection.close()\n except:\n sendResult(\"Close Error\")\n\n\ndef cashflow(myDB):\n try:\n bConnect = connectDB(myDB)\n\n baccs = bConnect.cisbank.baccs\n moves = bConnect.cisbank.moves\n\n bQuery = {'bAlias': bId}\n mQuery = {'mCode': mId}\n bacc = baccs.find_one(bQuery)\n move = moves.find_one(mQuery)\n if move['mSign']:\n newBalance = bacc['bBalance'] + move['mAmmount']\n else:\n newBalance = bacc['bBalance'] - move['mAmmount']\n\n oldB = bacc['bBalance']\n\n newMoves = []\n\n newMoves.extend(bacc['bMoves'])\n newMoves.append(mId)\n\n mOld = {\"$set\": {\"mOld\": bacc['bBalance']}}\n mNew = {\"$set\": {\"mNew\": newBalance}}\n\n bBalance = {\"$set\": {\"bBalance\": newBalance}}\n bMoves = {\"$set\": {\"bMoves\": newMoves}}\n\n baccs.update_one(bQuery, bBalance)\n baccs.update_one(bQuery, bMoves)\n\n moves.update_one(mQuery, mOld)\n moves.update_one(mQuery, mNew)\n\n closeConnect(bConnect)\n status = True\n return status\n\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n sendResult(message)\n status = False\n return status\n\n\ndef totalizeMonths(tId, mId, myDB):\n try:\n tConnect = connectDB(myDB)\n\n moves = tConnect.cisbank.moves\n mtaccs = tConnect.cisbank.mtaccs\n\n mQuery = {'mCode': mId}\n move = moves.find_one(mQuery)\n\n mtQuery = {'tName': tId}\n mtacc = mtaccs.find_one(mtQuery)\n\n newBalance = mtacc['tBalance'] + move['mAmmount']\n mtBalance = {\"$set\": {\"tBalance\": newBalance}}\n\n mtaccs.update_one(mtQuery, mtBalance)\n closeConnect(tConnect)\n status = True\n return status\n\n except Exception as ex:\n template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n message = template.format(type(ex).__name__, ex.args)\n sendResult(message)\n status = False\n return status\n\n\ndef sendResult(dOut):\n print(dOut)\n sys.stdout.flush()\n\n\ndef main():\n #myDB = \"mongodb://localhost:27017/cisbank\"\n myDB = \"mongodb://angeloacr:cisbankDataBase47@ds051595.mlab.com:51595/cisbank\"\n\n bId = sys.argv[1]\n tId = sys.argv[2]\n mId = sys.argv[3]\n #mDate = sys.argv[4]\n statusB = updateB(bId, mId, myDB)\n statusT = updateT(tId, mId, myDB)\n #statusM = totalizeMove(mDate, mId, myDB)\n statusA = totalizeMonths(tId, mId, myDB)\n if statusB and statusT and statusM:\n sendResult(\"Success\")\n else:\n sendResult(\"Error\")\n\n\n# if __name__ == \"__main__\":\n # sendResult(\"Init\")\nmain()\n","sub_path":"cisbankServer/python/balancestatus.py","file_name":"balancestatus.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"42091632","text":"import argparse\nimport os\nimport time\n\nimport googleapiclient.discovery\nfrom oauth2client.client import GoogleCredentials\n\n# [START list_disks]\ndef list_disks (compute, PROJECT, d_zone):\n result = compute.disks().list(PROJECT=PROJECT, d_zone=d_zone).execute()\n return result['items'] if 'items' in result else None\n# [END list_disks]\n\n# [START disk_attach]\ndef disk_attach(compute, PROJECT, d_zone, vm_name):\n config = {\n 'source': \"/compute/v1/projects/\"+str(PROJECT)+\"/zones/\"+str(d_zone)+\"/disks\"+str(vm_name)\n }\n return compute.instances().attachDisk(\n project=PROJECT,\n zone=d_zone,\n instance=vm_name,\n body=config\n )\n# [END disk_attach]\n\n# [START disks_creation]\ndef disks_creation(compute, PROJECT, d_zone, d_name, d_type, d_size):\n config = {\n 'name' : d_name,\n 'type' : d_type,\n 'sizeGB' : d_size,\n }\n return compute.disks().insert(PROJECT=PROJECT, d_zone=d_zone, body=config).execute()\n# [END disks_creation]\n\n# [START wait_for_operation]\ndef wait_for_operation(compute, PROJECT, d_zone, operation):\n print('Waiting for operation to finish...')\n while True:\n result = compute.d_zoneOperations().get(\n PROJECT=PROJECT,\n d_zone=d_zone,\n operation=operation).execute()\n\n if result['status'] == 'DONE':\n print(\"done.\")\n if 'error' in result:\n raise Exception(result['error'])\n return result\n\n time.sleep(1)\n# [END wait_for_operation]\n\nPROJECT = os.environ['GCLOUD_PROJECT']\nBUCKET = os.environ['CLOUD_STORAGE_BUCKET']\n\n# [START run] \ndef main():\n #reading the options from cfg file\n file = open(\"test.cfg\",\"r\")\n options=list(file.readlines())\n for obj in options:\n if 'VM' in obj:\n if 'name' in obj:\n a, vm_name=str.split(obj,'=')\n if 'Disk' in obj:\n if 'name' in obj:\n a, d_name=str.split(obj,'=')\n if 'type' in obj:\n a, d_type=str.split(obj,'=')\n if 'size' in obj:\n a, d_size=str.split(obj,'=')\n if 'zone' in obj:\n a, d_zone=str.split(obj,'=')\n\n compute = googleapiclient.discovery.build('compute','v1')\n\n print('Creating instance.')\n\n operation = disks_creation(compute, PROJECT, d_zone, d_name, d_type, d_size)\n wait_for_operation(compute, PROJECT, d_zone, operation['name'])\n\n disks = list_disks(compute, PROJECT, d_zone)\n\n operation = disk_attach(compute, PROJECT, d_zone, vm_name)\n wait_for_operation(compute, PROJECT, d_zone, operation['name'])\n print('Disks in PROJECT %s and d_zone %s:' % (PROJECT, d_zone))\n for disk in disks:\n print(' - ' + disk['name'])\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__,formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('PROJECT_id', help='Your Google Cloud PROJECT ID.')\n parser.add_argument('bucket_name', help='Your Google Cloud Storage bucket name.')\n parser.add_argument('--d_zone', default='us-central1-f', help='Compute Engine d_zone to deploy to.')\n parser.add_argument('--size' , default='10GB', help='Size of the disk')\n parser.add_argument('--type' , default='pd-standard', help='Type of the disk')\n parser.add_argument('--name', default='demo-instance', help='New instance name.')\n args = parser.parse_args()\n main()\n# [END run]\n","sub_path":"automation_api/create_disks.py","file_name":"create_disks.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"635307619","text":"from exceptions import Empty\n\nclass ArrayQueue:\n\t\n\tdef __init__(self):\n\t\tself._data = [] \n\t\tself._size = 0\n\t\tself._front = 0\n\t\t\n\tdef __len__(self):\n\t\treturn self._size\n\t\n\tdef is_empty(self):\n\t\treturn self._size ==0\n\t\n\tdef enqueue(self, e):\n\t\tself._data.append(e)\n\t\tself._size = self._size+1\n\t\n\tdef dequeue(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is Empty')\n\t\tvalue = self._data[self._front]\n\t\tself._data[self._front] = None\n\t\tself._front = self._front+1\n\t\tself._size = self._size-1\n\t\treturn value\n\t\n\tdef first(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\treturn self._data[self._front]\n\t\t\nq = ArrayQueue()\nq.enqueue(10)\nq.enqueue(20)\nq.enqueue(30)\nq.enqueue(50)\nprint('Queue is :' , q._data)\n\t\t\n\t\t","sub_path":"arrayqueue.py","file_name":"arrayqueue.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"121275538","text":"\ndef read_matrix():\n\tsudoku_length = 9\n\tmatrix = [[int(x) for x in input().split()] for y in range(sudoku_length)]\n\treturn matrix\n\n\ndef validate_sudoku(matrix):\n\t# Validación por filas\n\tfor i in range(9):\n\t\toccur = [False for i in range(10)]\n\t\tfor j in range(9):\n\t\t\tif occur[matrix[i][j]]:\n\t\t\t\treturn False\n\t\t\toccur[matrix[i][j]] = True\n\t\t\t\n\t# Validación por columnas\n\tfor j in range(9):\n\t\toccur = [False for i in range(10)]\n\t\tfor i in range(9):\n\t\t\tif occur[matrix[i][j]]:\n\t\t\t\treturn False\n\t\t\toccur[matrix[i][j]] = True\n\t\n\t# Validación por grillas\n\tfor i in range(3):\n\t\tfor j in range(3):\n\t\t\toccur = [False for i in range(10)]\n\t\t\tfor k in range(3):\n\t\t\t\tfor l in range(3):\n\t\t\t\t\tif occur[matrix[i * 3 + k][j * 3 + l]]:\n\t\t\t\t\t\treturn False\n\t\t\t\t\toccur[matrix[i * 3 + k][j * 3 + l]] = True\n\n\treturn True\n\n\ndef run():\n\tprint(\"Ingrese el sudoku: \")\n\n\tgrid = read_matrix()\n\n\tis_valid = validate_sudoku(grid)\n\n\tif(is_valid):\n\t\tprint(\"El sudoku ingresado es correcto\")\n\telse:\n\t\tprint(\"El sudoku ingresado es incorrecto\")\n\n\nif __name__ == \"__main__\":\n\trun()\n\n\n# 7 4 3 9 5 1 6 8 2\n# 1 6 2 4 8 7 3 9 5\n# 9 5 8 6 3 2 7 1 4\n# 2 1 9 8 7 3 5 4 6\n# 3 7 4 5 6 9 1 2 8\n# 5 8 6 1 2 4 9 7 3\n# 4 9 5 2 1 6 8 3 7\n# 8 2 7 3 9 5 4 6 1\n# 6 3 1 7 4 8 2 5 9","sub_path":"semana_01/codigos/ejercicio_03.py","file_name":"ejercicio_03.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"451856459","text":"#Problem 2: https://leetcode.com/problems/meeting-rooms-ii/\n#Time Complexity: O(n.logn)\n#Space Complexity: O(n)\n#Approach- low-0th element , high=last element, find mid ,perform Binary Search by computing number of elements less than mid, moving low and high\n#pointers accordingly.\nclass Solution:\n def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n #edge case\n if not intervals:\n return 0\n #put start and end timings in different array\n start_timing=[]\n end_timing=[]\n for i in range(len(intervals)):\n start_timing.append(intervals[i][0])\n for i in range(len(intervals)):\n end_timing.append(intervals[i][1])\n \n #sort them\n start_timing.sort()\n print(start_timing)\n end_timing.sort()\n print(end_timing)\n \n start_ptr=0\n end_ptr=0\n rooms=0\n \n while start_ptr=end_timing[end_ptr]:\n rooms-=1\n end_ptr+=1\n rooms+=1\n start_ptr+=1\n return rooms\n ","sub_path":"Problem2_Meeting Rooms.py","file_name":"Problem2_Meeting Rooms.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"279159961","text":"import sys\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom scipy.constants import codata\nfrom scipy import integrate\nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['text.latex.preview'] = True\nif len(sys.argv) < 2:\n print(\"Please inform the filename.\")\n exit(1)\nfname = sys.argv[1]\ntry:\n data = np.loadtxt(fname, dtype='float')\nexcept IOError:\n print(\"File '%s' does not exit.\",fname)\n exit(1)\n# scientific constants\nc = codata.value('speed of light in vacuum') * 100 # cgs\nh = codata.value('Planck constant') * 1e7 # cgs\nk = codata.value('Boltzmann constant') * 1e7 # cgs\nMsun = 1.9885e33 # solar mass cgs\nl_sun = 3.828e+33\nk230 = 0.005\na = float(2)\nlambda_0 = float(200e-4) # cgs\nN = np.ones(3)\n# fitting curve\ndef fbb(freq, Td, b):\n wavelength = c / freq\n exp1 = 1 - np.exp(- ((lambda_0/wavelength)**b))\n exp2 = np.exp(h*freq/k/Td) - 1\n return 2 * h * exp1 * freq**3 / exp2 / c**2\ndef f(freq, Td, b):\n wavelength = c / freq\n lambda_c = 1 / ((26.6764 + 6.24629*a)**-2 + (1.90530e-04 + 7.24277e-05*a)*Td) * 1e-4\n freq_c = c / lambda_c\n power = ((wavelength/lambda_c)**a) * np.exp(- (wavelength/(lambda_c*3/4))**2)\n return fbb(freq,Td, b) + fbb(freq_c, Td, b)*power\ndef mass(freq, flux, Td, b, mpc):\n logk = np.log(k230) + b*np.log(freq/230e9)\n logM = -np.log(1e23) + np.log(flux) + 2*np.log(mpc) - logk - np.log(2*h*(freq**3)/(c**2)) - np.log(np.exp(h*freq/k/Td)-1) - np.log(Msun) - np.log(1+z)\n return logM / np.log(10)\n# data\nz = data[:,1] # redshift\nd = data[:,2]*3.086e+24 # luminosity distance: Mpc \ny = data[:,3]*1e-6 # Jy\ny_err = data[:,4]*1e-6\nt = np.array([29.68, 43.37, 34.42, 34.98, 34.69, 39.97, 42.67, 35.63, 33.06])\nt_err = np.array([1.55, 1.21, 0.64, 0.49, 0.88, 0.69, 3.03, 0.63, 0.22])\nb_est = np.array([1.27, 1.39, 1.33, 1.64, 1.33, 1.32, 1.32, 1.44, 1.79])\nb_err = np.array([0.13, 0.04, 0.04, 0.03, 0.05, 0.03, 0.11, 0.04, 0.02])\nup = c / 8.0e-4\nlow = c / 1000.0e-4\n# calculation\nT0 = np.average(t, weights=(1/t_err))\nT0_err = np.sum(t_err)/np.sqrt(len(t_err))\nb0 = np.average(b_est, weights=(1/b_err))\n#b_err = np.sum(b_err)/np.sqrt(len(b_err))\nx = 343.5e9 * (1+z)\nN = y / f(x, T0, b0)\nIR = N * integrate.quad(f, low, up, args=(T0, b0))[0] * 4 * np.pi * d**2 * 1e-23 / (l_sun)\nIR_up = N * integrate.quad(f, low, up, args=(T0+T0_err, b0))[0] * 4 * np.pi * d**2 * 1e-23 / (l_sun)\nIR_do = N * integrate.quad(f, low, up, args=(T0-T0_err, b0))[0] * 4 * np.pi * d**2 * 1e-23 / (l_sun)\nsfr = IR * 1.7e-10\nsfr_up = IR_up * 1.7e-10\nsfr_do = IR_do * 1.7e-10\nM = mass(x, y, T0, b0, d)\nprint(b0)\nprint(T0, T0_err)\nprint(IR)\nprint(IR_up-IR)\nprint(IR-IR_do)\nprint(sfr)\nprint(sfr_up-sfr)\nprint(sfr-sfr_do)\nprint(M)\nyerr0 = y_err[0]\npt = np.arange(4., 1200., 1) * 1e-4\nplt.plot(c/x[0]*1e4, y[0], 'bo', label='data')\nplt.errorbar(c/x[0]*1e4, y[0], yerr=yerr0, fmt='bo')\nplt.plot(pt*1e4, N[0]*f(c/pt, T0, b0), 'r-', label='Casey')\nplt.fill_between(pt*1e4, N[0]*f(c/pt, T0-T0_err, b0), N[0]*f(c/pt, T0+T0_err, b0), color='grey', alpha=0.2)\nplt.ylabel(r'$Flux\\,(Jy)$', fontsize='large')\nplt.xlabel(r'$Wavelength\\,(\\mu m)$', fontsize='large')\nplt.yscale('log')\nplt.xscale('log')\nplt.title('IR SED of GRB 080607')\nplt.legend(loc='upper right', fontsize='medium', handletextpad=0.1)\nplt.grid(True)\nplt.show()","sub_path":"final/grb.py","file_name":"grb.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"446150833","text":"from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.relativelayout import RelativeLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\n\nBuilder.load_string(\"\"\"\n:\n RelativeLayout:\n size: 600,600\n Label:\n text: \"Title\"\n color: [0,1,0,1]\n font_size: 150\n Label:\n text: \"Random\"\n pos: -400,0\n Button:\n text: \"Button\"\n size: 1,1\n pos: 0,200\n\n\n\n\"\"\")\n\n\nclass Menu(Screen):\n pass\n\nsm = ScreenManager()\nsm.add_widget(Menu(name='menu'))\n\n\nclass UserInterface(App):\n\n def build(self):\n return sm;\n\n\n\nif __name__ == '__main__':\n UserInterface().run()","sub_path":"currentinterface.py","file_name":"currentinterface.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"552289387","text":"import os\nfrom setuptools import find_packages, setup\n\n\ndef get_package_data():\n ''' Find all additional package data to distribute with code. '''\n\n baseline_images = ['tests/baseline_images/%s/*' % x\n for x in os.listdir('ggplot/tests/baseline_images')]\n\n return {'ggplot': baseline_images + [\"exampledata/*.csv\", \"geoms/*.png\"]}\n\n\ndef get_readme():\n ''' Retrieve README.rst's content in a safe way. '''\n with open('README.rst') as f:\n return f.read()\n\n\nsetup(name=\"ggplot\",\n version=\"0.3.0\",\n author=\"Greg Lamp\",\n author_email=\"greg@yhathq.com\",\n url=\"https://github.com/yhat/ggplot/\",\n license=\"BSD\",\n packages=find_packages(),\n package_dir={\"ggplot\": \"ggplot\"},\n package_data=get_package_data(),\n description=\"ggplot for python\",\n long_description=get_readme(),\n install_requires=[\"pandas\", \"matplotlib\", \"scipy\", \"statsmodels\",\n \"patsy\"],\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3'],\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"217253092","text":"\"\"\"\nWord Count Engine\nImplement a document scanning function wordCountEngine, which receives a string document and returns a list of all unique words in it and their number of occurrences, sorted by the number of occurrences in a descending order. If two or more words have the same count, they should be sorted according to their order in the original sentence. Assume that all letters are in english alphabet. You function should be case-insensitive, so for instance, the words “Perfect” and “perfect” should be considered the same word.\n\nThe engine should strip out punctuation (even in the middle of a word) and use whitespaces to separate words.\n\nAnalyze the time and space complexities of your solution. Try to optimize for time while keeping a polynomial space complexity.\n\nExamples:\ninput: document = \"Practice makes perfect. you'll only\n get Perfect by practice. just practice!\"\n\noutput: [ [\"practice\", \"3\"], [\"perfect\", \"2\"],\n [\"makes\", \"1\"], [\"youll\", \"1\"], [\"only\", \"1\"], \n [\"get\", \"1\"], [\"by\", \"1\"], [\"just\", \"1\"] ]\nImportant: please convert the occurrence integers in the output list to strings (e.g. \"3\" instead of 3). We ask this because in compiled languages such as C#, Java, C++, C etc., it’s not straightforward to create mixed-type arrays (as it is, for instance, in scripted languages like JavaScript, Python, Ruby etc.). The expected output will simply be an array of string arrays.\n\nConstraints:\n[time limit] 5000ms\n[input] string document\n[output] array.array.string\n\"\"\"\n# Time complexity: O(N)\n# Space complexity: O(N)\nimport collections\ndef word_count_engine(document):\n punct = \"!@#$%^&*(),./;':\\\"[]{}?<>~\"\n formatted = [process(word, punct) for word in document.split()]\n count = collections.defaultdict(int)\n for word in formatted:\n if word: count[word] += 1\n \n ordered_by_freq = collections.defaultdict(list)\n for word in formatted:\n if word in count:\n ordered_by_freq[count[word]].append(word)\n del count[word]\n \n output = []\n for i in reversed(range(len(formatted))):\n if i in ordered_by_freq:\n for word in ordered_by_freq[i]:\n output.append([word, str(i)])\n return output\n \ndef process(word, punct):\n temp = [c for c in word.lower() if c not in punct]\n return \"\".join(temp)\n","sub_path":"Interviews/Pramp: Word Count Engine.py","file_name":"Pramp: Word Count Engine.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"58964183","text":"from rdflib import Literal\nfrom .namespaces import TAG, OWL, SKOS, BRICK, RDFS\n\n\"\"\"\nSet up subclasses of the equipment superclass\n\"\"\"\nequipment_subclasses = {\n \"HVAC\": {\n OWL.equivalentClass: BRICK[\"Heating_Ventilation_Air_Conditioning_System\"],\n \"tags\": [TAG.Heat, TAG.Ventilation, TAG.Air, TAG.Conditioning, TAG.System],\n },\n \"Heating_Ventilation_Air_Conditioning_System\": {\n OWL.equivalentClass: BRICK[\"HVAC\"],\n \"tags\": [TAG.HVAC],\n },\n \"Weather\": {\"tags\": [TAG.Weather]},\n \"Electrical_System\": {\n \"tags\": [TAG.Electrical, TAG.System],\n \"subclasses\": {\n \"Emergency_Power_Off_System\": {\n \"tags\": [TAG.Emergency, TAG.Power, TAG.Off, TAG.Equipment],\n },\n \"Energy_Storage\": {\n \"tags\": [TAG.Energy, TAG.Storage, TAG.Equipment],\n \"subclasses\": {\n \"Battery\": {\n \"tags\": [TAG.Battery, TAG.Energy, TAG.Storage, TAG.Equipment],\n },\n },\n },\n \"Inverter\": {\"tags\": [TAG.Inverter, TAG.Equipment]},\n \"PlugStrip\": {\"tags\": [TAG.PlugStrip, TAG.Equipment]},\n },\n },\n \"Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment],\n \"subclasses\": {\n \"Electrical_Meter\": {\n \"tags\": [TAG.Electrical, TAG.Meter, TAG.Equipment],\n \"subclasses\": {\n \"Building_Electrical_Meter\": {\n \"tags\": [\n TAG.Building,\n TAG.Electrical,\n TAG.Meter,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Building_Meter],\n }\n },\n },\n \"Gas_Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment, TAG.Gas],\n \"subclasses\": {\n \"Building_Gas_Meter\": {\n \"tags\": [TAG.Building, TAG.Gas, TAG.Meter, TAG.Equipment],\n \"parents\": [BRICK.Building_Meter],\n }\n },\n },\n \"Water_Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment, TAG.Water],\n \"parents\": [BRICK.Water_System],\n \"subclasses\": {\n \"Building_Water_Meter\": {\n \"tags\": [TAG.Building, TAG.Water, TAG.Meter, TAG.Equipment],\n \"parents\": [BRICK.Building_Meter],\n },\n \"Chilled_Water_Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment, TAG.Water, TAG.Chilled],\n \"parents\": [BRICK.Chilled_Water_System],\n \"subclasses\": {\n \"Building_Chilled_Water_Meter\": {\n \"tags\": [\n TAG.Building,\n TAG.Chilled,\n TAG.Water,\n TAG.Meter,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Building_Meter],\n },\n },\n },\n \"Hot_Water_Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment, TAG.Water, TAG.Hot],\n \"parents\": [BRICK.Chilled_Water_System],\n \"subclasses\": {\n \"Building_Hot_Water_Meter\": {\n \"tags\": [\n TAG.Building,\n TAG.Hot,\n TAG.Water,\n TAG.Meter,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Building_Meter],\n },\n },\n },\n },\n },\n \"Building_Meter\": {\"tags\": [TAG.Meter, TAG.Equipment, TAG.Building]},\n },\n },\n \"Water_System\": {\n \"tags\": [TAG.Water, TAG.Equipment],\n \"subclasses\": {\n \"Chilled_Water_System\": {\n OWL.equivalentClass: BRICK[\"CWS\"],\n \"tags\": [TAG.Water, TAG.Chilled, TAG.Equipment],\n },\n \"Hot_Water_System\": {\n OWL.equivalentClass: BRICK[\"HWS\"],\n \"tags\": [TAG.Water, TAG.Hot, TAG.Equipment],\n \"subclasses\": {\n \"Domestic_Hot_Water_System\": {\n \"tags\": [TAG.Domestic, TAG.Water, TAG.Hot, TAG.Equipment],\n },\n },\n },\n \"CWS\": {\n OWL.equivalentClass: BRICK[\"Chilled_Water_System\"],\n \"tags\": [TAG.CWS],\n },\n \"HWS\": {OWL.equivalentClass: BRICK[\"Hot_Water_System\"], \"tags\": [TAG.HWS]},\n },\n },\n \"Steam_System\": {\"tags\": [TAG.Steam, TAG.Equipment]},\n \"Solar_Panel\": {\"tags\": [TAG.Solar, TAG.Equipment]},\n \"Shading_System\": {\n \"tags\": [TAG.Shade, TAG.Equipment],\n \"subclasses\": {\"Louver\": {\"tags\": 
[TAG.Shade, TAG.Equipment, TAG.Louver]}},\n },\n \"Lighting_System\": {\n \"tags\": [TAG.Lighting, TAG.Equipment],\n \"subclasses\": {\n \"Lighting\": {\n \"subclasses\": {\n \"Luminaire\": {\"tags\": [TAG.Luminaire, TAG.Equipment]},\n \"Luminaire_Driver\": {\n \"tags\": [TAG.Luminaire, TAG.Driver, TAG.Equipment],\n },\n },\n },\n \"Interface\": {\n \"tags\": [TAG.Equipment, TAG.Interface],\n \"subclasses\": {\n \"Switch\": {\n \"tags\": [TAG.Equipment, TAG.Interface, TAG.Switch],\n \"subclasses\": {\n \"Dimmer\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Interface,\n TAG.Switch,\n TAG.Dimmer,\n ],\n },\n },\n },\n \"Touchpanel\": {\n \"tags\": [TAG.Equipment, TAG.Interface, TAG.Touchpanel],\n },\n },\n },\n },\n },\n \"Furniture\": {\"tags\": [TAG.Equipment, TAG.Furniture]},\n \"Fire_Safety_System\": {\n \"tags\": [TAG.Equipment, TAG.Fire, TAG.Safety, TAG.System],\n \"subclasses\": {\n \"Fire_Control_Panel\": {\n \"tags\": [TAG.Equipment, TAG.Fire, TAG.Safety, TAG.Panel],\n OWL.equivalentClass: BRICK[\"FCP\"],\n },\n \"FCP\": {\"tags\": [TAG.FCP, TAG.Equipment]},\n },\n },\n \"Elevator\": {\"tags\": [TAG.Elevator, TAG.Equipment]},\n \"Security_Equipment\": {\"tags\": [TAG.Security, TAG.Equipment]},\n \"Safety_Equipment\": {\"tags\": [TAG.Safety, TAG.Equipment]},\n \"Camera\": {\"tags\": [TAG.Camera, TAG.Equipment]},\n}\n\n\n\"\"\"\nDefine classes of HVAC equipment\n\"\"\"\nhvac_subclasses = {\n \"Variable_Frequency_Drive\": {\n \"tags\": [TAG.Equipment, TAG.Variable, TAG.Frequency, TAG.Drive],\n OWL.equivalentClass: BRICK[\"VFD\"],\n SKOS.definition: Literal(\n \"Electronic device that varies its output frequency to vary the rotating speed of a motor, given a fixed input frequency. Used with fans or pumps to vary the flow in the system as a function of a maintained pressure.\"\n ),\n },\n \"Valve\": {\n \"tags\": [TAG.Valve, TAG.Equipment]\n # subclasses defined in 'valve_subclasses'\n },\n \"VFD\": {\n \"tags\": [TAG.Equipment, TAG.VFD],\n \"subclasses\": {\n \"Heat_Wheel_VFD\": {\"tags\": [TAG.Equipment, TAG.Heat, TAG.Wheel, TAG.VFD]},\n },\n },\n \"Thermostat\": {\n \"tags\": [TAG.Equipment, TAG.Thermostat],\n SKOS.definition: Literal(\n \"An automatic control device used to maintain temperature at a fixed or adjustable setpoint.\"\n ),\n },\n \"Terminal_Unit\": {\n \"tags\": [TAG.Equipment, TAG.Terminal, TAG.Unit],\n SKOS.definition: Literal(\n \"A device that regulates the volumetric flow rate and/or the temperature of the controlled medium.\"\n ),\n \"subclasses\": {\n \"Fan_Coil_Unit\": {\n \"tags\": [TAG.Equipment, TAG.Fan, TAG.Coil, TAG.Unit],\n OWL.equivalentClass: BRICK[\"FCU\"],\n },\n \"FCU\": {\"tags\": [TAG.FCU]},\n \"Variable_Air_Volume_Box\": {\n \"tags\": [TAG.Equipment, TAG.Variable, TAG.Volume, TAG.Box],\n OWL.equivalentClass: BRICK[\"VAV\"],\n \"subclasses\": {\n \"Variable_Air_Volume_Box_With_Reheat\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Variable,\n TAG.Volume,\n TAG.Box,\n TAG.Reheat,\n ],\n OWL.equivalentClass: BRICK[\"RVAV\"],\n },\n \"RVAV\": {\"tags\": [TAG.Equipment, TAG.RVAV]},\n },\n },\n \"VAV\": {\"tags\": [TAG.Equipment, TAG.VAV]},\n },\n },\n \"Space_Heater\": {\n \"tags\": [TAG.Equipment, TAG.Space, TAG.Heater],\n SKOS.definition: Literal(\n \"A heater used to warm the air in an enclosed area, such as a room or office\"\n ),\n },\n \"Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump],\n SKOS.definition: Literal(\n \"Machine for imparting energy to a fluid, causing it to do work, drawing a fluid into itself through an entrance port, and forcing the fluid out 
through an exhaust port.\"\n ),\n \"subclasses\": {\n \"Water_Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump, TAG.Water],\n \"subclasses\": {\n \"Chilled_Water_Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump, TAG.Chilled, TAG.Water],\n },\n \"Condenser_Water_Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump, TAG.Condenser, TAG.Water],\n },\n \"Hot_Water_Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump, TAG.Hot, TAG.Water],\n },\n },\n },\n },\n },\n \"Heat_Exchanger\": {\n \"tags\": [TAG.Equipment, TAG.Heat, TAG.Exchanger],\n OWL.equivalentClass: BRICK[\"HX\"],\n \"subclasses\": {\n \"Evaporative_Heat_Exchanger\": {\n \"tags\": [TAG.Evaporative, TAG.Equipment, TAG.Heat, TAG.Exchanger],\n },\n \"Condenser_Heat_Exchanger\": {\n \"tags\": [TAG.Condenser, TAG.Equipment, TAG.Heat, TAG.Exchanger],\n },\n \"Heat_Wheel\": {\n \"tags\": [TAG.Equipment, TAG.Heat, TAG.Wheel],\n SKOS.definition: Literal(\n \"A type of energy recovery heat exchanger positioned within the supply and exhaust air streams of an air-handling system or in the exhaust gases of an industrial process, in order to recover the heat energy\"\n ),\n RDFS.seeAlso: Literal(\"https://en.wikipedia.org/wiki/Thermal_wheel\"),\n },\n },\n },\n \"HX\": {\"tags\": [TAG.Equipment, TAG.HX]},\n \"Fume_Hood\": {\n \"tags\": [TAG.Equipment, TAG.Fume, TAG.Hood],\n SKOS.definition: Literal(\n \"A fume-collection device mounted over a work space, table, or shelf and serving to conduct unwanted gases away from the area enclosed.\"\n ),\n },\n \"Filter\": {\n \"tags\": [TAG.Equipment, TAG.Filter],\n SKOS.definition: Literal(\"Device to remove gases from a mixture of gases\"),\n \"subclasses\": {\n \"Mixed_Air_Filter\": {\n \"tags\": [TAG.Equipment, TAG.Mixed, TAG.Air, TAG.Filter],\n },\n },\n },\n \"Fan\": {\n SKOS.definition: Literal(\n \"Any device with two or more blades or vanes attached to a rotating shaft used to produce an airflow for the purpose of comfort, ventilation, exhaust, heating, cooling, or any other gaseous transport.\"\n ),\n \"tags\": [TAG.Equipment, TAG.Fan],\n \"subclasses\": {\n \"Cooling_Tower_Fan\": {\n \"tags\": [TAG.Cool, TAG.Tower, TAG.Equipment, TAG.Fan],\n },\n \"Exhaust_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Exhaust]},\n \"Return_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Return]},\n \"Standby_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Standby]},\n \"Discharge_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Discharge]},\n \"Supply_Fan\": {\n \"tags\": [TAG.Equipment, TAG.Fan, TAG.Supply],\n \"subclasses\": {\n \"Booster_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Booster]},\n },\n },\n },\n },\n \"Economizer\": {\n \"tags\": [TAG.Equipment, TAG.Economizer],\n SKOS.definition: Literal(\n \"Device that, on proper variable sensing, initiates control signals or actions to conserve energy. 
A control system that reduces the mechanical heating and cooling requirement.\"\n ),\n },\n \"Damper\": {\n SKOS.definition: Literal(\n \"Element inserted into an air-distribution system or element of an air-distribution system permitting modification of the air resistance of the system and consequently changing the airflow rate or shutting off the airflow.\"\n ),\n \"tags\": [TAG.Equipment, TAG.Damper],\n \"subclasses\": {\n \"Economizer_Damper\": {\"tags\": [TAG.Equipment, TAG.Damper, TAG.Economizer]},\n \"Exhaust_Damper\": {\"tags\": [TAG.Equipment, TAG.Damper, TAG.Exhaust]},\n \"Outside_Damper\": {\"tags\": [TAG.Equipment, TAG.Damper, TAG.Outside]},\n \"Return_Damper\": {\"tags\": [TAG.Equipment, TAG.Damper, TAG.Return]},\n },\n },\n \"Condenser\": {\n \"tags\": [TAG.Equipment, TAG.Condenser],\n SKOS.definition: Literal(\n \"A heat exchanger in which the primary heat transfer vapor changes its state to a liquid phase.\"\n ),\n },\n \"Computer_Room_Air_Conditioning\": {\n \"tags\": [TAG.Equipment, TAG.Computer, TAG.Room, TAG.Air, TAG.Conditioning],\n SKOS.definition: Literal(\n \"A device that monitors and maintains the temperature, air distribution and humidity in a network room or data center. \"\n ),\n OWL.equivalentClass: BRICK[\"CRAC\"],\n },\n \"CRAC\": {\n \"tags\": [TAG.Equipment, TAG.CRAC],\n OWL.equivalentClass: BRICK[\"Computer_Room_Air_Conditioning\"],\n \"subclasses\": {\n \"Standby_CRAC\": {\"tags\": [TAG.Equipment, TAG.CRAC, TAG.Standby]},\n },\n },\n \"Compressor\": {\n \"tags\": [TAG.Equipment, TAG.Compressor],\n SKOS.definition: Literal(\n \"(1) device for mechanically increasing the pressure of a gas. (2) often described as being either open, hermetic, or semihermetic to describe how the compressor and motor drive is situated in relation to the gas or vapor being compressed. Types include centrifugal, axial flow, reciprocating, rotary screw, rotary vane, scroll, or diaphragm. 1. device for mechanically increasing the pressure of a gas. 2. specific machine, with or without accessories, for compressing refrigerant vapor.\"\n ),\n },\n \"Coil\": {\n SKOS.definition: Literal(\n \"Exchanger that transfers heat from an exhaust airstream to a separated supply airstream.\"\n ),\n \"tags\": [TAG.Equipment, TAG.Coil],\n \"subclasses\": {\n \"Cooling_Coil\": {\"tags\": [TAG.Equipment, TAG.Coil, TAG.Cool]},\n \"Heating_Coil\": {\"tags\": [TAG.Equipment, TAG.Coil, TAG.Heat]},\n },\n },\n \"Chiller\": {\n \"tags\": [TAG.Equipment, TAG.Chiller],\n \"subclasses\": {\n \"Absorption_Chiller\": {\n \"tags\": [TAG.Equipment, TAG.Chiller, TAG.Absorption],\n },\n \"Centrifugal_Chiller\": {\n \"tags\": [TAG.Equipment, TAG.Chiller, TAG.Centrifugal],\n },\n },\n },\n \"Humidifier\": {\"tags\": [TAG.Equipment, TAG.Humidifier]},\n \"Boiler\": {\n \"tags\": [TAG.Equipment, TAG.Boiler],\n SKOS.definition: Literal(\n \"A closed, pressure vessel that uses fuel or electricity for heating water or other fluids to supply steam or hot water for heating, humidification, or other applications.\"\n ),\n },\n \"Air_Handler_Unit\": {\n \"tags\": [TAG.Equipment, TAG.Air, TAG.Handler, TAG.Unit],\n SKOS.definition: Literal(\n \"Assembly consisting of sections containing a fan or fans and other necessary equipment to perform one or more of the following functions: circulating, filtration, heating, cooling, heat recovery, humidifying, dehumidifying, and mixing of air. 
Is usually connected to an air-distribution system.\"\n ),\n OWL.equivalentClass: BRICK[\"AHU\"],\n },\n \"AHU\": {\n \"tags\": [TAG.Equipment, TAG.AHU],\n \"subclasses\": {\n \"Rooftop_Unit\": {\n OWL.equivalentClass: BRICK[\"RTU\"],\n \"tags\": [TAG.Equipment, TAG.Rooftop, TAG.AHU],\n },\n \"RTU\": {\n \"tags\": [TAG.Equipment, TAG.RTU],\n OWL.equivalentClass: BRICK[\"Rooftop_Unit\"],\n },\n },\n },\n}\n\n\"\"\"\nValve subclasses\n\"\"\"\nvalve_subclasses = {\n \"Heating_Valve\": {\n \"tags\": [TAG.Valve, TAG.Heat, TAG.Equipment],\n \"subclasses\": {\n \"Reheat_Valve\": {\"tags\": [TAG.Valve, TAG.Reheat, TAG.Heat, TAG.Equipment]},\n \"Return_Heating_Valve\": {\n \"tags\": [TAG.Valve, TAG.Return, TAG.Heat, TAG.Equipment],\n SKOS.definition: Literal(\n \"A valve installed on the return side of a heat exchanger\"\n ),\n },\n \"Domestic_Hot_Water_Valve\": {\n \"tags\": [\n TAG.Domestic,\n TAG.Water,\n TAG.Hot,\n TAG.Valve,\n TAG.Heat,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Domestic_Hot_Water_System, BRICK.Water_Valve],\n },\n \"Preheat_Hot_Water_Valve\": {\n \"tags\": [\n TAG.Preheat,\n TAG.Water,\n TAG.Hot,\n TAG.Valve,\n TAG.Heat,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Hot_Water_System, BRICK.Water_Valve],\n },\n },\n },\n \"Cooling_Valve\": {\"tags\": [TAG.Valve, TAG.Cool, TAG.Equipment]},\n \"Water_Valve\": {\n \"tags\": [TAG.Valve, TAG.Water, TAG.Equipment],\n \"subclasses\": {\n \"Chilled_Water_Valve\": {\n \"tags\": [TAG.Chilled, TAG.Valve, TAG.Water, TAG.Equipment],\n \"parents\": [BRICK.Chilled_Water_System],\n },\n },\n },\n \"Isolation_Valve\": {\"tags\": [TAG.Isolation, TAG.Valve, TAG.Equipment]},\n}\n\nsecurity_subclasses = {\n \"Access_Control_Equipment\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Access, TAG.Control],\n \"subclasses\": {\n \"Access_Reader\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Security,\n TAG.Access,\n TAG.Reader,\n TAG.Control,\n ],\n SKOS.definition: Literal(\n \"Used in physical security systems to read a credential that allows access through access points. \"\n \"Usually card badge credentials for locked doors or monitored checkpoints.\"\n ),\n },\n },\n # TODO subclasses\n # Access (Control) Panel: The controller panel will typically have electrical connections for the selected credential reader,\n # a relay output to control the door release, door position input, programmable inputs and outputs, and inputs for the REX.\n # Accsss Control Sub Panel (Alarm Panel)?\n # Panel_Input: Input into the access panel: switch toggle, button press, credential entered/scanned/swiped, etc\n # Panel_Output: Ouput from the access panel: some sort of function is activated, door area is unlocked, etc\n # Reader_Aux_Input: The input from the reader on the “other” side of the controlled door\n # Reader_Aux_Output: The output from the reader on the “other” side of the controlled door\n # Biometric Reader: Reader of biometric characteristics to be used for authentication\n # REX: Request to exit. a required accessory in an access control system, which can take the form of anything from a\n # mushroom button to an infrared sensor. In an access control system, the REX (Request to exit) trips a relay in the panel\n # to bypass “door forced” alarms within the access control software to avoid false alarms in the audit report. 
It can also be\n # used to trip a relay which changes the state of an electric electronic item from on to off, lock to unlock or open to close.\n # Magnetic_Lock: Electromagnetic or magnetic lock, a locking mechanisim that consists of an electromagnet and an armature plate.\n # Electrified_Lock: An electronic lock, a locking device that works by means of electric current. Can be controlled remotely\n # depending on the locking system.\n # Door_Release: An electronic input device used to immediately unlock specififed doors that are equipped with electronic locks.\n # Badge Station: A kiosk or checkpoint that requires the use of a badge in order to verify credentials and to grant access.\n },\n \"Video_Surveillance_Equipment\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Video, TAG.Surveillance],\n \"subclasses\": {\n \"Surveillance_Camera\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Security,\n TAG.Video,\n TAG.Surveillance,\n TAG.Camera,\n ],\n SKOS.definition: Literal(\n \"An optical instrument to capture still images or record moving images, which are stored on a physical or digital medium.\"\n ),\n \"parents\": [BRICK.Camera]\n # TODO: subclass of PTZ (Pan/Tilt/Zoom) cameras?\n },\n \"NVR\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Security,\n TAG.Video,\n TAG.Surveillance,\n TAG.NVR,\n ],\n OWL.equivalentClass: BRICK[\"Network_Video_Recorder\"],\n SKOS.definition: Literal(\"A Network Video Recorder.\"),\n },\n \"Network_Video_Recorder\": {\n \"tags\": [\n TAG.NVR,\n TAG.Equipment,\n TAG.Security,\n TAG.Video,\n TAG.Recorder,\n TAG.Network,\n ],\n OWL.equivalentClass: BRICK[\"NVR\"],\n SKOS.definition: Literal(\"A Network Video Recorder.\"),\n },\n },\n # TODO\n # Encoder: (Do we imply video encoder here?) - A device that is used to convert information from one format to another.\n # Switch: Again this sounds generic - A device that can connect, disconnect, or divert current in an electrical current (or signal)\n # - is any specific kind of switch e.g. PoESwitch implied here?\n # Video_Wall (or should this be in a separate classification with displays and monitors?)\n },\n \"Intrusion_Detection_Equipment\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Intrusion, TAG.Detection],\n # TODO\n # Motion sensor - but maybe to Points, but still need a way to represent security motion sensors\n # Security Control Panel: The central hub of a security system. All devices are connected to the security panel for easy\n # and efficient access for different security protocols (i.e. Intrusion security) and events. Question: How’s this different from\n # Access Panel? 
Is this specific to Intrusion detection system or more general?\n # Glass_Break_Sensor: a sensor used in electronic alarms that detect if pane of glass has been shattered or is broken.\n # Duress_Button: Panic button, an electronic input device used to help alerting someone in emergency situations.\n # Door_Contacts: Door contact sensor, a peripheral security sensor that lets an alarm system know whether a door is\n # open or closed.\n },\n \"Intercom_Equipment\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Intercom],\n \"subclasses\": {\n \"Emergency_Phone\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Security,\n TAG.Intercom,\n TAG.Emergency,\n TAG.Phone,\n ],\n SKOS.definition: Literal(\n \"A phone specifically provided for making calls to emergency services.\"\n ),\n },\n \"Video_Intercom\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Intercom, TAG.Video],\n SKOS.definition: Literal(\n \"An intercom device that has video capabilites as well as voice capabilities\"\n ),\n },\n },\n },\n}\n\nsafety_subclasses = {\n \"Automated_External_Defibrillator\": {\n OWL.equivalentClass: BRICK[\"AED\"],\n \"tags\": [TAG.Equipment, TAG.Safety, TAG.AED, TAG.Defibrillator],\n SKOS.definition: Literal(\n \"Automated External Defibrillator. Used by trained people to help those experiencing cardiac issues.\"\n ),\n },\n \"AED\": {\n OWL.equivalentClass: BRICK[\"Automated_External_Defibrillator\"],\n \"tags\": [TAG.Equipment, TAG.Safety, TAG.AED, TAG.Defibrillator],\n SKOS.definition: Literal(\n \"Automated External Defibrillator. Used by trained people to help those experiencing cardiac issues.\"\n ),\n },\n \"First_Aid_Kit\": {\n \"tags\": [TAG.Equipment, TAG.Safety, TAG.Aid, TAG.FirstAid],\n SKOS.definition: Literal(\n \"A collection of medical supplies placed in a well-known location to provide immediate treatment\"\n ),\n },\n \"Emergency_Wash_Station\": {\n \"tags\": [TAG.Equipment, TAG.Safety, TAG.Wash, TAG.Station, TAG.Emergency],\n SKOS.definition: Literal(\n \"A piece of plumbed equipment to flush chemicals or hazardous substances off of a person\"\n ),\n \"subclasses\": {\n \"Eye_Wash_Station\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Safety,\n TAG.Wash,\n TAG.Station,\n TAG.Emergency,\n TAG.Eye,\n ],\n SKOS.definition: Literal(\n \"An emergency wash station to flush chemicals or hazardous substances out of a persons eye\"\n ),\n },\n \"Safety_Shower\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Safety,\n TAG.Wash,\n TAG.Station,\n TAG.Emergency,\n TAG.Shower,\n ],\n SKOS.definition: Literal(\n \"An emergency wash station to flush chemicals or hazardous substances off of a person\"\n ),\n },\n \"Drench_Hose\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Safety,\n TAG.Wash,\n TAG.Station,\n TAG.Emergency,\n TAG.Drench,\n TAG.Hose,\n ],\n SKOS.definition: Literal(\n \"An emergency wash station to flush chemicals or hazardous substances off of a person by spraying water on them from a distance\"\n ),\n },\n },\n },\n}\n","sub_path":"bricksrc/equipment.py","file_name":"equipment.py","file_ext":"py","file_size_in_byte":28680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
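# --- Added illustration (hypothetical, not part of the Brick source): the nested
# dictionaries above all share one shape - a class name mapping to optional
# "tags", "parents" and "subclasses" keys - so they can be flattened into
# (class, parent, tags) triples with a small recursive walk like this toy version.
def walk_class_tree(tree, parent=None, out=None):
    out = out if out is not None else []
    for name, defn in tree.items():
        out.append((name, parent, defn.get("tags", [])))
        walk_class_tree(defn.get("subclasses", {}), parent=name, out=out)
    return out

toy = {
    "Fan": {
        "tags": ["Equipment", "Fan"],
        "subclasses": {"Exhaust_Fan": {"tags": ["Equipment", "Fan", "Exhaust"]}},
    }
}
for name, parent, tags in walk_class_tree(toy):
    print(name, "parent:", parent, "tags:", tags)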
+{"seq_id":"593059436","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 10:04:31 2019\n\n@author: nicolas\n\"\"\"\n\nimport random\nimport pybullet\nimport pybullet_data\nimport cv2\nimport time\nfrom qibullet import SimulationManager\nfrom qibullet import PepperVirtual\n\nliste_choix = [\"cat\",\"turtle\",\"parrot\"]\nchoix = liste_choix[1]\n\n#-----------------------------Initialisation-----------------------------------\ndef main(): \n \"Initialisation des variables\"\n simulation_manager = SimulationManager() \n client = simulation_manager.launchSimulation(gui=True)\n pepper = simulation_manager.spawnPepper(client, spawn_ground_plane=True)\n pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())\n \n \n \"Appel des fonctions\"\n Environnement(client)\n pepper.goToPosture(\"Stand\", 0.6)\n time.sleep(1)\n Catch_Objet_Milieu(pepper)\n \"\"\"for i in range(2): \n Prise_Photo(pepper)\"\"\"\n Retour_Depart_Milieu(pepper)\n Lacher_Objet(pepper,choix)\n \n while True:\n cv2.waitKey(1)\n \n \n#----------------------------FONCTION LOAD OBJET------------------------------- \n\"Ici on charge tous les objets de l'environnement dans lequel le robot va évoluer\"\ndef Environnement(client): \n \n coord_y = [-0.8,0.0,0.8]\n liste_coord_y = random.sample(coord_y,3)\n \n pybullet.loadURDF(\n \"table2.urdf\",\n basePosition=[2.3, 0, 0.3],\n globalScaling=17,\n physicsClientId=client)\n \n pybullet.loadURDF(\n \"totem_Tortue.urdf\",\n basePosition=[1.95, -0.8, 0.7],\n globalScaling=1.5,\n physicsClientId=client)\n \n pybullet.loadURDF(\n \"totem_Parrot.urdf\",\n basePosition=[1.95, 0.8, 0.7],\n globalScaling=1.5,\n physicsClientId=client)\n \n pybullet.loadURDF(\n \"totem_Cat.urdf\",\n basePosition=[1.95, 0, 0.7],\n globalScaling=1.5,\n physicsClientId=client)\n\n pybullet.loadURDF(\n \"caisse.urdf\",\n basePosition=[0.0, 2.5, 0.01],\n globalScaling=0.2,\n physicsClientId=client)\n\n pybullet.loadURDF(\n \"caisse.urdf\",\n basePosition=[-0.1, -2.5, 0.1],\n globalScaling=0.2,\n physicsClientId=client)\n\n pybullet.loadURDF(\n \"caisse.urdf\",\n basePosition=[-1.8,-0.05, 0.1],\n globalScaling=0.2,\n physicsClientId=client)\n\n\n#----------------------------FONCTION_Prise_Photos-----------------------------\n\"Ici on donne les instructions de mouvement pour que le robot prenne tous les animaux en photo\"\n\n \ndef Retour_Depart_Milieu(pepper):\n #pepper.moveTo(0,-0.8,0) \n #time.sleep(1)\n pepper.moveTo(-0.9,0,0)\n time.sleep(1)\n \ndef Catch_Objet_Milieu(pepper):\n pepper.moveTo(1.06,0,0) \n pepper.setAngles(\"RElbowRoll\", 0.0, 1.0) \n pepper.setAngles(\"RShoulderPitch\", 0.15, 1.0) \n pepper.setAngles(\"RHand\", 1.5, 1.0) \n pepper.moveTo(0.55,0,0)\n pepper.moveTo(0,0.187,0)\n pepper.setAngles(\"RHand\", 0.00, 1.0) \n time.sleep(4)\n pepper.moveTo(-0.73,0,0)\n time.sleep(4)\n pepper.moveTo(0,-0.187,0)\n time.sleep(1)\n \ndef Prise_Photo(pepper):\n pepper.setAngles(\"HeadPitch\",-1.0, 1.0) #le robot lève la tête pour avoir le bon angle\n pepper.subscribeCamera(PepperVirtual.ID_CAMERA_BOTTOM)\n img = pepper.getCameraFrame() #prise de photo\n cv2.imshow(\"bottom camera\", img) #affichage de la photo\n time.sleep(5)\n pepper.moveTo(0,0.8,0)\n time.sleep(1) \n \ndef Lacher_Objet(pepper, choix):\n if choix == \"cat\":\n pepper.moveTo(-0.9,0,0)\n time.sleep(1)\n pepper.moveTo(0,0,3.14)\n time.sleep(1)\n pepper.setAngles(\"RElbowRoll\", -0.3, 1.0) \n time.sleep(1)\n pepper.setAngles(\"RHand\", 1.5, 1.0) \n time.sleep(1)\n elif choix == \"parrot\":\n pepper.moveTo(0,-1.80,0)\n time.sleep(1)\n 
pepper.moveTo(0,0,-1.57)\n time.sleep(1)\n pepper.setAngles(\"RHand\", 1.5, 1.0) \n time.sleep(1)\n elif choix == \"turtle\":\n pepper.moveTo(0,1.80,0)\n time.sleep(1)\n pepper.moveTo(0,0,1.57)\n time.sleep(1)\n time.sleep(1)\n pepper.setAngles(\"RHand\", 1.5, 1.0) \n time.sleep(1)\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Script_Projet_Nico/Pepper_Basic_Nico.py","file_name":"Pepper_Basic_Nico.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"68188442","text":"#!/usr/bin/env python3\n#\n# Copyright 2021 Graviti. Licensed under MIT License.\n#\n\n\"\"\"Notes, DatasetBase, Dataset and FusionDataset.\n\n:class:`Notes` contains the basic information of a :class:`DatasetBase`.\n\n:class:`DatasetBase` defines the basic concept of a dataset,\nwhich is the top-level structure to handle your data files, labels and other additional information.\n\nIt represents a whole dataset contains several segments\nand is the base class of :class:`Dataset` and :class:`FusionDataset`.\n\n:class:`Dataset` is made up of data collected from only one sensor\nor data without sensor information.\nIt consists of a list of :class:`~tensorbay.dataset.segment.Segment`.\n\n:class:`FusionDataset` is made up of data collected from multiple sensors.\nIt consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`.\n\n\"\"\"\n\nimport json\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n KeysView,\n Optional,\n Sequence,\n Tuple,\n Type,\n TypeVar,\n Union,\n overload,\n)\n\nfrom ..label import Catalog\nfrom ..utility import (\n Deprecated,\n EqMixin,\n NameMixin,\n NameSortedList,\n ReprMixin,\n ReprType,\n common_loads,\n locked,\n)\nfrom .segment import FusionSegment, Segment\n\nif TYPE_CHECKING:\n from ..client import GAS\n\n_T = TypeVar(\"_T\", FusionSegment, Segment)\n\n\nclass Notes(ReprMixin, EqMixin):\n \"\"\"This is a class stores the basic information of :class:`DatasetBase`.\n\n Arguments:\n is_continuous: Whether the data inside the dataset is time-continuous.\n bin_point_cloud_fields: The field names of the bin point cloud files in the dataset.\n\n \"\"\"\n\n _T = TypeVar(\"_T\", bound=\"Notes\")\n\n _repr_attrs = (\"is_continuous\", \"bin_point_cloud_fields\")\n\n def __init__(\n self, is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] = None\n ) -> None:\n self.is_continuous = is_continuous\n self.bin_point_cloud_fields = bin_point_cloud_fields\n\n def __getitem__(self, key: str) -> Any:\n try:\n return getattr(self, key)\n except AttributeError as error:\n raise KeyError(key) from error\n\n def _loads(self, contents: Dict[str, Any]) -> None:\n self.is_continuous = contents[\"isContinuous\"]\n self.bin_point_cloud_fields = contents.get(\"binPointCloudFields\")\n\n @classmethod\n def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:\n \"\"\"Loads a :class:`Notes` instance from the given contents.\n\n Arguments:\n contents: The given dict containing the dataset notes::\n\n {\n \"isContinuous\": \n \"binPointCloudFields\": [ or null\n , \n ...\n ]\n }\n\n Returns:\n The loaded :class:`Notes` instance.\n\n \"\"\"\n return common_loads(cls, contents)\n\n def keys(self) -> KeysView[str]:\n \"\"\"Return the valid keys within the notes.\n\n Returns:\n The valid keys within the notes.\n\n \"\"\"\n return KeysView(self._repr_attrs) # type: ignore[arg-type]\n\n def dumps(self) -> Dict[str, Any]:\n \"\"\"Dumps the notes into a dict.\n\n Returns:\n A dict containing all the information of the Notes::\n\n {\n \"isContinuous\": \n \"binPointCloudFields\": [ or null\n , \n ...\n ]\n }\n\n \"\"\"\n contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous}\n if self.bin_point_cloud_fields:\n contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields\n return contents\n\n\n# When the NameMixin is before Sequence[_T], typing will raise AttributeError.\n# related issue: python/typing#777\nclass DatasetBase(Sequence[_T], NameMixin): # pylint: disable=too-many-ancestors\n \"\"\"This class 
defines the concept of a basic dataset.\n\n DatasetBase represents a whole dataset contains several segments\n and is the base class of :class:`Dataset` and :class:`FusionDataset`.\n\n A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog`\n indicating all the possible values of the labels.\n\n Arguments:\n name: The name of the dataset.\n gas: The :class:`~tensorbay.client.gas.GAS` client for getting a remote dataset.\n revision: The revision of the remote dataset.\n\n Attributes:\n catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset.\n notes: The :class:`Notes` of the dataset.\n\n \"\"\"\n\n _is_fusion: bool\n\n _repr_type = ReprType.SEQUENCE\n\n def __init__(\n self, name: str, gas: Optional[\"GAS\"] = None, revision: Optional[str] = None\n ) -> None:\n super().__init__(name)\n\n if gas:\n self._client = gas.get_dataset(name, is_fusion=self._is_fusion)\n if revision:\n self._client.checkout(revision)\n else:\n self._segments: NameSortedList[_T] = NameSortedList()\n self._catalog = Catalog()\n self._notes = Notes()\n\n def __len__(self) -> int:\n return self._get_segments().__len__()\n\n @overload\n def __getitem__(self, index: Union[int, str]) -> _T:\n ...\n\n @overload\n def __getitem__(self, index: slice) -> Sequence[_T]:\n ...\n\n def __getitem__(self, index: Union[int, str, slice]) -> Union[Sequence[_T], _T]:\n if isinstance(index, str):\n return self._get_segments().get_from_name(index)\n\n return self._get_segments().__getitem__(index)\n\n def __delitem__(self, index: Union[int, str, slice]) -> None:\n if isinstance(index, slice):\n for key in self._get_segments()._data.keys()[index]:\n self._get_segments()._data.__delitem__(key)\n return\n\n if isinstance(index, int):\n index = self._get_segments()._data.keys()[index]\n\n self._get_segments()._data.__delitem__(index)\n\n @locked\n def _init_segments(self) -> None:\n self._segments = NameSortedList()\n # pylint: disable=protected-access\n for segment in self._client._list_segment_instances():\n self._segments.add(segment) # type: ignore[arg-type]\n\n def _get_segments(self) -> NameSortedList[_T]:\n if not hasattr(self, \"_segments\"):\n self._init_segments()\n\n return self._segments\n\n @property\n def catalog(self) -> Catalog:\n \"\"\"Return the catalog of the dataset.\n\n Returns:\n The :class:`~tensorbay.label.catalog.Catalog` of the dataset.\n\n \"\"\"\n if not hasattr(self, \"_catalog\"):\n self._catalog = self._client.get_catalog()\n\n return self._catalog\n\n @property\n def notes(self) -> Notes:\n \"\"\"Return the notes of the dataset.\n\n Returns:\n The class:`Notes` of the dataset.\n\n \"\"\"\n if not hasattr(self, \"_notes\"):\n self._notes = self._client.get_notes()\n\n return self._notes\n\n def keys(self) -> Tuple[str, ...]:\n \"\"\"Get all segment names.\n\n Returns:\n A tuple containing all segment names.\n\n \"\"\"\n # pylint: disable=protected-access\n return tuple(self._get_segments()._data)\n\n def load_catalog(self, filepath: str) -> None:\n \"\"\"Load catalog from a json file.\n\n Arguments:\n filepath: The path of the json file which contains the catalog information.\n\n \"\"\"\n with open(filepath, \"r\") as fp:\n contents = json.load(fp)\n self._catalog = Catalog.loads(contents)\n\n @Deprecated(since=\"v1.4.0\", removed_in=\"v1.7.0\", substitute=__getitem__)\n def get_segment_by_name(self, name: str) -> _T:\n \"\"\"Return the segment corresponding to the given name.\n\n Arguments:\n name: The name of the request segment.\n\n Returns:\n The segment which matches 
the input name.\n\n \"\"\"\n return self._get_segments().get_from_name(name)\n\n def add_segment(self, segment: _T) -> None:\n \"\"\"Add a segment to the dataset.\n\n Arguments:\n segment: The segment to be added.\n\n \"\"\"\n self._get_segments().add(segment)\n\n\nclass Dataset(DatasetBase[Segment]):\n \"\"\"This class defines the concept of dataset.\n\n Dataset is made up of data collected from only one sensor or data without sensor information.\n It consists of a list of :class:`~tensorbay.dataset.segment.Segment`.\n\n \"\"\"\n\n _is_fusion = False\n\n def create_segment(self, segment_name: str = \"\") -> Segment:\n \"\"\"Create a segment with the given name.\n\n Arguments:\n segment_name: The name of the segment to create, which default value is an empty string.\n\n Returns:\n The created :class:`~tensorbay.dataset.segment.Segment`.\n\n \"\"\"\n segment = Segment(segment_name)\n self._get_segments().add(segment)\n return segment\n\n\nclass FusionDataset(DatasetBase[FusionSegment]):\n \"\"\"This class defines the concept of fusion dataset.\n\n FusionDataset is made up of data collected from multiple sensors.\n It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`.\n \"\"\"\n\n _is_fusion = True\n\n def create_segment(self, segment_name: str = \"\") -> FusionSegment:\n \"\"\"Create a fusion segment with the given name.\n\n Arguments:\n segment_name: The name of the fusion segment to create,\n which default value is an empty string.\n\n Returns:\n The created :class:`~tensorbay.dataset.segment.FusionSegment`.\n\n \"\"\"\n segment = FusionSegment(segment_name)\n self._get_segments().add(segment)\n return segment\n","sub_path":"tensorbay/dataset/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":9924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
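# --- Added usage sketch (not part of the tensorbay source): build an in-memory
# Dataset with no GAS client, add a segment, and look it up by name and index.
# Assumes only the API defined above.
if __name__ == "__main__":
    dataset = Dataset("example_dataset")      # purely local, no remote client
    segment = dataset.create_segment("train")
    assert dataset["train"] is segment        # lookup by segment name
    assert dataset[0] is segment              # lookup by index
    assert dataset.keys() == ("train",)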
+{"seq_id":"203820474","text":"#!/usr/bin/env python\n# coding=UTF-8\n'''\nAuthor: Wei Luo\nDate: 2021-06-01 11:46:48\nLastEditors: Wei Luo\nLastEditTime: 2021-06-02 22:37:34\nNote: Note\n'''\n\nimport casadi as ca\nimport numpy as np\nfrom acados_template import AcadosModel\n\n\ndef export_uav_model():\n g_ = 9.8066\n # control input\n roll_ref_ = ca.SX.sym('roll_ref')\n pitch_ref_ = ca.SX.sym('pitch_ref')\n thrust_ref_ = ca.SX.sym('thrust_ref')\n controls = ca.vcat([roll_ref_, pitch_ref_,\n thrust_ref_])\n\n # model state\n x_ = ca.SX.sym('x')\n y_ = ca.SX.sym('y')\n z_ = ca.SX.sym('z')\n vx_ = ca.SX.sym('vx')\n vy_ = ca.SX.sym('vy')\n vz_ = ca.SX.sym('vz')\n roll_ = ca.SX.sym('roll')\n pitch_ = ca.SX.sym('pitch')\n yaw_ = ca.SX.sym('yaw')\n\n # states [p, q, v]\n states = ca.vcat([x_, y_, z_, vx_, vy_, vz_, roll_, pitch_, yaw_])\n\n # roll_gain = 2.477\n # roll_tau = 0.477\n # pitch_gain = 2.477\n # pitch_tau = 0.477\n\n roll_gain = ca.SX.sym('roll_gain')\n roll_tau = ca.SX.sym('roll_tau')\n pitch_gain = ca.SX.sym('pitch_gain')\n pitch_tau = ca.SX.sym('pitch_tau')\n\n params = ca.vcat([roll_gain, roll_tau, pitch_gain, pitch_tau])\n\n rhs = [\n vx_,\n vy_,\n vz_,\n (ca.cos(roll_) * ca.cos(yaw_) * ca.sin(pitch_) +\n ca.sin(roll_) * ca.sin(yaw_)) * thrust_ref_,\n (ca.cos(roll_) * ca.sin(pitch_) * ca.sin(yaw_) -\n ca.cos(yaw_) * ca.sin(roll_)) * thrust_ref_,\n -g_ + ca.cos(pitch_) * ca.cos(roll_) * thrust_ref_,\n (roll_gain * roll_ref_ - roll_) / roll_tau,\n (pitch_gain * pitch_ref_ - pitch_) / pitch_tau,\n 0.0\n ]\n\n f = ca.Function('f', [states, controls], [ca.vcat(rhs)])\n\n x_dot = ca.SX.sym('x_dot', len(rhs))\n f_impl = x_dot - f(states, controls)\n\n model = AcadosModel()\n model.f_expl_expr = f(states, controls)\n model.f_impl_expr = f_impl\n model.x = states\n model.xdot = x_dot\n model.u = controls\n model.p = params\n model.name = 'quadrotor'\n\n constraints = ca.types.SimpleNamespace()\n constraints.roll_min = np.deg2rad(-85)\n constraints.pitch_min = np.deg2rad(-85)\n constraints.roll_max = np.deg2rad(85)\n constraints.pitch_max = np.deg2rad(85)\n constraints.thrust_min = 0.5*g_\n constraints.thrust_max = 1.9*g_\n\n return model, constraints\n","sub_path":"itm_quadrotor_node_old/itm_nonlinear_mpc/solver_generator_scripts_acados/nmpc_acados_uav.py","file_name":"nmpc_acados_uav.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"145609253","text":"# 需求:爬取任意百度贴吧的列表页标题和链接地址; 以及每一个帖子(详情页)内的图片\n# 思路: 使用chrome浏览器中低版本的手机端,找到百度贴吧极速版的入口,使用 xpath 来提取数据\n\"\"\"\nhttps://tieba.baidu.com/mo/q---7EAD792A1FE0B613B16325D0E5802760%3AFG%3D1--1-3-0--2--wapp_1539301430297_582/m?kw=%E6%9D%8E%E6%AF%85&lp=5011&lm=&pn=20\n\n\n\n\"\"\"\nimport json\nimport sys\nimport time\nimport random\n\nfrom lxml import etree\nimport requests\n\n\nclass TiebaSpider:\n def __init__(self, tieba_name):\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13'\n }\n self.start_url = 'https://tieba.baidu.com/mo/q---7EAD792A1FE0B613B16325D0E5802760%3AFG%3D1--1-3-0--2--wapp_1539301430297_582/m?kw={}&lp=5011&lm=&pn=0'.format(tieba_name)\n self.part_url = 'https://tieba.baidu.com/mo/q---7EAD792A1FE0B613B16325D0E5802760%3AFG%3D1--1-3-0--2--wapp_1539301430297_582/'\n\n def get_response(self, url):\n print(requests.utils.unquote(url))\n resp = requests.get(url, self.headers)\n # print(resp.content) # 比对请求结果和 element 中结果的区别\n return resp.content\n\n def parse_data(self, html_bytes):\n # print(html_bytes.decode('utf-8', 'ignore'))\n html = etree.HTML(html_bytes)\n # 增加处理逻辑: 控制停止爬取的节点\n a_list = html.xpath('//body/div/div[contains(@class, \"i\")]/a')\n data_list = []\n for a in a_list:\n item = {}\n # url 要拼接\n item['href'] = self.part_url + a.xpath('./@href')[0] if len(a.xpath('./@href')) > 0 else None\n item['title'] = a.xpath('./text()')[0] if len(a.xpath('./@href')) > 0 else None\n item[\"img_list\"] = self.get_img_list(item[\"href\"], [])\n # print(item)\n data_list.append(item)\n # 获取下一页的url地址\n next_url = self.part_url + html.xpath('//a[contains(text(), \"下一页\")]/@href')[0] if len(\n html.xpath('//a[contains(text(), \"下一页\")]/@href')) > 0 else None\n return data_list, next_url\n\n def get_img_list(self, detail_url, img_list):\n # 1.发送请求,获取响应\n detail_html_str = self.get_response(detail_url)\n # 2.提取数据\n detail_html = etree.HTML(detail_html_str)\n img_list += detail_html.xpath(\"//img[@class='BDE_Image']/@src\")\n\n # 详情页下一页的url地址\n next_url = detail_html.xpath(\"//a[text()='下一页']/@href\")\n next_url = self.part_url + next_url[0] if len(next_url) > 0 else None\n if next_url is not None: # 当存在详情页的下一页,请求\n return self.get_img_list(next_url, img_list)\n\n # else不用写\n img_list = [requests.utils.unquote(i).split(\"src=\")[-1] for i in img_list]\n return img_list\n\n def save_content(self, data):\n with open('tieba.txt', 'a', encoding='utf-8') as f:\n f.write(json.dumps(data, ensure_ascii=False, indent=2))\n print(data)\n print('保存成功')\n\n def run(self):\n # 获取url, 这里的url的最后一页数量不确定,所以不能 构造 url 列表\n next_url = self.start_url\n\n while next_url is not None:\n # 发送请求,获取响应\n html = self.get_response(next_url)\n\n # 数据处理\n data_list, next_url = self.parse_data(html)\n\n # 保存\n self.save_content(data_list)\n\n time.sleep(random.randint(1, 4) * 0.5)\n\n\nif __name__ == '__main__':\n # print('请在终端按照格式[python3 tieba_spider.py 贴吧名],例如[python3 tieba_spider.py \"李毅\"]运行该程序')\n # tieba_name = sys.argv[1]\n tieba_name = '武汉'\n spider = TiebaSpider(tieba_name)\n spider.run()\n","sub_path":"08_tieba_upgrade/tieba_spider.py","file_name":"tieba_spider.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"179505177","text":"'''\n File name: Gonzalez_centers.py\n Description: Classic Kcenters algorithm which picks the furthest point\n as the center\n Author: Sharvaree V\n Last modified: 16th May 2019\n Python Version: 3.5\n'''\n\nimport numpy as np\nfrom scipy.spatial import distance\nimport math\nimport csv\n\ndef max_dist(data, centers):\n distances = np.zeros(len(data)) #cumulative distance measure for all points\n for cluster_id, center in enumerate(centers):\n for point_id, point in enumerate(data):\n if distance.euclidean(point,center) == 0.0:\n distances[point_id] = -math.inf # already in cluster\n if not math.isinf(distances[point_id]):\n # add the distance\n distances[point_id] = distances[point_id] + distance.euclidean(point,center)\n # return the point which is furthest away\n return data[np.argmax(distances)]\n\ndef Gonzalez(data, num_clusters, init):\n '''\n data Data as numpy array\n num_clusters Number of clusters (k)\n init First center to initialize the algorithm\n '''\n\n\n centers = []\n centers.append(init) # initialize the first center\n while len(centers) is not num_clusters:\n centers.append(max_dist(data, centers))\n return np.array(centers)\n\n\n#dummy data_set\nfrom sklearn import cluster, datasets, mixture\nn_samples=100\nnoisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,\n noise=.05)\ndata= noisy_circles[0]\nGonzalez_centers = Gonzalez(data, num_clusters=3, init=data[0])\n#print('Cluster Centers:', Gonzalez_centers)\n","sub_path":"lib/Gonzalez_centers.py","file_name":"Gonzalez_centers.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"293203012","text":"#!/usr/bin/env python3\n\"\"\"\nThis module creates glider kmls for ooi arrays\n\"\"\"\n\n# IMPORTS\nimport os, configparser, time, sys\nfrom fnmatch import fnmatch\n\n# LOCAL IMPORTS\nimport kml_snips as kml\nimport gliderStateXML_parser as gsx\nimport argos_parser\n\n# INVARIABLES\nTRUE_STRINGS = ['TRUE','YES','ON','1']\nFALSE_STRINGS = ['FALSE','NO','OFF','0']\n\n\ndef read_config(config_file):\n\n config = configparser.ConfigParser(inline_comment_prefixes=('#',))\n config.optionxform = str # make keys case sensitive\n config.read(config_file)\n\n ## load-in default icons from file ##\n try:\n config.read([config['DEFAULT']['icon_ini'],config_file])\n except KeyError:\n config.read(['config/default_icons.ini',config_file])\n\n ## GROUP CONFIG FILE INTO GLIDERS, MOORINGS, AND SETTINGS ##\n output = {}\n for section in config.sections():\n if section == 'DEFAULT': continue\n\n ## ESTABLISHING DEFAULT FILENAME ##\n if 'file_name' not in config[section].keys() or config[section]['file_name']=='':\n # defaults to .kml\n fname = os.path.basename(config_file)\n doti = fname.rindex('.')\n fname = fname[:doti] + '.kml'\n config[section]['file_name'] = fname\n\n array = dict(name=section,gliders={},moorings={},lines={},settings={})\n for key,val in config[section].items():\n if fnmatch(val,'??05MOAS-?????/D?????') or fnmatch(val,'gliders1/*'):\n array['gliders'][key] = dict(name=key, refdes=val)\n elif len(val.split(' ')) == 2: #startswith(('FLMA','FLMB','HYPM','SUMO')) or fnmatch(key,'PM??'):\n lat,lon = val.split()\n array['moorings'][key] = dict(name=key, lat=lat,lon=lon)\n elif len(val.split(',')) >= 2: # paths\n pairs = val.split(',')\n coords_list = []\n for p in pairs:\n lon,lat = p.strip().split(' ')\n coord = dict(name=key, lat=float(lat),lon=float(lon))\n coords_list.append( coord )\n array['lines'][key] = coords_list\n else:\n if val.upper() in FALSE_STRINGS: val = False\n elif val.upper() in TRUE_STRINGS: val = True\n array['settings'][key] = val\n\n ## DEFAULTS VALUES OF UN-SPECIFIED SETTINGS ##\n #TODO inheritable defaults .ini file\n settings = array['settings']\n options = array['settings'].keys()\n if 'datapath' not in options:\n settings['datapath'] = '.'\n if 'tail_length' not in options:\n settings['tail_length'] = 'inf'\n if 'time_scrolling' not in options:\n settings['time_scrolling'] = False\n if 'show_by_default' not in options:\n settings['show_by_default'] = True\n if 'rich_bubbletext' not in options:\n settings['rich_bubbletext'] = True\n if 'argos_start' not in options:\n settings['argos_start'] = 'inf' #furthest back\n if 'argos_end' not in options:\n settings['argos_start'] = 0 #most recent\n if 'argos_src' not in options:\n settings['argos_src'] = None\n if 'greyout_timeout' not in options:\n settings['greyout_timeout'] = 'inf'\n\n ## ORDER ARRAYS BY OUTPUT FILENAME ##\n try:\n output[ array['settings']['file_name'] ].append(array)\n except KeyError:\n output[ array['settings']['file_name'] ] = [array]\n\n return output\n\ndef make_lines_pmarks(lines, settings):\n pmarks,styles = [],[] \n \n for name,line in lines.items():\n path = kml.LineString(line)\n styleUrl= 'PATH_STYLE_'+name\n try: color = possible_custom_setting(name, 'color', settings)\n except: color = 'white'\n color = kml.HEXCOLOR[color.lower()]\n style=kml.Style(id=styleUrl, LineStyle=kml.LineStyle(color))\n pmark = kml.Placemark(name=name,\n geometry=path,\n styleUrl=styleUrl )\n pmarks.append(pmark)\n styles.append(style)\n return pmarks, styles\n\ndef 
make_mooring_pmarks(moorings, settings):\n\n pmarks,styles = [],[]\n for mooring in sorted(moorings.values(), key=lambda m: m['name']):\n point = kml.Point(lat=mooring['lat'],\n lon=mooring['lon'])\n\n # assemble mooring icon\n iconURL = possible_custom_setting(mooring['name'], 'mooring_icon', settings)\n styleUrl = 'MOORING_STYLE_'+iconURL[iconURL.rindex('/')+1:]\n style = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL,scale=1.2,\n hotSpot = kml.IconStyle.hotSpot(y=0.05)), #put icon anchor near bottom center\n BalloonStyle=kml.BalloonStyle(None),\n id=styleUrl)\n lookat = kml.LookAt(range=230000,lat=mooring['lat'],lon=mooring['lon'])\n p = kml.Placemark(name=mooring['name'],\n geometry=point,\n LookAt = lookat,\n styleUrl=styleUrl)\n\n pmarks.append(p)\n styles.append(style)\n styles = list(set(styles))\n return pmarks,styles\n\ndef possible_custom_setting(name,setting,settings):\n try:\n # if custom setting exists use that, else use default\n possible_setting = '{} {}'.format(name,setting)\n settings[possible_setting] # test if key exists\n setting = possible_setting\n except: pass\n setting_val = settings[setting]\n\n # if setting is an icon but not a URL, pre-pend path to web hosted icon_folder\n if '_icon' in setting and not setting_val.startswith('http'):\n return os.path.join(settings['icon_folder'], setting_val)\n\n try: return float(setting_val)\n except: return setting_val\n\n\n\ndef make_glider_features(gliders,settings):\n glider_pmarks, glider_surfacings, glider_trails, glider_wpts, glider_argos, styles = [],[],[],[],[],[]\n for glider in sorted(gliders.values(), key=lambda g: g['name']):\n print(' '+glider['name'])\n gstate_path = os.path.join(settings['datapath'],glider['refdes'],'gliderState.xml')\n\n try:\n try: coords, script, wpt, lastabort = gsx.parse(gstate_path)\n except ValueError:\n coords, script, wpt, lastabort = gsx.alt_parse(gstate_path)\n except FileNotFoundError:\n coords, script, wpt, lastabort = gsx.alt_parse(gstate_path)\n except Exception as e:\n print('make_glider_feature() error',type(e),e)\n continue\n except FileNotFoundError as e1:\n print('FNFE:',e1)\n continue\n except IndexError as e1:\n print('FNFE:',e1)\n continue\n\n ### hack to remove one known bad datapoint from one glider ###\n if glider['name'] == 'ga_578':\n for coord in coords:\n if int(coord['lon']) == -144 and int(coord['lat']) == 50:\n coords.remove(coord)\n break\n\n ### GLIDER PMARK ###\n t = coords[-1]['epoch']\n if wpt is None: bearing = None\n else: bearing = wpt['bearing']\n\n style,styleUrl = get_glider_icon_style(settings, bearing, glider['name'],t)\n desc = glider_desc(glider,coords[-1],script,lastabort)\n lookat = kml.LookAt(lat=coords[-1]['lat'],lon=coords[-1]['lon'],range=230000)\n pmark = kml.Placemark(name=glider['name'], # last coord is most recent\n geometry=kml.Point(lat=coords[-1]['lat'],\n lon=coords[-1]['lon']),\n styleUrl=styleUrl,\n description=desc,\n LookAt=lookat,\n Snippet=kml.Snippet(None))\n glider_pmarks.append(pmark)\n styles.append(style)\n\n\n ### GLIDER SURFACINGS ###\n surfmarks = []\n iconURL = possible_custom_setting(glider['name'], 'glider_icon_surf', settings)\n surfacing_style_mousover = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL, scale=0.8),\n BalloonStyle=kml.BalloonStyle(None))\n surfacing_style_normal = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL, scale=0.2),\n LabelStyle=kml.LabelStyle(scale=0))\n styleUrl = 'Surfacing_'+iconURL\n surfacing_style = kml.StyleMap(styleUrl, surfacing_style_normal, surfacing_style_mousover)\n 
styles.append(surfacing_style)\n limit = det_tail_length(settings, 'tail_length', glider['name'], coords)\n for coord in coords[-limit:-1]:\n surface_dialogue_count = coord['dupes']\n name = '{} {}'.format(glider['name'], coord['timestamp'])\n if possible_custom_setting(glider['name'],'time_scrolling',settings):\n ts = kml.TimeStamp(coord['epoch'])\n else: ts = None\n surfmark = kml.Placemark(name=name, styleUrl=surfacing_style.id,\n geometry=kml.Point(lat=coord['lat'],\n lon=coord['lon']),\n time = ts)\n surfmarks.append(surfmark)\n glider_surfacings.append(surfmarks)\n\n\n ### GLIDER TRAIL ###\n styleUrl = 'gtrail'\n limit = det_tail_length(settings, 'tail_length', glider['name'], coords)\n style = kml.Style(id=styleUrl, LabelStyle=kml.LabelStyle(scale=0),\n LineStyle=kml.LineStyle(kml.HEXCOLOR['yellow']))\n styles.append(style)\n time_scrolling = possible_custom_setting(glider['name'],'time_scrolling',settings)\n trailmark = kml.Placemark(name = glider['name']+' Trail',\n styleUrl=styleUrl,\n geometry=kml.LineString(coords[-limit:]))\n glider_trails.append(trailmark)\n\n # # TIME TRAILS are not really working, probably due to glidertrails not being properly injested later\n # for i in range(len(coords[-limit:])-1):\n # geom = kml.LineString(coords[i:i+3]) # returns two consecutive coords\n # tspan = kml.TimeSpan(coords[i]['epoch'],coords[i+1]['epoch'])\n # trailmark = kml.Placemark(name=glider['name'] + ' Trail' + str(i),\n # styleUrl=styleUrl,\n # geometry=geom,\n # time=tspan)\n # glider_trails.append(trailmark)\n\n\n ### GLIDER WPT ###\n if wpt is None:\n glider_wpts.append(None)\n else:\n name=\"{}'s Next Waypoint\".format(glider['name'])\n desc = kml.xml('pre','Lat:{:+.3f}\\nLon:{:+.3f}'.format(wpt['lat'],wpt['lon']))\n iconURL = possible_custom_setting(glider['name'], 'nextwpt_icon', settings)\n styleUrl = 'NextWpt_'+iconURL\n nextwpt_style_mouseover = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL),\n BalloonStyle=kml.BalloonStyle(None))\n nextwpt_style_normal = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL),\n LabelStyle=kml.LabelStyle(scale=0))\n nextwpt_style = kml.StyleMap(styleUrl, nextwpt_style_normal, nextwpt_style_mouseover)\n styles.append(nextwpt_style)\n wptmark = kml.Placemark(styleUrl=nextwpt_style.id, name=name, description=desc,\n geometry=kml.Point(lat=wpt['lat'],lon=wpt['lon']),\n Snippet=kml.Snippet(None))\n glider_wpts.append(wptmark)\n\n\n ### GLIDER ARGOS ###\n if settings['argos_src']:\n argos_hits = argos_parser.main(settings['argos_src'],glider['name'])\n argos_pmarks = []\n iconURL = possible_custom_setting(glider['name'], 'argos_icon', settings)\n argos_style = kml.Style(id='Argos_'+iconURL,\n IconStyle=kml.IconStyle(iconhref=iconURL, scale=1),\n LabelStyle=kml.LabelStyle(scale=0),\n BalloonStyle=kml.BalloonStyle(None))\n styles.append(argos_style)\n start_limit = det_tail_length(settings, 'argos_start', glider['name'], argos_hits)\n end_limit = det_tail_length(settings, 'argos_end', glider['name'], argos_hits)\n for argos_hit in argos_hits[-start_limit:-end_limit]:\n argos_pmark = kml.Placemark(name=glider['name']+' Argos_Hit '+argos_hit['timestamp'],\n styleUrl=argos_style.id, visibility = 0,\n description = argos_hit['desc'],\n Snippet = kml.Snippet(None),\n geometry=kml.Point(lat=argos_hit['lat'],\n lon=argos_hit['lon']))\n argos_pmarks.append(argos_pmark)\n glider_argos.append(argos_pmarks)\n\n styles = list(set(styles))\n return glider_pmarks, glider_surfacings, glider_trails, glider_wpts, glider_argos, styles\n\ndef glider_desc(glider, 
coord, script, lastabort):\n\n refdes = '{} \\n\\n'.format(glider['refdes'])\n gps_str = 'GPS HIT: {:+.3f}N {:+.3f}E\\nHIT TIME: {}\\n\\n'.format(coord['lat'],coord['lon'],coord['timestamp'])\n\n try:\n if script['status'] == '0': status = 'paused'\n elif script['status'] == '1': status = 'on'\n else:status = 'unknown. status_code='+script['status']\n script_str = 'SCRIPT: {}\\nSTATUS: {}\\n\\n'.format(script['name'],status)\n except TypeError:\n script_str = '' # for if script is None\n\n try:\n abort_str = 'LAST ABORT \\nTIMESTAMP: {ts}\\nMISSION: {mis}\\nSEGMENT: {seg}\\nREASON: {why}\\nTOTAL: {tot}\\n\\n'\n abort_str = abort_str.format(tot=lastabort['reset_num'],\n ts=lastabort['timestamp'],\n mis=lastabort['mission'],\n seg=lastabort['segment'],\n why=lastabort['type'])\n except TypeError:\n abort_str = '' # for if lastabort is None\n\n ts = time.strftime('%Y-%m-%d %H:%M:00 EST',time.localtime(time.time()))\n footer = '(DATA CURRENT AS OF {}) '.format(ts)\n desc = refdes + gps_str + script_str + abort_str + footer\n desc = '{} '.format(desc)\n return kml.CDATA_wrap(desc)\n\ndef det_tail_length(settings, setting, glidername=None, coords=None ):\n\n limit = possible_custom_setting(glidername, setting,settings)\n\n #num of unique surfacing locations\n if isinstance(limit,(int,float)):\n try: return int(limit)+1\n except OverflowError: return 0 # in-case limit == 'inf'\n\n # DAYS AGO\n if 'day' in limit.lower():\n limit = ''.join(char for char in limit if char.isdigit() or char=='.') # strip limit of non-numbers\n limit = time.time()-int(limit)*24*60*60 # epoch limit\n limit = time.strftime('%Y-%m-%d',time.gmtime(limit))\n\n #DATE\n try:\n for i,coord in enumerate(reversed(coords)):\n if coord['timestamp'] < limit:\n return i\n except Exception as e: print(type(e),e,type(coords))\n\n return 0 # infinite\n\ndef get_glider_icon_style(settings, heading=None, glidername=None, glider_epoch=float('-inf')):\n\n try:\n heading = int(heading)\n if 0 <= heading%360 < 180:\n icon = 'glider_icon_R'\n offset = heading -90-30 # TODO adjust to be more exact (based on latitude??)\n else: # 180 <= heading%360 < 360:\n icon = 'glider_icon_L'\n offset = heading +90-5 # TODO adjust to be more exact (based on latitude??)\n if glider_epoch < time.time()-60*60*possible_custom_setting(glidername,'greyout_timeout',settings):\n icon = 'glider_icon_grey'\n offset = 0\n iconURL = possible_custom_setting(glidername, icon, settings)\n except TypeError as e:\n #print(type(e),e)\n print('no-heading')\n iconURL = possible_custom_setting(glidername,'glider_icon_tail',settings)\n heading,offset = 0,0\n\n styleUrl = iconURL[iconURL.rindex('/')+1:]+str(heading)\n style = kml.Style(id=styleUrl,\n BalloonStyle=kml.BalloonStyle(None),\n IconStyle=kml.IconStyle(heading=offset,\n iconhref=iconURL,\n scale=1.5))\n return style, styleUrl\n\n#TODO fetchin argos data\n\n#TODO Low Bandwidth ini, past deployment ini\n\n#TODO touring object (is by id possible?)\n\n#TODO find surfacing of last abort and make it stand out somehow,\n# click on link in ballon abort text -> fly to view perhaps?\n\n#TODO KMZ\n#TODO show_by_default\n#TODO rich_bubbletext\n# brush up on that http!\n#TODO UNIT_TESTS\n#TODO propper logging\n\n\n######## MAIN SCRIPT #########\n\ndef main(configs, output_kml_file):\n\n array_folders = []\n styles = []\n for array in sorted(configs, key=lambda arr: arr['name']):\n print(' '+array['name'].upper().strip())\n\n # CREATE KML PLACEMARKS AND STYLES #\n lines_pmarks, \\\n lines_styles = make_lines_pmarks(array['lines'], 
array['settings'])\n \n mooring_pmarks,\\\n mooring_styles = make_mooring_pmarks(array['moorings'], array['settings'])\n \n glider_pmarks,\\\n glider_surfacings,\\\n glider_trails,\\\n glider_wpts,\\\n glider_argos,\\\n glider_styles = make_glider_features(array['gliders'],array['settings'])\n\n # CREATE FOLDER STRUCTURE #\n if array['settings']['show_by_default']:\n array_folder = kml.Folder(name=array['name'], open=1)\n else:\n array_folder = kml.Folder(name=array['name'], visibility=0)\n more_folder = kml.Folder(name='more...',\n style=kml.Style(ListStyle=kml.checkOffOnly_ListStyle))\n surfacings_folders = []\n argos_folders = []\n for glider in glider_pmarks:\n if glider_surfacings:\n surfacings_folder = kml.Folder(name=glider.name + \" Surfacings\")\n surfacings_folders.append(surfacings_folder)\n if glider_argos:\n argos_folder = kml.Folder(name=glider.name +\" Argos\", visibility=0)\n argos_folders.append(argos_folder)\n \n # hotpatch quickfix #\n # no next-wpt or surfacings for archive #\n if array['settings']['file_name'] == 'archive.kml':\n glider_wpts, glider_surfacings, surfacings_folders = [], [], []\n more_folder = kml.Folder(name='more...')\n\n # FILLING FOLDERS WITH PLACEMARKS # ##ARCHIVE ARRAY MODE\n array_folder.extend(glider_pmarks)\n array_folder.extend(mooring_pmarks)\n array_folder.append(more_folder)\n more_folder.extend(glider_wpts)\n more_folder.extend(glider_trails)\n more_folder.extend(surfacings_folders)\n more_folder.extend(lines_pmarks)\n ##more_folder.extend(glider_pmarks)\n ##more_folder.extend(surfacings_folders)\n for i, glider in enumerate(glider_pmarks):\n if surfacings_folders: surfacings_folders[i].extend(glider_surfacings[i])\n if glider_argos: argos_folders[i].extend(glider_argos[i])\n ##array_folder.extend(glider_trails)\n ##array_folder.append(more_folder)\n\n array_folders.append(array_folder)\n styles.extend(mooring_styles + glider_styles + lines_styles)\n\n #print('TOTAL_STYLES list:', len(styles), ' set:', len(set(styles)))\n # remove duplicates and sort kml's shared styles\n styles = sorted(list(set(styles)))\n\n # Assembly of all Features and Containers into the full document\n doc = kml.kml(kml.Document(*styles + array_folders, open=1))\n\n # outputting the doc!\n if output_kml_file:\n doc.save(output_kml_file)\n print(output_kml_file, '...done!')\n else:\n doc.pprint()\n\n ## CREATE NETWORK LINK FILE IF NETLINK SETTING SET\n try:\n # We don't want the network link to load a monstrous amount of data all at once\n if any([array['settings']['show_by_default'] is False for array in configs]):\n chekov_style = kml.Style(ListStyle=kml.checkOffOnly_ListStyle)\n else: chekov_style = None\n\n targetURL = config[0]['settings']['netlink']\n netdoc = kml.kml(kml.NetworkLink(kml.Link(targetURL, refreshInterval=60),\n chekov_style))\n doti = output_kml_file.rindex('.')\n output_netlink_file = output_kml_file[:doti] + '.lnk' + output_kml_file[doti:]\n netdoc.save(output_netlink_file)\n print(output_netlink_file, '...done!')\n except KeyError: pass\n except Exception as e: print('make netlink:',type(e),e)\n\n\n## SCRIPT ##\nHELP_TEXT = \"\"\"This script generates one or more OOI CGSN KML files.\n\nArg1: a .ini configuration file\nArg2: (optional) the path to save the file(s) to. 
If not specified, prints kml to STDOUT\n if Arg2 finishes with .kml or .kmz, the 'file_name' options from the ini file will be ignored.\nBy default, the generated KML file bear the name the input ini file.\nIf the config file specifies a 'file_name', then that will be used instead.\nEach section may specify their own file_name and in such case a new file will be generated for the given section.\nIf 'netlink' is assigned a url in the ini file, a similarly named NetworkLink file will also be created.\n\nSee 'example.ini' for an example ini file.\n\"\"\"\nif __name__ == '__main__':\n\n try:\n input_config_file = sys.argv[1]\n if sys.argv[1] in ['-h', '--help']:\n print(HELP_TEXT)\n sys.exit()\n except: print(HELP_TEXT); sys.exit()\n\n try: output_path = sys.argv[2]\n except: output_path = 'output'\n\n configs = read_config(input_config_file)\n\n if output_path.endswith('.kml') or output_path.endswith('.kmz'):\n config = list(configs.values())[0]\n main( config, output_path )\n else:\n for output_file,config in configs.items():\n output_file = os.path.join(output_path,output_file)\n main( config, output_file )\n \n \n","sub_path":"GE_factory.py","file_name":"GE_factory.py","file_ext":"py","file_size_in_byte":22594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"126443544","text":"#!/usr/bin/env python3\n# code partially taken from https://github.com/waleedgondal/Texture-based-Super-Resolution-Network\n\nimport os\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision.transforms import ToTensor\nfrom torchvision.models import vgg19\n\nclass VGG(nn.Module):\n\n def __init__(self, layers=(0)):\n super(VGG, self).__init__()\n self.layers = layers\n self.model = vgg19(pretrained=True).features\n for param in self.model.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n features = []\n for name, layer in enumerate(self.model):\n x = layer(x)\n if name in self.layers:\n features.append(x)\n if len(features) == len(self.layers):\n break\n return features\n\ndef distance(im1, im2, cuda=False):\n vgg_layers = [int(i) for i in opt.texture_layers]\n vgg_texture = VGG(layers=vgg_layers)\n if cuda:\n vgg_texture = vgg_texture.cuda()\n\n def gram_matrix(y):\n (b, ch, h, w) = y.size()\n features = y.view(b, ch, w * h)\n features_t = features.transpose(1, 2)\n gram = features.bmm(features_t) / ch\n return gram\n\n def criterion(a, b):\n return torch.mean(torch.abs((a-b)**2).view(-1))\n\n text_loss = []\n vgg1 = vgg_texture.forward(im1)\n vgg2 = vgg_texture.forward(im2)\n gram1 = [gram_matrix(y) for y in vgg1]\n gram2 = [gram_matrix(y) for y in vgg2]\n\n for m in range(0, len(vgg1)):\n text_loss += [criterion(gram1[m], gram2[m])]\n\n loss = torch.log(sum(text_loss))\n return loss.item()\n\ndef load_img(filepath):\n from PIL import Image\n img = Image.open(filepath).convert('RGB')\n return torch.stack([ToTensor()(img)])\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('im1')\n parser.add_argument('im2')\n parser.add_argument('--texture_layers', nargs='+', default=['8','17','26','35'], help='vgg layers for texture. Default:[]')\n parser.add_argument('--cuda', type=int, default=0, help='Try to use cuda? Default=1')\n opt = parser.parse_args()\n\n cuda = False\n if opt.cuda:\n if torch.cuda.is_available():\n cuda = True\n torch.cuda.manual_seed(opt.seed)\n else:\n cuda = False\n print('===> Warning: failed to load CUDA, running on CPU!')\n\n im1 = load_img(opt.im1)\n im2 = load_img(opt.im2)\n print(distance(im1, im2))\n\n","sub_path":"compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"608759524","text":"from .record import Record\n\nclass SPF(Record):\n\n def __init__(self, *args, **kwargs):\n self.default = kwargs.pop('default', None)\n kwargs['type'] = 'TXT'\n super(SPF, self).__init__(*args, **kwargs)\n self.data = list(self.data)\n\n def add(self, type, spec=None, action=''):\n if type not in ('all', 'include', 'a', 'mx', 'ptr', 'ip4', 'ip6', 'exists'):\n raise ValueError('Bad SPF type.')\n if action not in ('', '+', '-', '?', '~'):\n raise ValueError('Bad SPF action.')\n if spec:\n self.data.append('%s%s:%s' % (action, type, spec))\n else:\n self.data.append('%s%s' % (action, type))\n\n def dumps(self):\n\n if not (self.data or self.default):\n return ''\n\n parts = ['v=spf1']\n parts.extend(self.data)\n if self.default:\n parts.append('%sall' % self.default)\n\n return super(SPF, self).dumps(data=[' '.join(parts)])\n\n","sub_path":"zones/spf.py","file_name":"spf.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"14171902","text":"\"\"\"\nStack implemented with a resizing List\n\"\"\"\nfrom .stack import Stack\n\nclass StackWithList(Stack):\n def __init__(self):\n \"\"\"\n Using a pre-initilized List to store the items in the Stack in order to illistrate the resizing logic\n \"\"\"\n super(StackWithList, self).__init__()\n self._items = [None]\n\n def __iter__(self):\n return iter(reversed(filter(None, self._items[:self.size])))\n\n def _do_push(self, item):\n self._resize_if_nessisary()\n self._items.insert(self.size, item)\n self._size += 1\n\n def _do_pop(self):\n top_index = self.size - 1\n item = self._items[top_index]\n self._items[top_index] = None\n\n self._size -= 1\n self._resize_if_nessisary()\n return item\n\n def _get_top(self):\n return self._items[self.size - 1]\n\n def _resize_if_nessisary(self):\n max_items = len(self._items)\n\n if self.size == max_items:\n resize_factor = .5\n elif self.size == (max_items / 4):\n resize_factor = 2\n else:\n return\n\n new_items = [None] * int(max_items * resize_factor)\n\n for index in range(self.size):\n new_items[index] = self._items[index]\n\n self._items = new_items\n","sub_path":"src/data_structures/stack/stack_list.py","file_name":"stack_list.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"460493650","text":"import numpy as np\nfrom numpy import exp, log\nfrom functools import partial\nfrom scipy.special import gamma, gammaln\nfrom scipy.cluster.hierarchy import dendrogram\nimport matplotlib.pyplot as plt\n\ndef bhclust(dat, family, alpha, r = 0.001):\n \"\"\"Return a matrix in the format of linkage matrix for dendrogram\n @dat: N records of data with k columns\n @family: function to specify distribution for data. {\"multivariate\", \"bernoulli\"}\n @alpha: hyperparameter for the prior\n @r: scaling factor on the prior precision of the mean\n \"\"\"\n N, k = dat.shape\n la = log(alpha)\n\n if family == \"multivariate\":\n m = np.mean(dat, axis=0).reshape(k, 1)\n S = np.cov(dat.T)/10 # precision?\n def mlfunc(X):\n return niw(X, m, S, r)\n\n elif family == \"bernoulli\":\n #r=0.01\n m = np.mean(np.vstack((dat, np.ones(k)*r, np.zeros(k))), axis=0)\n alp= m*2; beta=(1-m)*2\n mlfunc = partial(bb, alp=alp, beta=beta)\n\n # leaf nodes\n SS = list(range(N))\n x0 = []; d0 = [la] * N\n ml = []\n for l in range(N):\n x0.append((l,))\n ml.append(mlfunc(dat[l,].reshape(1,k)))\n\n # paired base cases\n t = 0; PP = []\n c1 = []; c2 = []\n x = []; d = []\n lp1 = []; lp2 = []; lodds = []\n for i in range(N-1):\n for j in range(i+1, N):\n c1.append(i); c2.append(j)\n x.append(x0[i]+x0[j])\n u = la + gammaln(len(x[t]))\n v = d0[i] + d0[j]\n d.append((u + log(1 + exp(v - u))))\n lp1.append(mlfunc(dat[x[t],:]) + la + gammaln(len(x[t])) - d[t])\n lp2.append(ml[i] + ml[j] + d0[i] + d0[j] - d[t])\n lodds.append(lp1[t] - lp2[t])\n PP.append(t); t = t + 1\n\n # build tree, Z = [leaf1, leaf2, weight, #leaves]\n p = 0\n Z = []\n dye = {}\n while(1):\n idx = lodds.index(max([lodds[y] for y in PP]))\n Z.append([c1[idx], c2[idx], 1/lodds[idx], len(x[idx])])\n if lodds[idx] < 0:\n dye[N + p] = \"#FF0000\"\n else:\n dye[N + p] = \"#0013FF\"\n\n x0.append(x[idx]); d0.append(d[idx]); ml.append(lp1[idx] + log(1+exp(lp2[idx] - lp1[idx])))\n rm = set(Z[p][:2])\n SS = [y for y in SS if y not in rm]\n if len(SS) == 0:\n break\n\n for q in SS:\n c1.append(N+p); c2.append(q)\n x.append(x0[N+p] + x0[q])\n\n u = la + gammaln(len(x[t]))\n v = d0[N+p] + d0[q]\n d.append((u + log(1 + exp(v - u))))\n lp1.append(mlfunc(dat[x[t],:]) + la + gammaln(len(x[t])) - d[t])\n lp2.append(ml[N+p] + ml[q] + d0[N+p] + d0[q] - d[t])\n lodds.append(lp1[t] - lp2[t])\n PP.append(t); t = t + 1\n\n PP = [y for y in PP if c1[y] not in rm and c2[y] not in rm]\n SS.append(N + p); p = p + 1\n\n Z_ = weighted(Z, N)\n\n return Z_, dye\n\n\ndef weighted(Z, N):\n mw = max([y[2] for y in Z])\n for i in range(len(Z)):\n if Z[i][2] < 0:\n Z[i][2] = 2 * mw\n if Z[i][0] > (N - 1):\n Z[i][2] += Z[Z[i][0] - N][2]\n if Z[i][1] > (N - 1):\n Z[i][2] += Z[Z[i][1] - N][2]\n return Z\n\n\ndef scale_matrix(X, N, k, r, m, S):\n \"\"\"Return scale matrix for the inverse-Wishart distribution on Sigma.\n @X: N records of data with k columns\n @m: prior on the mean, k * 1\n @S: prior on the covariance, k * k\n \"\"\"\n\n xsum = np.sum(X, axis = 0).reshape(k,1) # column sum\n t1 = X.T @ X\n t2 = r * N / (N + r) * (m @ m.T)\n t3 = 1/(N+r) * (xsum @ xsum.T)\n t4 = (r / (N + r)) * (m @ xsum.T + xsum @ m.T)\n\n Sprime = S + t1 + t2 - t3 - t4\n\n return Sprime\n\n\ndef niw(X, m, S, r):\n \"\"\"Return marginal likelihood for multivariate normal data using the conjugate prior distribution normal-inverse-Wishart\n @X: N records of data with k columns\n @m: prior on the mean, k * 1\n @S: prior on the covariance, k * k\n @r: scaling factor on the prior precision of 
the mean\n \"\"\"\n\n N, k = X.shape\n v = k\n vprime = v + N\n Sprime = scale_matrix(X, N, k, r, m, S)\n\n t1 = (2 * np.pi) ** (- N * k / 2)\n t2 = (r / (N + r)) ** (k/2)\n t3 = np.linalg.det(S) ** (v/2)\n t4 = np.linalg.det(Sprime) ** (-vprime/2)\n t5num = np.prod(gamma( (vprime - np.arange(k))/2 ) ) * (2 ** (vprime * k / 2))\n t5den = np.prod(gamma( (v - np.arange(k))/2 ) ) * (2 ** (v * k / 2))\n\n ml = t1 * t2 * t3 * t4 * (t5num/t5den)\n\n return np.log(ml)\n\ndef bb(X, alp=0.001, beta=0.01):\n \"\"\"Return marginal likelihood for bernoulli data using the conjugate prior distribution Bernoulli-Beta\n @X: N records of data with k columns\n @alpha, beta: hyperparmeter for Beta distribution\n \"\"\"\n md = np.sum(X,axis=0)\n N = X.shape[0]\n num = gammaln(alp+beta) + gammaln(alp+md) + gammaln(beta+N-md)\n den = gammaln(alp) + gammaln(beta) + gammaln(alp+beta+N)\n return np.sum(num - den)\n\n#No consider mean relates to alphas\ndef bhclust_BB(X, alpha = 0.001):\n \"\"\"Calculate P(Dk|Tk)\n Return linkage_matrix\n \"\"\"\n linkage_list = []\n linkage_list_out = []\n nk = 2\n maximum = 0.01\n dim = X.copy().shape[0]\n merge_dim = X.shape[0]\n obs_list = [i for i in range(1,dim+1)]\n dye = {}\n while (nk < dim and maximum !=0):\n maximum = 0\n for i in obs_list:\n for j in obs_list:\n if (j>i):\n if (i<=dim and j<=dim):\n s, w = i-1, j-1\n nk = 2\n prob_DTi, prob_DTj = prob_DH1(X[s]), prob_DH1(X[w])\n di, dj = alpha, alpha\n elif (i<=dim and j>dim):\n s = i-1\n w = np.array(linkage_list[j-dim-1][:2]) - 1\n nk = linkage_list[j-dim-1][3] + 1\n prob_DTi, prob_DTj = prob_DH1(X[s]), linkage_list[j-dim-1][4]\n di, dj = alpha, linkage_list[j-dim-1][5]\n elif (i>dim and j>dim):\n s = np.array(linkage_list[i-dim-1][:2])-1\n w = np.array(linkage_list[j-dim-1][:2])-1\n nk = linkage_list[i-dim-1][3] + linkage_list[j-dim-1][3]\n prob_DTi, prob_DTj = linkage_list[i-dim-1][4], linkage_list[j-dim-1][4]\n di, dj = linkage_list[i-dim-1][5], linkage_list[j-dim-1][5]\n\n Dk_tmp = np.vstack((X[s],X[w]))\n\n dk = alpha*gamma(nk)+di*dj\n\n pik = alpha*gamma(nk)/dk\n prob_DT = prob_DH1(Dk_tmp)*pik + prob_DTi * prob_DTj * di * dj / dk\n\n rk = pik*prob_DH1(Dk_tmp)/prob_DT\n if (rk > maximum):\n maximum = rk\n merge_i = i\n merge_j = j\n merge_prob_DTi = prob_DT.copy()\n merge_Dk = Dk_tmp.copy()\n merge_dk = dk\n if (maximum ==0):\n break\n if (maximum > 0.5):\n dye[merge_dim] = \"#0013FF\"\n else:\n dye[merge_dim] = \"#FF0000\"\n merge_dim+=1\n obs_list.append(merge_dim)\n\n if (merge_i) in obs_list: obs_list.remove(merge_i) #remove merged observations' idx from list\n if (merge_j) in obs_list: obs_list.remove(merge_j)\n\n X = np.vstack((X,merge_Dk))\n nk = merge_Dk.shape[0]\n linkage_list.append([merge_i, merge_j, np.log(maximum/(1-maximum)), nk, merge_prob_DTi, merge_dk])\n linkage_list_out.append([merge_i-1, merge_j-1, np.log(maximum/(1-maximum)), nk])\n\n return (linkage_list_out, dye)\n\ndef prob_DH1(X, alpha=0.8, beta=0.2):\n \"\"\"Return marginal likelihood for bernoulli data using the conjugate prior distribution Bernoulli-Beta\n @X: N records of data with k columns\n @alpha, beta: hyperparmeter for Beta distribution\n \"\"\"\n md = np.sum(X,axis=0)\n N = X.shape[0]\n nominator = np.array(gamma(alpha+beta)*gamma(alpha+md))*np.array(gamma(beta+N-md))\n denominator = gamma(alpha)*gamma(beta)*gamma(alpha+beta+N)\n return np.prod(nominator/denominator)\n\ndef bb_draw(X_test):\n ttt, colorb = bhclust_BB(X=X_test)\n N = X_test.shape[0]\n Z1 = np.array(ttt)\n Z1[:,2] = 1/Z1[:,2]\n maxw = max(Z1[:,2])\n Z1[Z1[:,2] < 
0,2] = 2*maxw\n for i in range(Z1.shape[0]):\n if Z1[i, 0] > (N-1):\n Z1[i, 2] += Z1[Z1[i, 0].astype(\"int\")-N, 2]\n if Z1[i,1] > (N-1):\n Z1[i,2] += Z1[Z1[i,1].astype(\"int\")-N, 2]\n\n dendrogram(Z1,link_color_func=lambda k: colorb[k])\n plt.show()\n","sub_path":"bhc/bhc.py","file_name":"bhc.py","file_ext":"py","file_size_in_byte":8642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"649207854","text":"import os\nfrom datetime import datetime\nimport time\nfrom crontab import CronTab\nfrom croniter import croniter\nimport logging\n\n\ndef child(command):\n os.system(command)\n os._exit(0)\n\n\ndef parent():\n base = datetime(datetime.now().year, datetime.now().month, datetime.now().day, datetime.now().hour,\n datetime.now().minute)\n\n logging.basicConfig(filename='logi.log', filemode='w', level=logging.DEBUG,\n format='%(asctime)s:%(name)s:%(levelname)s: %(message)s',\n )\n\n word_list=[]\n time_list = []\n iter_list = []\n counters =[]\n logging.info(\"Start file processing\")\n cron = CronTab(tabfile='task.tab')\n for word in cron:\n str_word =str(word)\n if str_word[:1] == '#':\n continue\n word_list.append(str_word)\n\n for i in range(len(word_list)):\n count=0\n for j in word_list[i]:\n if j.isalpha():\n if word_list[i][count-1:count] == '/':\n count-=1\n counters.append(count)\n break\n count+=1\n iter = croniter(word_list[i][:count], base)\n iter_list.append(iter)\n\n for j in range(len(word_list)):\n compare_time = iter_list[j].get_next(datetime)\n time_list.append(compare_time)\n\n logging.info(\"File processing is done\")\n logging.info(\"Read job(s): '{}'\".format(len(word_list)))\n size = len(word_list)\n while True:\n\n i=0\n for i in range(size):\n if time_list[i] == datetime(datetime.now().year, datetime.now().month, datetime.now().day, datetime.now().hour,datetime.now().minute):\n time_list[i] = iter_list[i].get_next(datetime)\n logging.info(\"Starting process command: '{}'\".format((word_list[i][counters[i]:])))\n pid = os.fork()\n if pid == 0:\n\n try:\n child(word_list[i][counters[i]:])\n except Exception as e:\n logging.error(e)\n logging.info(\"Finish process\")\n \n time.sleep(1)\n\nparent()\n\n\n\n\n\n\n\n","sub_path":"cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"360272728","text":"def solution(N, A):\r\n result = [0]*N\r\n max_counter = 0\r\n current_max = 0\r\n \r\n for command in A:\r\n if 1 <= command <= N:\r\n if max_counter > result[command-1]:\r\n result[command-1] = max_counter\r\n result[command-1] += 1\r\n if current_max < result[command-1]:\r\n current_max = result[command-1]\r\n else:\r\n max_counter = current_max\r\n \r\n for index in range(0,N):\r\n if result[index] < max_counter:\r\n result[index] = max_counter\r\n \r\n return result\r\n\r\ndef solution1(N, A):\r\n B = [0] * N\r\n count = 0\r\n last = 0\r\n \r\n for i in xrange(len(A)):\r\n if (A[i] > N):\r\n last = count\r\n else:\r\n B[A[i] - 1] = max(B[A[i] - 1], last) + 1\r\n count = max(count, B[A[i] - 1])\r\n \r\n for i in xrange(len(B)):\r\n B[i] = max(B[i], last)\r\n return B","sub_path":"codility/MaxCounters.py","file_name":"MaxCounters.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"270015030","text":"\"\"\"\n判断输入的正整数是不是回文数\n回文数是指将一个正整数从左往右排列和从右往左排列值一样的数\n\"\"\"\n\nnum = int(input(\"请输入数字:\"))\nreversedNum = 0\ntemp = num\n\nwhile temp > 0:\n # 取最低位数字 + 回文数\n reversedNum = reversedNum * 10 + temp % 10\n # 临时数取整\n temp = temp // 10\n\nprint(\"输入数字为%d, 反转后数字为 %d 。\" % (num, reversedNum))\nif reversedNum == num :\n print(\"%d 是回文数\" % num) \nelse:\n print(\"%d 不是回文数\" % num) \n","sub_path":"day05/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"643702762","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#The MIT License (MIT)\n#\n#Copyright (c) <2013-2014> \n#\n#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n#The above copyright notice and this permission notice shall be included in\n#all copies or substantial portions of the Software.\n#\n#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n#THE SOFTWARE.\n#\n\"\"\" Contains the automatic generic indenter \"\"\"\nimport re\nfrom pyqode.core.mode import Mode\nfrom pyqode.qt.QtCore import Qt\nfrom pyqode.qt.QtGui import QTextCursor, QKeyEvent\n\n\nclass AutoIndentMode(Mode):\n \"\"\"\n Generic indenter mode that indents the text when the user press RETURN.\n\n You can customize this mode by overriding\n :meth:`pyqode.core.AutoIndentMode._getIndent`\n \"\"\"\n #: Identifier\n IDENTIFIER = \"autoIndentMode\"\n #: Description\n DESCRIPTION = \"\"\" A basic auto indent mode that provides a basic auto\n indentation based on the previous line indentation.\n \"\"\"\n\n def __init__(self):\n super(AutoIndentMode, self).__init__()\n self.minIndent = \"\"\n\n def _getIndent(self, tc):\n \"\"\"\n Return the indentation text (a series of spaces or tabs)\n\n :param tc: QTextCursor\n\n :returns: Tuple (text before new line, text after new line)\n \"\"\"\n pos = tc.position()\n # tc.movePosition(QTextCursor.StartOfLine)\n # tc.setPosition(tc.position() - 1)\n tc.movePosition(QTextCursor.StartOfLine)\n tc.select(QTextCursor.LineUnderCursor)\n s = tc.selectedText()\n indent = re.match(r\"\\s*\", s).group()\n tc.setPosition(pos)\n if len(indent) < len(self.minIndent):\n indent = self.minIndent\n return \"\", indent\n\n def _onStateChanged(self, state):\n if state is True:\n self.editor.keyPressed.connect(self.__onKeyPressed)\n else:\n self.editor.postKeyPressed.disconnect(self.__onKeyPressed)\n\n def __onKeyPressed(self, keyEvent):\n \"\"\"\n Auto indent if the released key is the return key.\n :param keyEvent: the key event\n \"\"\"\n if keyEvent.isAccepted():\n return\n if keyEvent.key() == Qt.Key_Return or keyEvent.key() == Qt.Key_Enter:\n tc = self.editor.textCursor()\n pre, post = self._getIndent(tc)\n tc.insertText(\"%s\\n%s\" % (pre, post))\n\n # eats possible whitespaces\n tc.movePosition(tc.WordRight, tc.KeepAnchor)\n txt = tc.selectedText()\n if txt.startswith(' '):\n new_txt = txt.replace(\" \", '')\n if len(txt) > len(new_txt):\n tc.insertText(new_txt)\n\n keyEvent.accept()\n","sub_path":"pyqode/core/modes/autoindent.py","file_name":"autoindent.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"63483312","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 28 22:28:50 2021\n\n@author: admin\n\"\"\"\n\ndef take_square(num1, num2):\n dictionary = dict()\n \n for i in range(num1, num2+1):\n if i % 2 != 0:\n dictionary[i] = i ** 2\n \n return dictionary\n\ntake_square(1, 20)","sub_path":"projects/Square.py","file_name":"Square.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"427458550","text":"#Embedded file name: dogma/attributes\\utils.py\n\"\"\"\n Utility functions for dogma attributes.\n\"\"\"\nimport dogma.attributes\n\ndef GetAttributeValuesByCategoryNames(dbdogma, attributeList):\n \"\"\"\n Gets attributes by attributeCategory from the DB.\n :param dbdogma: dogma schema object from DB2\n :param attributeList: dict of key:attributeID and item:value\n :return attributesByCategories: dict with key:categoryName and item:dict with key:attributeID and item:value.\n \"\"\"\n categories = dbdogma.AttributeCategories_Select().Index('categoryID')\n attributesByCategories = {}\n for attributeID, value in attributeList.iteritems():\n attribute = dogma.attributes.GetAttribute(attributeID)\n categoryName = categories[attribute.categoryID].categoryName\n if categoryName not in attributesByCategories:\n attributesByCategories[categoryName] = []\n attributesByCategories[categoryName].append((attributeID, attribute.attributeName, value))\n\n for category, attributes in attributesByCategories.iteritems():\n attributesByCategories[category] = sorted(attributes, key=lambda x: x[1])\n\n return attributesByCategories\n\n\ndef GetDisplayNamesForAttributeList(attributeList):\n \"\"\"\n Gets display names for a list (or dict) of attributes.\n If no display name exists it gets the default name.\n :param attributeList: list or dict containing attributes\n :return attributeNames: list of names\n \"\"\"\n attributeNames = []\n for attribute in attributeList:\n name = dogma.attributes.GetDisplayName(attribute)\n attributeNames.append(name)\n\n return attributeNames\n","sub_path":"dogma/attributes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"20080054","text":"import contextlib\nimport gc\nimport os\nimport signal\nfrom unittest.mock import DEFAULT, MagicMock, Mock, call, patch\n\nimport pytest\n\nimport aio\nimport aio.loop._priv\nfrom aio.interfaces import Clock, Handle, LoopPolicy, LoopStopped\nfrom aio.loop.pure import BaseEventLoop, BaseLoopRunner\nfrom aio.loop.pure.scheduler import Scheduler\nfrom tests.utils import mock_wraps\n\n\ndef process_callback_exception(exc, **__) -> None:\n if isinstance(exc, AssertionError):\n raise exc\n\n import traceback\n\n traceback.print_exception(type(exc), exc, exc.__traceback__)\n pytest.fail(\"No unhandled exceptions is allowed inside callbacks during testing\")\n\n\n@pytest.fixture\ndef clock():\n clock = MagicMock(Clock, name=\"clock\")\n clock.now.return_value = 50.0\n clock.resolution.return_value = 0.1\n return clock\n\n\n@pytest.fixture\ndef selector(clock):\n selector = Mock(name=\"selector\")\n\n def selector_select(time_):\n if time_ is None:\n return []\n clock.now.return_value += time_\n return []\n\n selector.select = Mock(wraps=selector_select)\n return selector\n\n\n@pytest.fixture\ndef loop_policy(selector, clock):\n @contextlib.contextmanager\n def create_loop():\n yield BaseEventLoop(selector, clock=clock)\n\n policy = Mock(LoopPolicy)\n policy.create_loop = create_loop\n policy.create_loop_runner = lambda loop: BaseLoopRunner(loop)\n policy.create_networking.side_effect = RuntimeError(\"Forbidden\")\n policy.create_executor.side_effect = RuntimeError(\"Forbidden\")\n\n with patch.object(aio.loop._priv.loop_global_cfg, \"policy\", policy, create=True):\n yield policy\n\n\nclass TestLoopStepping:\n @pytest.fixture\n def make_loop(self, selector, clock):\n return lambda scheduler: BaseEventLoop(\n selector,\n clock=clock,\n scheduler=scheduler,\n exception_handler=process_callback_exception,\n )\n\n def test_runs_io_callbacks(self, selector, make_loop):\n # TODO\n pass\n\n def test_runs_only_expired_cbs(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb0 = parent_cb.cb0\n cb1 = parent_cb.cb1\n scheduler = Scheduler([], [Handle(55.0, cb0), Handle(60.0, cb1)])\n\n make_loop(scheduler).run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert scheduler.get_items() == [Handle(60.0, cb1)]\n assert parent_cb.mock_calls == [\n call.cb0(),\n ]\n assert clock.now() == 55.0\n\n def test_dont_runs_pending_if_cancelled(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb1 = parent_cb.cb1\n cb2 = parent_cb.cb2\n handle1 = Handle(None, cb1, cancelled=True)\n handle2 = Handle(60.0, cb2)\n scheduler = Scheduler([handle1], [handle2])\n\n make_loop(scheduler).run_step()\n\n assert selector.mock_calls == [call.select(10.0)]\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [\n call.cb2(),\n ]\n assert clock.now() == 60.0\n\n def test_dont_runs_enqueued_if_cancelled(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb1 = parent_cb.cb1\n cb2 = parent_cb.cb2\n handle1 = Handle(55.0, cb1, cancelled=True)\n handle2 = Handle(60.0, cb2)\n scheduler = Scheduler([], [handle1, handle2])\n\n make_loop(scheduler).run_step()\n\n assert selector.mock_calls == [call.select(10.0)]\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [\n call.cb2(),\n ]\n assert clock.now() == 60.0\n\n def test_dont_runs_pending_if_cancelled_during_select(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb1 = parent_cb.cb1\n cb2 = parent_cb.cb2\n handle1 = Handle(55.0, cb1)\n handle2 = Handle(60.0, cb2)\n scheduler = 
Scheduler([], [handle1, handle2])\n selector.select.side_effect = (\n # returning DEFAULT forces mock to proceed to call the 'wraps' object\n lambda *_: (handle1.cancel() or DEFAULT)\n )\n\n make_loop(scheduler).run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert scheduler.get_items() == [handle2]\n assert parent_cb.mock_calls == []\n assert clock.now() == 55.0\n\n @pytest.mark.parametrize(\"same_time_events_count\", [2, 3, 4, 5])\n def test_runs_only_expired_cbs_have_same_time_events(\n self, clock, selector, make_loop, same_time_events_count\n ):\n parent_cb = Mock()\n cbs = [getattr(parent_cb, f\"cb{i}\") for i in range(same_time_events_count)]\n last_cb = parent_cb.cb2\n scheduler = Scheduler([], [Handle(55.0, cb) for cb in cbs] + [Handle(60.0, last_cb)])\n\n make_loop(scheduler).run_step()\n\n assert scheduler.get_items() == [Handle(60.0, last_cb)]\n assert parent_cb.mock_calls == [\n getattr(call, f\"cb{i}\")() for i in range(same_time_events_count)\n ]\n assert selector.mock_calls == [call.select(5.0)]\n assert clock.now() == 55.0\n\n def test_runs_only_expired_cbs2(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb0 = parent_cb.cb0\n cb1 = parent_cb.cb1\n scheduler = Scheduler([], [Handle(55.0, cb0), Handle(60.0, cb1)])\n loop = make_loop(scheduler)\n\n loop.run_step()\n assert selector.mock_calls == [call.select(5.0)]\n assert clock.now() == 55.0\n assert parent_cb.mock_calls == [\n call.cb0(),\n ]\n\n loop.run_step()\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [\n call.cb0(),\n call.cb1(),\n ]\n assert selector.mock_calls == [call.select(5.0), call.select(5.0)]\n assert clock.now() == 60.0\n\n def test_runs_only_expired_cbs3(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb0 = parent_cb.cb0\n cb1 = parent_cb.cb1\n cb2 = parent_cb.cb2\n scheduler = Scheduler([], [Handle(55.0, cb0), Handle(60.0, cb1), Handle(65.0, cb2)])\n loop = make_loop(scheduler)\n\n for i in range(3):\n loop.run_step()\n\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [\n call.cb0(),\n call.cb1(),\n call.cb2(),\n ]\n assert selector.mock_calls == [\n call.select(5.0),\n call.select(5.0),\n call.select(5.0),\n ]\n assert clock.now() == 65.0\n\n def test_runs_pending_cbs_immediately(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb0 = parent_cb.cb0\n cb1 = parent_cb.cb1\n scheduler = Scheduler([Handle(None, cb0), Handle(None, cb1)])\n loop = make_loop(scheduler)\n\n loop.run_step()\n\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [call.cb0(), call.cb1()]\n assert selector.mock_calls == [call.select(0)]\n assert clock.now() == 50.0\n\n def test_runs_only_pending_cbs(self, selector, clock, make_loop):\n cb1 = Mock()\n cb2 = Mock()\n scheduler = Scheduler([Handle(None, cb1)], [Handle(60, cb2)])\n loop = make_loop(scheduler)\n\n loop.run_step()\n\n assert scheduler.get_items() == [Handle(60, cb2)]\n assert cb1.mock_calls == [call()]\n assert cb2.mock_calls == []\n assert selector.mock_calls == [call.select(0)]\n assert clock.now() == 50\n\n @pytest.mark.parametrize(\"now\", [0.0, 15.0])\n def test_executes_handle_eagerly_if_time_less_clock_resolution(\n self, selector, clock, make_loop, now\n ):\n clock.now.return_value = now\n cb1 = Mock()\n cb2 = Mock()\n h1 = Handle(now + clock.resolution() / 2, cb1)\n h2 = Handle(now + clock.resolution() * 2, cb2)\n scheduler = Scheduler([], [h1, h2])\n loop = make_loop(scheduler)\n\n loop.run_step()\n\n assert scheduler.get_items() == [h2]\n assert cb1.mock_calls == [call()]\n assert selector.mock_calls == [call.select(0)]\n assert clock.now() == now\n\n def test_run_both_pending_and_scheduled(self, clock, selector, make_loop):\n parent_cb = Mock()\n rcb0 = parent_cb.rcb0\n rcb1 = parent_cb.rcb1\n scb0 = parent_cb.scb0\n scb1 = parent_cb.scb1\n\n scheduler = Scheduler(\n [Handle(None, rcb0), Handle(None, rcb1)],\n [Handle(55.0, scb0), Handle(60.0, scb1)],\n )\n loop = make_loop(scheduler)\n\n # must consume all pending cbs and first scheduled on second step\n for i in range(2):\n loop.run_step()\n\n assert scheduler.get_items() == [Handle(60.0, scb1)]\n assert selector.mock_calls == [call.select(0), call.select(5.0)]\n\n # call order isn't guaranteed\n assert sorted(parent_cb.mock_calls) == sorted([call.rcb0(), call.rcb1(), call.scb0()])\n\n def test_enqueue_pending_during_select(self, selector, clock, make_loop):\n first_cb = Mock(name=\"first-cb\")\n enqueued_cb = Mock(name=\"enqueued-cb\")\n\n scheduler = Scheduler(enqueued=[Handle(55.0, first_cb)])\n loop = make_loop(scheduler)\n\n selector.select.side_effect = (\n # returning DEFAULT forces mock to proceed to call the 'wraps' object\n lambda *_: (scheduler.enqueue(Handle(None, enqueued_cb)) or DEFAULT)\n )\n\n loop.run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert first_cb.mock_calls == [call()]\n assert enqueued_cb.mock_calls == [call()]\n assert scheduler.get_items() == []\n\n def test_enqueue_later_during_select(self, selector, clock, make_loop):\n first_cb = Mock(name=\"first-cb\")\n enqueued_cb = Mock(name=\"enqueued-cb\")\n\n scheduler = Scheduler(enqueued=[Handle(55.0, first_cb)])\n loop = make_loop(scheduler)\n\n selector.select.side_effect = (\n # returning DEFAULT forces mock to proceed to call the 'wraps' object\n lambda *_: (scheduler.enqueue(Handle(clock.now(), enqueued_cb)) or DEFAULT)\n )\n\n loop.run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert first_cb.mock_calls == [call()]\n assert enqueued_cb.mock_calls == [call()]\n assert scheduler.get_items() == []\n\n def test_enqueue_much_later_during_select(self, selector, clock, make_loop):\n first_cb = Mock(name=\"first-cb\")\n enqueued_cb = Mock(name=\"enqueued-cb\")\n enqueued_handle = Handle(100, enqueued_cb)\n\n scheduler = Scheduler(enqueued=[Handle(55.0, first_cb)])\n loop = make_loop(scheduler)\n\n selector.select.side_effect = (\n # returning DEFAULT forces mock to proceed to call the 'wraps' object\n lambda *_: (scheduler.enqueue(enqueued_handle) or DEFAULT)\n )\n\n loop.run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert first_cb.mock_calls == [call()]\n assert enqueued_cb.mock_calls == []\n assert scheduler.get_items() == [enqueued_handle]\n\n def test_sets_running_loop_cv_in_handle_callback(self, make_loop):\n with pytest.raises(LookupError):\n aio.loop._priv.running_loop.get()\n\n @mock_wraps\n def handle_cb():\n assert aio.loop._priv.running_loop.get() is loop\n\n loop = make_loop(Scheduler([Handle(None, handle_cb)]))\n loop.run_step()\n assert handle_cb.mock_calls == [call()]\n\n with pytest.raises(LookupError):\n aio.loop._priv.running_loop.get()\n\n def test_sets_running_loop_cv_in_io_callback(self, make_loop, selector):\n with pytest.raises(LookupError):\n aio.loop._priv.running_loop.get()\n\n test_exc = OSError(\"Test OS error\")\n\n @mock_wraps\n def io_callback(*_):\n assert aio.loop._priv.running_loop.get() is loop\n\n selector.select = lambda *_: [(io_callback, 0, 0, test_exc)]\n\n loop = make_loop(Scheduler())\n loop.run_step()\n\n assert io_callback.mock_calls == [call(0, 0, test_exc)]\n\n with pytest.raises(LookupError):\n aio.loop._priv.running_loop.get()\n\n\nclass TestLoopRunner:\n @pytest.fixture\n def loop(self, loop_policy):\n with loop_policy.create_loop() as loop:\n yield loop\n\n @pytest.fixture\n def loop_runner(self, loop_policy, loop):\n return loop_policy.create_loop_runner(loop)\n\n def test_runs_one_callback(self, loop, loop_runner):\n def callback():\n loop_runner.stop_loop()\n\n cb_mock = Mock(wraps=callback)\n\n loop.call_soon(cb_mock)\n\n with pytest.raises(LoopStopped):\n loop_runner.run_loop()\n\n assert cb_mock.mock_calls == [call()]\n\n def test_runs_callbacks_after_stop_callback(self, loop, loop_runner):\n def callback1():\n loop_runner.stop_loop()\n loop.call_soon(cb2_mock)\n\n cb1_mock = Mock(wraps=callback1)\n cb2_mock = Mock()\n\n loop.call_soon(cb1_mock)\n\n with pytest.raises(LoopStopped):\n loop_runner.run_loop()\n\n assert cb1_mock.mock_calls == [call()]\n assert cb2_mock.mock_calls == [call()]\n\n def test_runs_late_callbacks_after_stop_callback(self, loop, loop_runner):\n def callback1():\n loop_runner.stop_loop()\n loop.call_later(10, cb2_mock)\n\n cb1_mock = Mock(wraps=callback1)\n cb2_mock = Mock()\n\n loop.call_soon(cb1_mock)\n\n time_before = loop.clock.now()\n with pytest.raises(LoopStopped):\n loop_runner.run_loop()\n\n assert loop.clock.now() - time_before == 10\n assert cb1_mock.mock_calls == [call()]\n assert cb2_mock.mock_calls == [call()]\n\n\n@pytest.mark.usefixtures(\"loop_policy\")\nclass TestEntryRun:\n def test_runs_simple_coroutine(self):\n should_be_called = Mock()\n\n async def root():\n should_be_called()\n\n aio.run(root())\n\n assert should_be_called.mock_calls == [call()]\n\n def test_returns_coroutine_result(self):\n result = Mock(name=\"result\")\n\n async def root():\n return result\n\n assert aio.run(root()) == result\n\n def test_propagates_coroutine_exception(self):\n async def root():\n raise Exception(\"Some exception\")\n\n with pytest.raises(Exception, match=\"Some exception\"):\n aio.run(root())\n\n def test_runs_multi_suspend_coroutine(self, clock):\n should_be_called = Mock()\n\n async def root():\n at_start = clock.now()\n\n for i in range(1, 11):\n await aio.sleep(1)\n assert clock.now() - at_start == 1.0 * i\n\n should_be_called()\n\n aio.run(root())\n assert should_be_called.mock_calls == [call()]\n\n def test_changes_sigint_to_cancelled_v1(self):\n should_be_called = Mock()\n should_not_be_called = Mock()\n\n async def root():\n should_be_called()\n # Probably won't work in a lot of cases and may cause weird behaviour\n os.kill(os.getpid(), signal.SIGINT)\n # Suspend coroutine to initialize further processing\n await aio.sleep(10)\n\n should_not_be_called()\n\n with pytest.raises(aio.KeyboardCancelled):\n aio.run(root())\n\n assert should_be_called.mock_calls == [call()]\n assert should_not_be_called.mock_calls == []\n\n def test_changes_sigint_to_cancelled_v2(self):\n should_be_called = Mock()\n should_not_be_called = Mock()\n\n async def root():\n should_be_called()\n # Probably won't work in a lot of cases and may cause weird behaviour\n loop_ = await aio.loop.get_running()\n loop_.call_soon(os.kill, os.getpid(), signal.SIGINT)\n # Suspend coroutine to initialize further processing\n await aio.sleep(10)\n\n should_not_be_called()\n\n with pytest.raises(aio.KeyboardCancelled):\n aio.run(root())\n\n assert should_be_called.mock_calls == [call()]\n assert should_not_be_called.mock_calls == []\n\n def 
test_should_warn_if_async_gen_being_gc_while_not_finished(self, clock):\n should_be_called_in_root = Mock()\n should_be_called_in_gen = Mock()\n should_not_be_called_in_gen = Mock()\n\n async def async_gen():\n should_be_called_in_gen()\n yield 1\n yield 2\n should_not_be_called_in_gen()\n\n async def root():\n should_be_called_in_root()\n\n gen = async_gen()\n await gen.asend(None)\n del gen\n\n gc.collect()\n\n with pytest.warns(UserWarning) as warn_info:\n aio.run(root())\n\n assert should_be_called_in_root.mock_calls == [call()]\n assert should_be_called_in_gen.mock_calls == [call()]\n assert should_not_be_called_in_gen.mock_calls == []\n assert any(\n \"Async-generator shutdown request income for\" in warn.message.args[0]\n and warn.filename == __file__\n for warn in warn_info.list\n )\n\n def test_should_not_warn_if_async_gen_being_gc_after_finish(self, clock):\n should_be_called_in_root = Mock()\n should_be_called_in_gen = Mock()\n\n async def async_gen():\n should_be_called_in_gen()\n yield 1\n yield 2\n should_be_called_in_gen()\n\n async def root():\n should_be_called_in_root()\n\n async for _ in async_gen():\n pass\n\n gc.collect()\n\n with pytest.WarningsRecorder(_ispytest=True) as warn_info:\n aio.run(root())\n\n assert should_be_called_in_root.mock_calls == [call()]\n assert should_be_called_in_gen.mock_calls == [call(), call()]\n assert warn_info.list == []\n","sub_path":"tests/test_loop.py","file_name":"test_loop.py","file_ext":"py","file_size_in_byte":17912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"160815672","text":"from __future__ import division\n\nfrom models import *\nfrom utils.utils import *\nfrom utils.datasets import *\nfrom utils.parse_config import *\n\nimport os\nimport sys\nimport time\nimport datetime\nimport argparse\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport torch.optim as optim\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=30, help='number of epochs')\nparser.add_argument('--image_folder', type=str, default='data/samples', help='path to dataset')\nparser.add_argument('--train_image_path', type=str, default='data/train_data/images', help='path to train images')\nparser.add_argument('--batch_size', type=int, default=8, help='size of each image batch')\nparser.add_argument('--model_config_path', type=str, default='config/yolov3.cfg', help='path to model config file')\nparser.add_argument('--data_config_path', type=str, default='config/coco.data', help='path to data config file')\nparser.add_argument('--weights_path', type=str, default='checkpoints_pre/57.weights', help='path to weights file')\nparser.add_argument('--load_weights', type=bool, default=False, help='whether to load pretrained weights')\nparser.add_argument('--class_path', type=str, default='data/smile.names', help='path to class label file')\nparser.add_argument('--conf_thres', type=float, default=0.8, help='object confidence threshold')\nparser.add_argument('--nms_thres', type=float, default=0.4, help='iou thresshold for non-maximum suppression')\nparser.add_argument('--n_cpu', type=int, default=0, help='number of cpu threads to use during batch generation')\nparser.add_argument('--img_size', type=int, default=416, help='size of each image dimension')\nparser.add_argument('--checkpoint_interval', type=int, default=1, help='interval between saving model weights')\nparser.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='directory where model checkpoints are saved')\nparser.add_argument('--use_cuda', type=bool, default=False, help='whether to use cuda if available')\nopt = parser.parse_args()\nprint(opt)\n\ncuda = torch.cuda.is_available() and opt.use_cuda\n\nos.makedirs('output', exist_ok=True)\nos.makedirs('checkpoints', exist_ok=True)\n\nclasses = load_classes(opt.class_path)\n\n\n# # Get data configuration\n# data_config = parse_data_config(opt.data_config_path)\n# train_path = data_config['train']\n\n# Get hyper parameters\nhyperparams = parse_model_config(opt.model_config_path)[0]\nlearning_rate = float(hyperparams['learning_rate'])\nmomentum = float(hyperparams['momentum'])\ndecay = float(hyperparams['decay'])\nburn_in = int(hyperparams['burn_in'])\n\n\n# Initiate model\nmodel = Darknet(opt.model_config_path)\nif opt.load_weights == True:\n model.load_weights(opt.weights_path)\n print('================================================================================\\n'\n 'Loaded pretrained weights: %s\\n' % opt.weights_path)\nelse:\n model.apply(weights_init_normal)\n print('================================================================================\\n'\n '[Warning] No weights loaded!!! 
Initializing weights\\n'\n ': If you want to load pretrained weights, set the argument load_weights to TRUE\\n')\n\nif cuda:\n model = model.cuda()\n\nmodel.train()\n\n# Get dataloader\ndataloader = torch.utils.data.DataLoader(\n ListDataset(opt.train_image_path),\n batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)\nprint('Data loaded: %d batches X %d images (with labels)\\n'\n '================================================================================'\n % (len(dataloader), opt.batch_size ))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\noptimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, dampening=0, weight_decay=decay)\n\nprint('Start Training...')\nfor epoch in range(opt.epochs):\n for batch_i, (_, imgs, targets) in enumerate(dataloader):\n imgs = Variable(imgs.type(Tensor))\n targets = Variable(targets.type(Tensor), requires_grad=False)\n\n optimizer.zero_grad()\n\n loss = model(imgs, targets)\n\n loss.backward()\n optimizer.step()\n\n print('[Epoch %d/%d, Batch %d/%d] [Losses: x %f, y %f, w %f, h %f, conf %f, cls %f, total %f, recall: %.5f]' %\n (epoch + 1, opt.epochs, batch_i + 1, len(dataloader),\n model.losses['x'], model.losses['y'], model.losses['w'],\n model.losses['h'], model.losses['conf'], model.losses['cls'],\n loss.item(), model.losses['recall']))\n\n model.seen += imgs.size(0)\n\n if epoch % opt.checkpoint_interval == 0:\n count = opt.weights_path.split('/')[1].split('.')[0]\n model.save_weights('%s/%d.weights' % (opt.checkpoint_dir, epoch+int(count)+1))\n print('Checkpoint saved')\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"312940837","text":"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\n\n\nclass ParserLSTM(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_layers=1,\n batch_first=False, dropout=0, bidirectional=False):\n super(ParserLSTM, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.batch_first = batch_first\n self.dropout = dropout\n self.bidirectional = bidirectional\n self.num_directions = 2 if bidirectional else 1\n\n self.f_cells = nn.ModuleList()\n self.b_cells = nn.ModuleList()\n for layer in range(self.num_layers):\n self.f_cells.append(nn.LSTMCell(input_size=input_size,\n hidden_size=hidden_size))\n if bidirectional:\n self.b_cells.append(nn.LSTMCell(input_size=input_size,\n hidden_size=hidden_size))\n input_size = hidden_size * self.num_directions\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for i in self.parameters():\n # apply orthogonal_ to weight\n if len(i.shape) > 1:\n nn.init.orthogonal_(i)\n # apply zeros_ to bias\n else:\n nn.init.zeros_(i)\n\n def _lstm_forward(self, x, hx, mask, cell, in_mask,\n hid_mask, reverse):\n output = []\n seq_len = x.size(0)\n if in_mask is not None:\n x = x * in_mask\n steps = reversed(range(seq_len)) if reverse else range(seq_len)\n\n for t in steps:\n h_next, c_next = cell(input=x[t], hx=hx)\n h_next = h_next * mask[t]\n c_next = c_next * mask[t]\n output.append(h_next)\n if hid_mask is not None:\n h_next = h_next * hid_mask\n hx = (h_next, c_next)\n if reverse:\n output.reverse()\n output = torch.stack(output, 0)\n\n return output\n\n def forward(self, x, mask, hx=None):\n if self.batch_first:\n x = x.transpose(0, 1)\n mask = mask.transpose(0, 1)\n mask = torch.unsqueeze(mask, dim=2).float()\n seq_len, batch_size, input_size = x.shape\n\n if hx is None:\n initial = x.new_zeros(batch_size, self.hidden_size)\n hx = (initial, initial)\n\n for layer in range(self.num_layers):\n in_mask, hid_mask, b_hid_mask = None, None, None\n if self.training:\n in_mask = torch.bernoulli(\n x.new_full((batch_size, x.size(2)), 1 - self.dropout)\n ) / (1 - self.dropout)\n hid_mask = torch.bernoulli(\n x.new_full((batch_size, self.hidden_size),\n 1 - self.dropout)\n ) / (1 - self.dropout)\n if self.bidirectional:\n b_hid_mask = torch.bernoulli(\n x.new_full((batch_size, self.hidden_size),\n 1 - self.dropout)\n ) / (1 - self.dropout)\n\n layer_output = self._lstm_forward(x=x,\n hx=hx,\n mask=mask,\n cell=self.f_cells[layer],\n in_mask=in_mask,\n hid_mask=hid_mask,\n reverse=False)\n\n if self.bidirectional:\n b_layer_output = self._lstm_forward(x=x,\n hx=hx,\n mask=mask,\n cell=self.b_cells[layer],\n in_mask=in_mask,\n hid_mask=b_hid_mask,\n reverse=True)\n if self.bidirectional:\n x = torch.cat([layer_output, b_layer_output], 2)\n else:\n x = layer_output\n\n if self.batch_first:\n x = x.transpose(0, 1)\n\n return x\n","sub_path":"parser/modules/parser_lstm.py","file_name":"parser_lstm.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"644083885","text":"# coding=utf-8\nfrom __future__ import unicode_literals\n\nimport django.db.models.deletion\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='RateSource',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ('base_currency', models.CharField(default=b'EUR', max_length=3, blank=True)),\n ('last_update', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='Rate',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('currency', models.CharField(max_length=3)),\n ('value', models.DecimalField(max_digits=14, decimal_places=6)),\n ('date', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.AddField(\n model_name='rate',\n name='source',\n field=models.ForeignKey(\n related_query_name=b'rate', related_name='rates', on_delete=models.PROTECT, to='txmoneyrates.RateSource'\n ),\n ),\n migrations.AlterUniqueTogether(\n name='ratesource',\n unique_together={('name', 'base_currency')},\n ),\n migrations.AlterUniqueTogether(\n name='rate',\n unique_together={('source', 'currency', 'date')},\n ),\n ]\n","sub_path":"txmoney/rates/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"425492692","text":"# Enter your code here. Read input from STDIN. Print output to STDOUT\r\ndef solve(s):\r\n l=[]\r\n g=[]\r\n l.append(s.split(' '))\r\n \r\n \r\n for i in l[0][::]:\r\n c=i.capitalize()\r\n g.append(c)\r\n return \" \".join(g)","sub_path":"Capitalize.py","file_name":"Capitalize.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"151006660","text":"from p37_csv_DictReader使用_num和getcolors函数 import num\nfrom p37_csv_DictReader使用_num和getcolors函数 import getcolors\n\nimport csv\nimport matplotlib.pyplot as plt\n\n\n\n\ndef getQbNames():\n\tqbnames = ['peyton Manning']\n\t# name = ''\n\ti = 0\n\twith open('./qb_data.csv') as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tfor row in reader:\n\t\t\tif(qbnames[i] != row['Name']):\n\t\t\t\tqbnames.append(row['Name'])\n\t\t\t\ti = i+ 1\n\treturn qbnames\n\ndef readQbdata():\n\tresultdata = []\n\twith open('./qb_data.csv') as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tresultdata = [row for row in reader]\n\t\treturn resultdata\n\n# fdata = []\n# prevysum = 0\n\n\nqbnames = getQbNames()\nfdata = readQbdata()\n\ni = 0\n# rank = 0\nprevysum = 0\nlastyr = 0\nhighrank = 300\ncolorsdata = getcolors()\n\nfig = plt.figure(figsize=(15,13))\nax = fig.add_subplot(111,axisbg='white')\n\n# limits for TD\nplt.xlim(10,800)\nplt.ylim(1940,2021)\n\ncolindex = 0\nlastage = 20\n\n\nfor qbn in qbnames:\n\tx = []\n\ty = []\n\tprevysum = 0\n\tfor row in fdata:\n\t\tif(row['Name'] == qbn and row['Year'] != 'Career'):\n\t\t\tyrval = num(row['Year'])\n\t\t\tlastage = num(row['Age'])\n\t\t\tprevysum += num(row['TD'])\n\t\t\tlastyr = yrval\n\t\t\ty += [yrval]\n\t\t\tx += [prevysum]\n\n\n\tif(prevysum > highrank):\n\t\tplt.plot(x, y, color=colorsdata[colindex], label=qbn, linewidth=2.5)\n\t\tplt.legend(loc=0, prop={'size':10})\n\t\tcolindex = (colindex+1)%22\n\t\tif qbn == 'Tom Brady':\n\t\t\tplt.text( prevysum+2,lastyr+1, qbn+\"(\"+str(prevysum)+\"):\"+str(lastage), \\\n\t\t\tfontsize=9)\n\t\telse:\n\t\t\tplt.text( prevysum+2,lastyr-1, qbn+\"(\"+str(prevysum)+\"):\"+str(lastage), \\\n\t\t\tfontsize=9)\n\telse:\n\t\tplt.plot(x, y, color=colorsdata[22], linewidth=1.5)\nplt.xlabel('Year', fontsize=18)\nplt.ylabel('Cumulative Touch Downs', fontsize=18)\nplt.title(\"Cumulative Touch Downs by Quarter Backs\", fontsize=20)\nplt.show()\n","sub_path":"python数据可视化/p42_体育案例_(触地得分,年份).py","file_name":"p42_体育案例_(触地得分,年份).py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"189006739","text":"\n\n\ndef get_input(data_type, message):\n \"\"\"Asks the user for input and confirms input is correct\"\"\"\n response = raw_input(\"{}{}\".format(message, \"\\n\"))\n\n # Ask User to confirm input\n check_confirmation = 1\n while check_confirmation == 1:\n confirm = raw_input(\n \"You entered: '{}'. Press ENTER if that is correct:\".format(response))\n if confirm == '':\n check_confirmation = 0\n else:\n response = raw_input(\"{}{}\".format(message, \"\\n\"))\n\n # If input is wrong type, promt user for input again\n check_error = 1\n while check_error == 1:\n try:\n response = data_type(response)\n except:\n error_message = \"Your input isnt the correct type. Try Again.\\n\"\n response = raw_input(\"{}{}{}\".format(error_message, message, \"\\n\"))\n else: check_error = 0\n\n return response\n","sub_path":"game_chooser/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"207726009","text":"# Import Packages\nfrom pypresence import Presence\nimport time\n\nclient_id = \"801321552250535977\" # Your application id here\n\n# button labels and urls\nbtn_label_1 = \"Invite Cartero\"\nbtn_label_2 = \"Join Support Server\"\nbtn_url_1 = \"https://discord.com/api/oauth2/authorize?client_id=801321552250535977&permissions=8&scope=bot%20applications.commands\"\nbtn_url_2 = \"https://discord.com/invite/bNZefkNqVw\"\n\n# Rich Presence setup\nRPC = Presence(client_id=client_id)\nRPC.connect()\nRPC.update(\n state='Developing Cartero', # Rich Presence state\n details='Moderation', # Rich Presence details\n small_image='small', small_text='Cartero', # Set small image and its text (Optional)\n large_image='large', large_text='Cartero', # Set large image and its text (Optional)\n buttons=[\n {\"label\": btn_label_1, \"url\": btn_url_1}, # btn 1 (Optional)\n {\"label\": btn_label_2, \"url\": btn_url_2} # btn 2 (Optional)\n ]\n)\n\nprint(\"Rich Presence enabled\")\n\nwhile 1:\n time.sleep(15)\n","sub_path":"RichPresence.py","file_name":"RichPresence.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"315361744","text":"import argparse\nimport datetime\nimport os\nimport subprocess\n\nimport numpy as np\nimport pandas as pd\nfrom pysb import *\nfrom pysb.integrate import odesolve\n\nfrom pysb_t_cell_network import write_columns, write_model_attributes\nfrom src.general.directory_handling import make_and_cd\n\n\ndef add_new_monomer(product):\n try:\n model.monomers[product]\n except:\n Monomer(product)\n Initial(eval(product + \"()\"), Parameter(product + \"_0\", 0))\n\n\ndef add_observable(species):\n Observable(\"O_{0}\".format(species), eval('{0}()'.format(species)))\n\n\nclass SoSFeedback(object):\n\n def __init__(self):\n self.run_time = 300\n self.tspan = np.linspace(0, self.run_time)\n\n self.sos = [round(i) for i in np.linspace(25, 500, num=40)]\n\n self.model = Model()\n\n def define_monomers(self):\n Monomer('Sos')\n Monomer('Ras_GDP')\n Monomer('Ras_GTP')\n Monomer('Ras_GAP')\n\n Parameter('Sos_0', self.sos[0])\n Parameter('Ras_GDP_0', 300)\n Parameter('Ras_GAP_0', 10)\n Parameter('Ras_GTP_0', 0)\n\n Initial(Sos(), Sos_0)\n Initial(Ras_GDP(), Ras_GDP_0)\n Initial(Ras_GAP(), Ras_GAP_0)\n Initial(Ras_GTP(), Ras_GTP_0)\n\n def add_step_1(self):\n Parameter('k_sos_on_rgdp', 0.0024)\n Parameter('k_sos_off_rgdp', 3.0)\n\n product = \"Sos_Ras_GDP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product), Sos() + Ras_GDP() | eval('{0}()'.format(product)),\n k_sos_on_rgdp, k_sos_off_rgdp)\n\n add_observable(product)\n return product\n\n def add_step_2(self):\n Parameter('k_sos_on_rgtp', 0.0022)\n Parameter('k_sos_off_rgtp', 0.4)\n\n product = \"Sos_Ras_GTP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product), Sos() + Ras_GTP() | eval('{0}()'.format(product)),\n k_sos_on_rgtp, k_sos_off_rgtp)\n\n add_observable(product)\n\n return product\n\n def add_step_3(self):\n Parameter('k_rgdp_on_sos_rgtp', 0.001)\n Parameter('k_rgdp_off_sos_rgtp', 0.1)\n Parameter('k_cat_3', 0.038 * 1.7)\n\n previous_product = self.add_step_2()\n\n product = \"Sos_Ras_GTP_Ras_GDP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product),\n eval('{0}()'.format(previous_product)) + Ras_GDP() | eval('{0}()'.format(product)),\n k_rgdp_on_sos_rgtp, k_rgdp_off_sos_rgtp)\n\n Rule('{0}_cat'.format(product),\n eval('{0}()'.format(product)) >> eval('{0}()'.format(previous_product)) + Ras_GTP(),\n k_cat_3)\n\n def add_step_4(self):\n Parameter('k_rgdp_on_sos_rgdp', 0.0014)\n Parameter('k_rgdp_off_sos_rgdp', 1.0)\n Parameter('k_cat_4', 0.003)\n\n previous_product = self.add_step_1()\n\n product = \"Sos_Ras_GDP_Ras_GDP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product),\n eval('{0}()'.format(previous_product)) + Ras_GDP() | eval('{0}()'.format(product)),\n k_rgdp_on_sos_rgdp, k_rgdp_off_sos_rgdp)\n\n Rule('{0}_cat'.format(product),\n eval('{0}()'.format(product)) >> eval('{0}()'.format(previous_product)) + Ras_GTP(),\n k_cat_4)\n\n def add_step_5(self):\n Parameter('k_rgap_on_rgtp', 0.0348)\n Parameter('k_rgap_off_rgtp', 0.2)\n Parameter('k_cat_5', 0.1)\n\n product = \"Ras_GAP_Ras_GTP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product),\n Ras_GAP() + Ras_GTP() | eval('{0}()'.format(product)),\n k_rgap_on_rgtp, k_rgap_off_rgtp)\n\n add_observable(product)\n\n Rule('{0}_cat'.format(product),\n eval('{0}()'.format(product)) >> Ras_GAP() + Ras_GDP(),\n k_cat_5)\n\n def make_model(self):\n # Model()\n\n self.define_monomers()\n\n observables = []\n\n self.add_step_3()\n self.add_step_4()\n self.add_step_5()\n\n add_observable(\"Ras_GTP\")\n\n product = \"Ras_GTP\"\n 
observables.append(\"O_{0}\".format(product))\n\n return observables\n\n def main(self):\n sos_array = []\n output = []\n observables = self.make_model()\n\n write_columns(observables)\n write_model_attributes(model.rules, \"rules\")\n write_model_attributes(model.parameters, \"parameters\")\n write_model_attributes(model.observables, \"observables\")\n\n np.savetxt(\"time\", self.tspan, fmt='%f')\n\n for sos in self.sos:\n model.parameters['Sos_0'].value = sos\n y = odesolve(model, self.tspan, compiler=\"python\")\n\n sos_array.append(sos)\n # print(y[observables[0]][-1])\n output.append(y[observables[0]][-1])\n\n df = pd.DataFrame({'Sos': sos_array, 'RasGTP': output})\n df.to_csv(\"./sos_rasgtp\", sep='\\t')\n\n # np.savetxt(\"Sos\", sos_array, fmt='%f')\n # np.savetxt(\"RasGTP\", output, fmt='%f')\n\n\nclass SoSFeedbackLigandSpecific(SoSFeedback):\n def __init__(self):\n SoSFeedback.__init__(self)\n\n self.sos_total = [round(i) for i in np.linspace(25, 500, num=40)]\n\n\nclass LaunchQsub(object):\n def __init__(self):\n self.simulation_name = \"Sos_FB\"\n self.simulation_time = 2\n\n def generate_qsub(self):\n q = open(\"qsub.sh\", \"w\")\n q.write(\"#PBS -m ae\\n\")\n q.write(\"#PBS -q short\\n\")\n q.write(\"#PBS -V\\n\")\n q.write(\"#PBS -l walltime={1},nodes=1:ppn=2 -N {0}\\n\\n\".format(self.simulation_name,\n datetime.timedelta(\n minutes=self.simulation_time)))\n q.write(\"cd $PBS_O_WORKDIR\\n\")\n q.write(\"echo $PBS_JOBID > job_id\\n\\n\")\n\n q.write(\n \"python ~/SSC_python_modules/pysb_sos_fb.py --run\\n\")\n q.close()\n\n def launch(self):\n (stdout, stderr) = subprocess.Popen([\"qsub {0}\".format(\"qsub.sh\")], shell=True, stdout=subprocess.PIPE,\n cwd=os.getcwd()).communicate()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Submitting ode calculations as function of steps\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--run', action='store_true', default=False,\n help='Flag for submitting simulations.')\n\n args = parser.parse_args()\n\n if not args.run:\n make_and_cd(\"Ras_SoS_Fb_200\")\n qsub = LaunchQsub()\n qsub.generate_qsub()\n qsub.launch()\n else:\n sos = SoSFeedback()\n sos.main()\n","sub_path":"src/data/pysb_ras_sos.py","file_name":"pysb_ras_sos.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"435857360","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('static', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='file',\n name='target',\n field=models.CharField(max_length=100,\n choices=[(b'catalog.Product.images', b'Product Image'),\n (b'category.Category.image', b'Category Image')]),\n ),\n ]\n","sub_path":"src/static/migrations/0002_auto_20150623_1707.py","file_name":"0002_auto_20150623_1707.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"225746284","text":"import cProfile\nimport json\nimport argparse\nimport glob\nimport collections\nimport itertools\n\nimport cv2\nimport numpy as np\nimport os\n\n\ndef region_of_interest(img, vertices):\n mask = np.zeros_like(img)\n\n if len(img.shape) > 2:\n channel_count = img.shape[2]\n select_mask_color = (255,) * channel_count\n else:\n select_mask_color = 255\n\n cv2.fillPoly(mask, vertices, select_mask_color)\n\n return cv2.bitwise_and(img, mask)\n\n\ndef sliding_window_search(nonzeroy, nonzerox, y_size, base, search_img):\n nwindows = 8\n window_height = np.int(y_size / nwindows)\n current = base\n margin = 40\n minpix = 10\n lane_inds = []\n\n for window in range(nwindows):\n win_y_low = y_size - (window + 1) * window_height\n win_y_high = y_size - window * window_height\n win_x_low = current - margin\n win_x_high = current + margin\n good_inds_mask = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high)\n & (nonzerox >= win_x_low) & (nonzerox < win_x_high))\n good_inds = good_inds_mask.nonzero()[0]\n\n lane_inds.append(good_inds)\n\n if len(good_inds) > minpix:\n current = np.int(np.mean(nonzerox[good_inds]))\n\n if search_img is not None:\n cv2.rectangle(search_img, (win_x_low, win_y_low), (win_x_high, win_y_high), 255, 2)\n\n lane_inds = np.concatenate(lane_inds)\n\n return nonzeroy[lane_inds], nonzerox[lane_inds]\n\n\ndef crop_image(input_img):\n y_size = input_img.shape[0]\n if input_img.shape[0] != 128:\n raise Exception(\"expected y dimension 128 but got: %d\" % y_size)\n cropped = input_img[88:, :, :]\n assert cropped.shape[0] == 40\n assert cropped.shape[1] == 160\n return cropped\n\n\nPipelineResult = collections.namedtuple(\"PipelineResult\",\n [\"input_img\", \"cropped\",\n \"sobel_h_x\", \"sobel_h_y\", \"sobel_h_mag\",\n \"sobel_h_mag_scaled\", \"sobel_h_thresholded\",\n \"sobel_s_thresholded\",\n \"l_threshold_mask\",\n \"yellow_mask\",\n \"binary\",\n \"search_img\",\n \"hls\", \"blurred_s\",\n \"sobel_s_mag_scaled\",\n \"blurred_h\",\n \"warped_input\",\n \"warped\", \"num_pts\", \"linex\", \"liney\",\n \"line_fit\"])\n\n\ndef get_yellow_mask(hls):\n lower_yellow = (21.25, 75, 40)\n # lower_yellow = (21.25, 75, 100.0)\n upper_yellow = (50, 178.5, 255)\n mask = cv2.inRange(hls, lower_yellow, upper_yellow)\n return mask\n\n\ndef get_ground_polygon():\n d1 = (0, 40) # bottom left of polygon\n d2 = (62, 0) # top left of polygon\n d3 = (98, 0) # top right of polygon\n d4 = (160, 40) # bottom right of polygon\n return np.int32([d1, d2, d3, d4])\n\n\ndef _perspective_mat_dst():\n d1 = (40, 40) # bottom mid-left\n d2 = (40, 0) # top mid-left\n d3 = (120, 0) # top mid-right\n d4 = (120, 40) # bottom mid-right\n return np.float32([d1, d2, d3, d4])\n\nclass Processor(object):\n\n def __init__(self, mtx, dist):\n self.mtx = mtx\n self.dist = dist\n src = get_ground_polygon().astype(np.float32)\n dst = _perspective_mat_dst()\n self.perspective_mat = cv2.getPerspectiveTransform(src, dst)\n self.perspective_mat_inv = cv2.getPerspectiveTransform(dst, src)\n\n def undistort(self, cropped):\n if self.mtx is not None:\n return cv2.undistort(cropped, mtx, dist, None, mtx)\n else:\n return cropped\n\n def inv_warp(self, img):\n return cv2.warpPerspective(\n img, self.perspective_mat_inv, (160, 40), flags=cv2.INTER_LINEAR)\n\n def warp(self, img):\n # copy = np.copy(img)\n # cv2.polylines(copy, [get_ground_polygon()], True, [255, 0, 255], 5)\n return cv2.warpPerspective(\n img, self.perspective_mat, (160, 40), flags=cv2.INTER_LINEAR)\n\n\n def process(self, input_img, 
debug=False):\n cropped = crop_image(input_img)\n\n y_size = 40\n x_size = 160\n assert cropped.shape[0] == y_size\n assert cropped.shape[1] == x_size\n\n undistorted = self.undistort(cropped)\n\n hls = cv2.cvtColor(cropped, cv2.COLOR_BGR2HLS)\n h_channel = hls[:, :, 0]\n l_channel = hls[:, :, 1]\n s_channel = hls[:, :, 2]\n\n blurred_s = cv2.GaussianBlur(s_channel, (5, 5), 0)\n blurred_h = cv2.GaussianBlur(h_channel, (5, 5), 0)\n\n yellow_mask = get_yellow_mask(hls)\n\n l_threshold_mask = cv2.inRange(l_channel, 50, 255)\n\n sobel_h_x = cv2.Sobel(blurred_h, cv2.CV_64F, 1, 0, ksize=5)\n sobel_h_y = cv2.Sobel(blurred_h, cv2.CV_64F, 0, 1, ksize=5)\n sobel_h_mag = np.sqrt(sobel_h_x ** 2 + sobel_h_y ** 2)\n sobel_h_mag = cv2.bitwise_and(sobel_h_mag, sobel_h_mag, mask=yellow_mask)\n\n sobel_h_scale_factor = np.max(sobel_h_mag) / 255\n if sobel_h_scale_factor > 0:\n sobel_h_mag_scaled = sobel_h_mag / sobel_h_scale_factor\n else:\n sobel_h_mag_scaled = sobel_h_mag\n sobel_h_threshold_mask = cv2.inRange(sobel_h_mag_scaled, 50, 255)\n sobel_h_thresholded = cv2.bitwise_and(sobel_h_mag_scaled, sobel_h_mag_scaled, mask=sobel_h_threshold_mask)\n\n sobel_s_x = cv2.Sobel(blurred_s, cv2.CV_64F, 1, 0, ksize=5)\n sobel_s_y = cv2.Sobel(blurred_s, cv2.CV_64F, 0, 1, ksize=5)\n sobel_s_mag = np.sqrt(sobel_s_x ** 2 + sobel_s_y ** 2)\n sobel_s_mag = cv2.bitwise_and(sobel_s_mag, sobel_s_mag, mask=yellow_mask)\n\n sobel_s_scale_factor = np.max(sobel_s_mag) / 255\n if sobel_s_scale_factor > 0:\n sobel_s_mag_scaled = sobel_s_mag / sobel_s_scale_factor\n else:\n sobel_s_mag_scaled = sobel_s_mag\n sobel_s_threshold_mask = cv2.inRange(sobel_s_mag_scaled, 50, 255)\n sobel_s_thresholded = cv2.bitwise_and(sobel_s_mag_scaled, sobel_s_mag_scaled, mask=sobel_s_threshold_mask)\n\n binary = np.ones_like(s_channel)\n binary = cv2.bitwise_and(binary, binary, mask=sobel_h_threshold_mask)\n binary = cv2.bitwise_and(binary, binary, mask=sobel_s_threshold_mask)\n\n if debug:\n warped_input = self.warp(cropped)\n else:\n warped_input = None\n\n warped_binary = self.warp(binary)\n\n histogram = np.sum(warped_binary[20:, :], axis=0)\n nonzero = warped_binary.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n x_base = np.argmax(histogram)\n\n if debug:\n search_img = np.zeros_like(warped_binary)\n else:\n search_img = None\n\n liney, linex = sliding_window_search(nonzeroy, nonzerox, y_size, x_base, search_img)\n num_pts = len(linex)\n if num_pts >= 10:\n line_fit = np.polyfit(liney, linex, 1)\n else:\n line_fit = None\n\n return PipelineResult(\n input_img=input_img,\n yellow_mask=yellow_mask,\n sobel_h_x=sobel_h_x,\n l_threshold_mask=l_threshold_mask,\n sobel_h_y=sobel_h_y,\n sobel_h_mag=sobel_h_mag,\n sobel_h_mag_scaled=sobel_h_mag_scaled,\n sobel_s_mag_scaled=sobel_s_mag_scaled,\n sobel_h_thresholded=sobel_h_thresholded,\n sobel_s_thresholded=sobel_s_thresholded,\n cropped=cropped,\n hls=hls,\n search_img=search_img,\n binary=binary,\n blurred_s=blurred_s,\n blurred_h=blurred_h,\n warped_input=warped_input,\n warped=warped_binary,\n liney=liney,\n linex=linex,\n line_fit=line_fit,\n num_pts=num_pts)\n\n\ndef handle_filepath(args, processor, filepath, report_file):\n bgr_image = load_bgr_image(filepath)\n result = processor.process(bgr_image)\n if report_file is not None:\n doc = {}\n doc['image'] = filepath\n if result.line_fit is None:\n doc['fit'] = False\n else:\n doc['fit'] = True\n doc['weight'] = result.num_pts\n doc['c0'] = result.line_fit[0]\n doc['c1'] = result.line_fit[1]\n json.dump(doc, 
report_file)\n report_file.write(\"\\n\")\n report_file.flush()\n\n\ndef load_bgr_image(bgr_filepath):\n bgr_array = np.fromfile(bgr_filepath, dtype=np.uint8)\n bgr_image = bgr_array.reshape(128, 160, 3)\n return bgr_image\n\n\ndef run3(args, processor, report_file):\n if args.imgfile is not None:\n with open(args.imgfile, \"r\") as infile:\n while True:\n line = infile.readline()\n # readline() returns '' at EOF, never None\n if not line:\n break\n filepath = line.rstrip()\n handle_filepath(args, processor, filepath, report_file)\n else:\n bgr_filepaths = list(glob.glob(os.path.join(args.imgdir, \"*.bgr\")))\n if args.ntake is not None:\n bgr_filepaths = take(args.ntake, bgr_filepaths)\n for filepath in bgr_filepaths:\n handle_filepath(args, processor, filepath, report_file)\n\n\ndef run2(args, processor):\n run3(args, processor, args.report)\n\n\ndef run1(args):\n if args.calibration is not None:\n with open(args.calibration, \"r\") as infile:\n doc = json.load(infile)\n mtx = np.array(doc['mtx'])\n dist = np.array(doc['dist'])\n processor = Processor(mtx, dist)\n else:\n processor = Processor(None, None)\n\n run2(args, processor)\n\n\ndef take(n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(itertools.islice(iterable, n))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--demo\", action=\"store_true\")\n parser.add_argument(\"--imgdir\", default=\"/dev/shm/bgr\")\n parser.add_argument(\"--ntake\", default=None, type=int)\n parser.add_argument(\"--imgfile\")\n parser.add_argument(\"--report\", type=argparse.FileType('w'))\n parser.add_argument(\"--calibration\")\n args = parser.parse_args()\n return args\n\n\ndef demo():\n args = parse_args()\n run1(args)\n\n\ndef main():\n args = parse_args()\n\n if args.demo:\n cProfile.run('demo()', sort='cumulative')\n else:\n run1(args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lines/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":9793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"516968808","text":"from time import sleep\n\nfrom selenium import webdriver\nfrom selenium.webdriver import TouchActions\n\n\nclass TestTouchAc():\n def setup(self):\n op = webdriver.ChromeOptions()\n op.add_experimental_option('w3c', False)\n self.driver = webdriver.Chrome(options=op)\n self.driver.implicitly_wait(10)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n def test_touchaction_scrollbottom(self):\n self.driver.get(\"https://www.baidu.com\")\n self.driver.find_element_by_id(\"kw\").send_keys(\"selenium\")\n self.driver.find_element_by_id(\"su\").click()\n\n action_search = TouchActions(self.driver)\n action_search.tap(self.driver.find_element_by_id(\"su\")).perform()\n el = self.driver.find_element_by_id(\"su\")\n action = TouchActions(self.driver)\n\n action.scroll_from_element(el, 10, 10000).perform()\n self.driver.find_element_by_css_selector('#page_addmeber > div > a.n').click()\n sleep(3)\n\n\n\n","sub_path":"cekai/Hogwarts_selenium_Exercise/pytest_exercise_selenium/Exercise_selenium/test_TouchAction.py","file_name":"test_TouchAction.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"439059437","text":"\"\"\"\npy2app/py2exe build script for MyApplication.\n\nWill automatically ensure that all build prerequisites are available\nvia ez_setup\n\nUsage (Mac OS X):\npython setup.py py2app\n\nUsage (Windows):\npython setup.py py2exe\n\"\"\"\nimport ez_setup\nez_setup.use_setuptools()\n\nimport sys\nfrom setuptools import setup\n\nAPP = 'Python/ISDPR.py'\n\nif sys.platform == 'darwin':\n extra_options = dict(\n setup_requires=['py2app'],\n app=[APP],\n # Set the application icon\n options=dict(\n py2app=dict(iconfile='Icons/icon.icns')),\n )\nelif sys.platform == 'win32':\n extra_options = dict(\n setup_requires=['py2exe'],\n app=[APP],\n ) \nelse:\n extra_options = dict(\n # Normally unix-like platforms will use \"setup.py install\"\n # and install the main script as such\n scripts=[APP],\n ) \n\nsetup(\n name=\"ISDPR\",\n **extra_options\n)","sub_path":"src/master_setup.py","file_name":"master_setup.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"583525088","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 12 23:55:57 2020\n\n@author: hongfei7\n\"\"\"\n\nimport gensim\nfrom gensim.summarization import bm25\nimport os\nimport heapq\nimport spacy\n\n# NLTK\nfrom nltk.stem import PorterStemmer\n\nfilenames = []\n\ndef tokenization(filename):\n result = []\n filepath = \"corpus/\" + filename\n with open(filepath, 'r') as f:\n text = f.read()\n words = text.split('\\t')\n for word in words:\n if word != \"\":\n result.append(word)\n return result\n\ndef read_corpus(dir_path):\n corpus = [];\n for root,dirs,files in os.walk(dir_path):\n for f in files:\n if f == \".DS_Store\":\n continue\n corpus.append(tokenization(f))\n #print(\"file is :\" + f)\n new_filename = f.split(\".\")[0] + \".pdf\"\n filenames.append(new_filename)\n print(\"Corpus size is :\" + str(len(corpus)))\n return corpus\n # dictionary = corpora.Dictionary(corpus)\n # print len(dictionary)\n\nif __name__ == \"__main__\":\n dir_path = 'corpus/'\n \n # BM25 Model\n texts = read_corpus(dir_path)\n bm25Model = bm25.BM25(texts)\n \n # Doc Cosine Similarity\n dictionary = gensim.corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(doc) for doc in texts]\n \n tf_idf = gensim.models.TfidfModel(corpus)\n index = gensim.similarities.SparseMatrixSimilarity(tf_idf[corpus], num_features=len(dictionary))\n \n nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])\n allowed_postags = ['NOUN', 'ADJ', 'VERB', 'ADV']\n \n while True:\n search_type = input(\"Please input your search type (bm25 or sim): \")\n query_str = input(\"Please input your query: \")\n \n \n query = []\n for word in query_str.strip().split():\n query.append(word.lower())\n lemmatized = []\n res = nlp(\" \".join(query))\n for token in res:\n lemmatized.append(token.lemma_)\n #print(lemmatized)\n # STEMMING\n stemmer = PorterStemmer()\n stemmed = [stemmer.stem(x) for x in lemmatized]\n query = stemmed\n \n if search_type == \"bm25\":\n scores = bm25Model.get_scores(query)\n \n # get the top 10 indexes\n indexes = heapq.nlargest(10, range(len(scores)), scores.__getitem__)\n \n # get the top 10 values\n values = heapq.nlargest(10,scores)\n \n # print(indexes)\n print(values)\n \n for i in indexes:\n print(filenames[i])\n\n elif search_type == \"sim\":\n query_doc_bow = dictionary.doc2bow(query)\n sims = index[tf_idf[query_doc_bow]]\n \n indexes = heapq.nlargest(10, range(len(sims)), sims.__getitem__)\n \n values = heapq.nlargest(10,sims)\n \n # print(indexes)\n print(values)\n \n for i in indexes:\n print(filenames[i])\n else:\n print(\"Invalid type!\\n\")\n","sub_path":"Recommend_Slides/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"269138984","text":"from Features import Features\n\ndef load_data_and_create_features(path, dataset='Train', Features_Object = None):\n with open(path) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n words_set, tags_set = get_words_and_tags_set(content)\n if dataset=='Train':\n features_object = Features(words_set,tags_set)\n elif dataset=='Test':\n features_object = Features_Object\n word_possible_labels = {}\n words = []\n tags = []\n features = []\n print('creating words,tags and features')\n for idx, line in enumerate(content):\n #uncomment if we want to add ** in start of santance\n words.extend(['*', '*'])\n tags.extend(['*', '*'])\n features.extend([[],[]])\n splited_line = line.split()\n for i,word_tag in enumerate(splited_line):\n word, tag = word_tag.split('_')\n words.append(word)\n tags.append(tag)\n try:\n next_word, _ = splited_line[i+1].split('_')\n except IndexError:\n next_word = 'STOP'\n current_word_features = features_object.set_features_for_word(words[-3:],tags[-3:],next_word)\n features.append(current_word_features)\n #test\n #features_object.multiply_features_with_weighets(features[-1])\n #if word exists append tag to wotds list, else create a list and append the tag\n #word_possible_labels.setdefault(word, []).append(tag)\n words.append('STOP')\n tags.append('STOP')\n features.extend([[]])\n return words,tags,features,features_object\n\ndef get_words_and_tags_set(content):\n words_set = set()\n tags_set = set()\n for line in content:\n for word_tag in line.split():\n word, tag = word_tag.split('_')\n words_set.add(word)\n tags_set.add(tag)\n words_set.add('STOP')\n words_set.add('*')\n tags_set.add('STOP')\n tags_set.add('*')\n return words_set, tags_set\n\ndef create_word_tag_pairs(words_set,tags_set):\n word_tag_pairs = set()\n for tag in tags_set:\n for word in words_set:\n word_tag_pairs.add((tag,word))\n word_tag_pairs.add(('STOP','STOP'))\n\n\n\nif __name__ == '__main__':\n words, tags, features, Features_object = load_data_and_create_features('data/train.wtag','Train')\n load_data_and_create_features('data/test.wtag', 'Test', Features_object)\n\n\n","sub_path":"load_corpus.py","file_name":"load_corpus.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"291838999","text":"# rbtext.py by ApolloJustice\n# for use with Python 3\n# non PEP-8 compliant because honestly fuck that\n# probably not commented because too lazy\n\n__module_name__ = \"RainbowFonts\"\n__module_version__ = \"1.0\"\n__module_description__ = \"Rainbowifies text\"\n__author__ = \"ApolloJustice\"\n\nimport hexchat\nimport random\n\ndef rainbow(word, word_eol, userdata):\n\n\trainbowstr = \"\"\n\n\tfor character in word_eol[1]: rainbowstr += '\\003' + str(random.randint(2, 15)) + character\n\n\thexchat.command(\"say \" + rainbowstr)\n\trainbowstr = \"\"\n\treturn hexchat.EAT_ALL\n\nhexchat.hook_command(\"rb\", rainbow, help=\"/rb rainbowifies text\")\nhexchat.emit_print(\"Notice\", __module_name__ + \" [S]\", \"%s by %s loaded. You are using version %s of the script.\" % (__module_name__, __author__, __module_version__))","sub_path":"rbtext.py","file_name":"rbtext.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"128792859","text":"import logging\nimport time\nimport uuid\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nfrom globus_sdk import client, exc, paging, response, utils\nfrom globus_sdk.scopes import TransferScopes\n\nfrom .errors import TransferAPIError\nfrom .response import ActivationRequirementsResponse, IterableTransferResponse\n\nlog = logging.getLogger(__name__)\n\nID_PARAM_TYPE = Union[bytes, str, uuid.UUID]\n\n\ndef _get_page_size(paged_result):\n return len(paged_result[\"DATA\"])\n\n\nclass TransferClient(client.BaseClient):\n r\"\"\"\n Client for the\n `Globus Transfer API `_.\n\n This class provides helper methods for most common resources in the\n REST API, and basic ``get``, ``put``, ``post``, and ``delete`` methods\n from the base rest client that can be used to access any REST resource.\n\n Detailed documentation is available in the official REST API\n documentation, which is linked to from the method documentation. Methods\n that allow arbitrary keyword arguments will pass the extra arguments as\n query parameters.\n\n :param authorizer: An authorizer instance used for all calls to\n Globus Transfer\n :type authorizer: :class:`GlobusAuthorizer\\\n `\n\n **Paginated Calls**\n\n Methods which support pagination can be called as paginated or unpaginated methods.\n If the method name is ``TransferClient.foo``, the paginated version is\n ``TransferClient.paginated.foo``.\n Using ``TransferClient.endpoint_search`` as an example::\n\n from globus_sdk import TransferClient\n tc = TransferClient(...)\n\n # this is the unpaginated version\n for x in tc.endpoint_search(\"tutorial\"):\n print(\"Endpoint ID: {}\".format(x[\"id\"]))\n\n # this is the paginated version\n for page in tc.paginated.endpoint_search(\"testdata\"):\n for x in page:\n print(\"Endpoint ID: {}\".format(x[\"id\"]))\n\n .. 
automethodlist:: globus_sdk.TransferClient\n \"\"\"\n service_name = \"transfer\"\n base_path = \"/v0.10/\"\n error_class = TransferAPIError\n scopes = TransferScopes\n\n # Convenience methods, providing more pythonic access to common REST\n # resources\n\n #\n # Endpoint Management\n #\n\n def get_endpoint(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> endpoint = tc.get_endpoint(endpoint_id)\n >>> print(\"Endpoint name:\",\n >>> endpoint[\"display_name\"] or endpoint[\"canonical_name\"])\n\n **External Documentation**\n\n See\n `Get Endpoint by ID \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.get_endpoint({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s)\n return self.get(path, query_params=query_params)\n\n def update_endpoint(\n self,\n endpoint_id: ID_PARAM_TYPE,\n data,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``PUT /endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> epup = dict(display_name=\"My New Endpoint Name\",\n >>> description=\"Better Description\")\n >>> update_result = tc.update_endpoint(endpoint_id, epup)\n\n **External Documentation**\n\n See\n `Update Endpoint by ID \\\n `_\n in the REST documentation for details.\n \"\"\"\n if data.get(\"myproxy_server\"):\n if data.get(\"oauth_server\"):\n raise exc.GlobusSDKUsageError(\n \"an endpoint cannot be reconfigured to use multiple \"\n \"identity providers for activation; specify either \"\n \"MyProxy or OAuth, not both\"\n )\n else:\n data[\"oauth_server\"] = None\n elif data.get(\"oauth_server\"):\n data[\"myproxy_server\"] = None\n\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.update_endpoint({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s)\n return self.put(path, data=data, query_params=query_params)\n\n def create_endpoint(self, data) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> ep_data = {\n >>> \"DATA_TYPE\": \"endpoint\",\n >>> \"display_name\": display_name,\n >>> \"DATA\": [\n >>> {\n >>> \"DATA_TYPE\": \"server\",\n >>> \"hostname\": \"gridftp.example.edu\",\n >>> },\n >>> ],\n >>> }\n >>> create_result = tc.create_endpoint(ep_data)\n >>> endpoint_id = create_result[\"id\"]\n\n **External Documentation**\n\n See\n `Create endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n if data.get(\"myproxy_server\") and data.get(\"oauth_server\"):\n raise exc.GlobusSDKUsageError(\n \"an endpoint cannot be created using multiple identity \"\n \"providers for activation; specify either MyProxy or OAuth, \"\n \"not both\"\n )\n\n log.info(\"TransferClient.create_endpoint(...)\")\n return self.post(\"endpoint\", data=data)\n\n def delete_endpoint(\n self, endpoint_id: ID_PARAM_TYPE\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``DELETE /endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> delete_result = tc.delete_endpoint(endpoint_id)\n\n **External Documentation**\n\n See\n `Delete endpoint by id \\\n `_\n in the 
REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.delete_endpoint({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s)\n return self.delete(path)\n\n @paging.has_paginator(\n paging.HasNextPaginator,\n items_key=\"DATA\",\n get_page_size=_get_page_size,\n max_total_results=1000,\n page_size=100,\n )\n def endpoint_search(\n self,\n filter_fulltext: Optional[str] = None,\n filter_scope: Optional[str] = None,\n filter_owner_id: Optional[str] = None,\n filter_host_endpoint: Optional[ID_PARAM_TYPE] = None,\n filter_non_functional: Optional[bool] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> IterableTransferResponse:\n r\"\"\"\n .. parsed-literal::\n\n GET /endpoint_search\\\n ?filter_fulltext=&filter_scope=\n\n :param filter_fulltext: The string to use in a full text search on endpoints.\n Effectively, the \"search query\" which is being requested. May be omitted\n with specific ``filter_scope`` values.\n :type filter_fulltext: str, optional\n :param filter_scope: A \"scope\" within which to search for endpoints. This must\n be one of the limited and known names known to the service, which can be\n found documented in the **External Documentation** below. Defaults to\n searching all endpoints (in which case ``filter_fulltext`` is required)\n :type filter_scope: str, optional\n :param filter_owner_id: Limit search to endpoints owned by the specified Globus\n Auth identity. Conflicts with scopes 'my-endpoints', 'my-gcp-endpoints', and\n 'shared-by-me'.\n :type filter_owner_id: str, optional\n :param filter_host_endpoint: Limit search to endpoints hosted by the specified\n endpoint. May cause BadRequest or PermissionDenied errors if the endpoint ID\n given is not valid for this operation.\n :type filter_host_endpoint: str, optional\n :param filter_non_functional: Limit search to endpoints which have the\n 'non_functional' flag set to True or False.\n :type filter_non_functional: bool, optional\n :param limit: limit the number of results\n :type limit: int, optional\n :param offset: offset used in paging\n :type offset: int, optional\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n :rtype: :class:`IterableTransferResponse\n `\n\n **Examples**\n\n Search for a given string as a fulltext search:\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> for ep in tc.endpoint_search('String to search for!'):\n >>> print(ep['display_name'])\n\n Search for a given string, but only on endpoints that you own:\n\n >>> for ep in tc.endpoint_search('foo', filter_scope='my-endpoints'):\n >>> print('{0} has ID {1}'.format(ep['display_name'], ep['id']))\n\n It is important to be aware that the Endpoint Search API limits\n you to 1000 results for any search query.\n\n **External Documentation**\n\n For additional information, see `Endpoint Search\n `_.\n in the REST documentation for details.\n \"\"\"\n if query_params is None:\n query_params = {}\n if filter_scope is not None:\n query_params[\"filter_scope\"] = filter_scope\n if filter_fulltext is not None:\n query_params[\"filter_fulltext\"] = filter_fulltext\n if filter_owner_id is not None:\n query_params[\"filter_owner_id\"] = filter_owner_id\n if filter_host_endpoint is not None: # convert to str (may be UUID)\n query_params[\"filter_host_endpoint\"] = utils.safe_stringify(\n filter_host_endpoint\n )\n if 
filter_non_functional is not None: # convert to int (expect bool input)\n query_params[\"filter_non_functional\"] = 1 if filter_non_functional else 0\n if limit is not None:\n query_params[\"limit\"] = limit\n if offset is not None:\n query_params[\"offset\"] = offset\n log.info(f\"TransferClient.endpoint_search({query_params})\")\n return IterableTransferResponse(\n self.get(\"endpoint_search\", query_params=query_params)\n )\n\n def endpoint_autoactivate(\n self,\n endpoint_id: ID_PARAM_TYPE,\n if_expires_in: Optional[int] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n r\"\"\"\n ``POST /endpoint//autoactivate``\n\n :rtype: :class:`TransferResponse\n `\n\n The following example will try to \"auto\" activate the endpoint\n using a credential available from another endpoint or sign in by\n the user with the same identity provider, but only if the\n endpoint is not already activated or going to expire within an\n hour (3600 seconds). If that fails, direct the user to the\n globus website to perform activation:\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> r = tc.endpoint_autoactivate(ep_id, if_expires_in=3600)\n >>> while (r[\"code\"] == \"AutoActivationFailed\"):\n >>> print(\"Endpoint requires manual activation, please open \"\n >>> \"the following URL in a browser to activate the \"\n >>> \"endpoint:\")\n >>> print(\"https://app.globus.org/file-manager?origin_id=%s\"\n >>> % ep_id)\n >>> input(\"Press ENTER after activating the endpoint:\")\n >>> r = tc.endpoint_autoactivate(ep_id, if_expires_in=3600)\n\n This is the recommended flow for most thick client applications,\n because many endpoints require activation via OAuth MyProxy,\n which must be done in a browser anyway. Web based clients can\n link directly to the URL.\n\n You also might want messaging or logging depending on why and how the\n operation succeeded, in which case you'll need to look at the value of\n the \"code\" field and either decide on your own messaging or use the\n response's \"message\" field.\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> r = tc.endpoint_autoactivate(ep_id, if_expires_in=3600)\n >>> if r['code'] == 'AutoActivationFailed':\n >>> print('Endpoint({}) Not Active! Error! 
Source message: {}'\n >>> .format(ep_id, r['message']))\n >>> sys.exit(1)\n >>> elif r['code'] == 'AutoActivated.CachedCredential':\n >>> print('Endpoint({}) autoactivated using a cached credential.'\n >>> .format(ep_id))\n >>> elif r['code'] == 'AutoActivated.GlobusOnlineCredential':\n >>> print(('Endpoint({}) autoactivated using a built-in Globus '\n >>> 'credential.').format(ep_id))\n >>> elif r['code'] == 'AlreadyActivated':\n >>> print('Endpoint({}) already active until at least {}'\n >>> .format(ep_id, 3600))\n\n **External Documentation**\n\n See\n `Autoactivate endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n if query_params is None:\n query_params = {}\n if if_expires_in is not None:\n query_params[\"if_expires_in\"] = if_expires_in\n log.info(f\"TransferClient.endpoint_autoactivate({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"autoactivate\")\n return self.post(path, query_params=query_params)\n\n def endpoint_deactivate(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//deactivate``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Deactivate endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_deactivate({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"deactivate\")\n return self.post(path, query_params=query_params)\n\n def endpoint_activate(\n self,\n endpoint_id: ID_PARAM_TYPE,\n requirements_data,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//activate``\n\n :rtype: :class:`TransferResponse\n `\n\n Consider using autoactivate and web activation instead, described\n in the example for\n :meth:`~globus_sdk.TransferClient.endpoint_autoactivate`.\n\n **External Documentation**\n\n See\n `Activate endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_activate({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"activate\")\n return self.post(path, data=requirements_data, query_params=query_params)\n\n def endpoint_get_activation_requirements(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> ActivationRequirementsResponse:\n \"\"\"\n ``GET /endpoint//activation_requirements``\n\n :rtype: :class:`ActivationRequirementsResponse\n `\n\n **External Documentation**\n\n See\n `Get activation requirements \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"activation_requirements\")\n return ActivationRequirementsResponse(self.get(path, query_params=query_params))\n\n def my_effective_pause_rule_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//my_effective_pause_rule_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get my effective endpoint pause rules \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.my_effective_pause_rule_list({endpoint_id_s}, ...)\")\n 
path = self.qjoin_path(\n \"endpoint\", endpoint_id_s, \"my_effective_pause_rule_list\"\n )\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n # Shared Endpoints\n\n def my_shared_endpoint_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//my_shared_endpoint_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get shared endpoint list \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.my_shared_endpoint_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"my_shared_endpoint_list\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n @paging.has_paginator(\n paging.NextTokenPaginator,\n items_key=\"shared_endpoints\",\n )\n def get_shared_endpoint_list(\n self,\n endpoint_id: ID_PARAM_TYPE,\n max_results: Optional[int] = None,\n next_token: Optional[str] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//shared_endpoint_list``\n\n :param max_results: cap on the number of results\n :type max_results: int, optional\n :param next_token: token used for paging\n :type next_token: str, optional\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get shared endpoint list \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.get_shared_endpoint_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"shared_endpoint_list\")\n if query_params is None:\n query_params = {}\n if max_results is not None:\n query_params[\"max_results\"] = str(max_results)\n if next_token is not None:\n query_params[\"next_token\"] = str(next_token)\n return IterableTransferResponse(\n self.get(path, query_params=query_params), iter_key=\"shared_endpoints\"\n )\n\n def create_shared_endpoint(self, data):\n \"\"\"\n ``POST /shared_endpoint``\n\n :param data: A python dict representation of a ``shared_endpoint`` document\n :type data: dict\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> shared_ep_data = {\n >>> \"DATA_TYPE\": \"shared_endpoint\",\n >>> \"host_endpoint\": host_endpoint_id,\n >>> \"host_path\": host_path,\n >>> \"display_name\": display_name,\n >>> # optionally specify additional endpoint fields\n >>> \"description\": \"my test share\"\n >>> }\n >>> create_result = tc.create_shared_endpoint(shared_ep_data)\n >>> endpoint_id = create_result[\"id\"]\n\n **External Documentation**\n\n See\n `Create shared endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(\"TransferClient.create_shared_endpoint(...)\")\n return self.post(\"shared_endpoint\", data=data)\n\n # Endpoint servers\n\n def endpoint_server_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//server_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get endpoint server list \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = 
utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_server_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server_list\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n def get_endpoint_server(\n self,\n endpoint_id: ID_PARAM_TYPE,\n server_id,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /endpoint//server/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get endpoint server by id\\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.get_endpoint_server(%s, %s, ...)\", endpoint_id_s, server_id\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server\", str(server_id))\n return self.get(path, query_params=query_params)\n\n def add_endpoint_server(\n self, endpoint_id: ID_PARAM_TYPE, server_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//server``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Add endpoint server \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.add_endpoint_server({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server\")\n return self.post(path, data=server_data)\n\n def update_endpoint_server(\n self, endpoint_id: ID_PARAM_TYPE, server_id, server_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``PUT /endpoint//server/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Update endpoint server by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.update_endpoint_server(%s, %s, ...)\",\n endpoint_id_s,\n server_id,\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server\", str(server_id))\n return self.put(path, data=server_data)\n\n def delete_endpoint_server(\n self, endpoint_id: ID_PARAM_TYPE, server_id\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``DELETE /endpoint//server/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Delete endpoint server by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.delete_endpoint_server(%s, %s)\", endpoint_id_s, server_id\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server\", str(server_id))\n return self.delete(path)\n\n #\n # Roles\n #\n\n def endpoint_role_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//role_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get list of endpoint roles \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_role_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"role_list\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n def add_endpoint_role(\n self, endpoint_id: ID_PARAM_TYPE, role_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//role``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Create 
endpoint role \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.add_endpoint_role({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"role\")\n return self.post(path, data=role_data)\n\n def get_endpoint_role(\n self,\n endpoint_id: ID_PARAM_TYPE,\n role_id,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /endpoint//role/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get endpoint role by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.get_endpoint_role({endpoint_id_s}, {role_id}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"role\", role_id)\n return self.get(path, query_params=query_params)\n\n def delete_endpoint_role(\n self, endpoint_id: ID_PARAM_TYPE, role_id\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``DELETE /endpoint//role/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Delete endpoint role by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.delete_endpoint_role({endpoint_id_s}, {role_id})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"role\", role_id)\n return self.delete(path)\n\n #\n # ACLs\n #\n\n def endpoint_acl_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//access_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get list of access rules \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_acl_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access_list\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n def get_endpoint_acl_rule(\n self,\n endpoint_id: ID_PARAM_TYPE,\n rule_id,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /endpoint//access/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get access rule by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.get_endpoint_acl_rule(%s, %s, ...)\", endpoint_id_s, rule_id\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access\", rule_id)\n return self.get(path, query_params=query_params)\n\n def add_endpoint_acl_rule(\n self, endpoint_id: ID_PARAM_TYPE, rule_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//access``\n\n :param endpoint_id: ID of endpoint to which to add the acl\n :type endpoint_id: str\n :param rule_data: A python dict representation of an ``access`` document\n :type rule_data: dict\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> rule_data = {\n >>> \"DATA_TYPE\": \"access\",\n >>> \"principal_type\": \"identity\",\n >>> \"principal\": identity_id,\n >>> \"path\": \"/dataset1/\",\n >>> \"permissions\": \"rw\",\n >>> }\n >>> result = tc.add_endpoint_acl_rule(endpoint_id, rule_data)\n >>> rule_id = result[\"access_id\"]\n\n Note that if this rule is being created on a 
shared endpoint\n the \"path\" field is relative to the \"host_path\" of the shared endpoint.\n\n **External Documentation**\n\n See\n `Create access rule \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.add_endpoint_acl_rule({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access\")\n return self.post(path, data=rule_data)\n\n def update_endpoint_acl_rule(\n self, endpoint_id: ID_PARAM_TYPE, rule_id, rule_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``PUT /endpoint//access/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Update access rule \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.update_endpoint_acl_rule(%s, %s, ...)\",\n endpoint_id_s,\n rule_id,\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access\", rule_id)\n return self.put(path, data=rule_data)\n\n def delete_endpoint_acl_rule(\n self, endpoint_id: ID_PARAM_TYPE, rule_id\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``DELETE /endpoint//access/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Delete access rule \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.delete_endpoint_acl_rule(%s, %s)\", endpoint_id_s, rule_id\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access\", rule_id)\n return self.delete(path)\n\n #\n # Bookmarks\n #\n\n def bookmark_list(\n self, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /bookmark_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get list of bookmarks \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(f\"TransferClient.bookmark_list({query_params})\")\n return IterableTransferResponse(\n self.get(\"bookmark_list\", query_params=query_params)\n )\n\n def create_bookmark(self, bookmark_data: Dict) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /bookmark``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Create bookmark \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(f\"TransferClient.create_bookmark({bookmark_data})\")\n return self.post(\"bookmark\", data=bookmark_data)\n\n def get_bookmark(\n self, bookmark_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /bookmark/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get bookmark by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n bookmark_id_s = utils.safe_stringify(bookmark_id)\n log.info(f\"TransferClient.get_bookmark({bookmark_id_s})\")\n path = self.qjoin_path(\"bookmark\", bookmark_id_s)\n return self.get(path, query_params=query_params)\n\n def update_bookmark(\n self, bookmark_id: ID_PARAM_TYPE, bookmark_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``PUT /bookmark/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Update bookmark \\\n `_\n in the REST documentation for details.\n \"\"\"\n bookmark_id_s = utils.safe_stringify(bookmark_id)\n log.info(f\"TransferClient.update_bookmark({bookmark_id_s})\")\n path = self.qjoin_path(\"bookmark\", bookmark_id_s)\n return self.put(path, 
data=bookmark_data)\n\n    def delete_bookmark(\n        self, bookmark_id: ID_PARAM_TYPE\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``DELETE /bookmark/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Delete bookmark by id \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        bookmark_id_s = utils.safe_stringify(bookmark_id)\n        log.info(f\"TransferClient.delete_bookmark({bookmark_id_s})\")\n        path = self.qjoin_path(\"bookmark\", bookmark_id_s)\n        return self.delete(path)\n\n    #\n    # Synchronous Filesys Operations\n    #\n\n    def operation_ls(\n        self,\n        endpoint_id: ID_PARAM_TYPE,\n        path: Optional[str] = None,\n        show_hidden: Optional[bool] = None,\n        orderby: Optional[Union[str, List[str]]] = None,\n        # note: filter is a soft keyword in python, so using this name is okay\n        filter: Optional[str] = None,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> IterableTransferResponse:\n        \"\"\"\n        ``GET /operation/endpoint//ls``\n\n        :param path: Path to a directory on the endpoint to list\n        :type path: str, optional\n        :param show_hidden: Show hidden files (names beginning in dot).\n            Defaults to true.\n        :type show_hidden: bool, optional\n        :param orderby: One or more order-by options. Each option is\n            either a field name or a field name followed by a space and 'ASC' or 'DESC'\n            for ascending or descending.\n        :type orderby: str or list of str, optional\n        :param filter: Only return file documents that match these filter clauses. For\n            the filter syntax, see the **External Documentation** linked below.\n        :type filter: str, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Examples**\n\n        List with a path:\n\n        >>> tc = globus_sdk.TransferClient(...)\n        >>> for entry in tc.operation_ls(ep_id, path=\"/~/project1/\"):\n        >>>     print(entry[\"name\"], entry[\"type\"])\n\n        List with explicit ordering:\n\n        >>> tc = globus_sdk.TransferClient(...)\n        >>> for entry in tc.operation_ls(\n        >>>     ep_id,\n        >>>     path=\"/~/project1/\",\n        >>>     orderby=[\"type\", \"name DESC\"]\n        >>> ):\n        >>>     print(entry[\"name\"], entry[\"type\"])\n\n        **External Documentation**\n\n        See\n        `List Directory Contents \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        endpoint_id_s = utils.safe_stringify(endpoint_id)\n\n        if query_params is None:\n            query_params = {}\n        if path is not None:\n            query_params[\"path\"] = path\n        if show_hidden is not None:\n            query_params[\"show_hidden\"] = 1 if show_hidden else 0\n        if orderby is not None:\n            if isinstance(orderby, str):\n                query_params[\"orderby\"] = orderby\n            else:\n                query_params[\"orderby\"] = \",\".join(orderby)\n        if filter is not None:\n            query_params[\"filter\"] = filter\n\n        log.info(f\"TransferClient.operation_ls({endpoint_id_s}, {query_params})\")\n        req_path = self.qjoin_path(\"operation/endpoint\", endpoint_id_s, \"ls\")\n        return IterableTransferResponse(self.get(req_path, query_params=query_params))\n\n    def operation_mkdir(\n        self,\n        endpoint_id: ID_PARAM_TYPE,\n        path,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``POST /operation/endpoint//mkdir``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **Examples**\n\n        >>> tc = globus_sdk.TransferClient(...)\n        >>> tc.operation_mkdir(ep_id, path=\"/~/newdir/\")\n\n        **External Documentation**\n\n        See\n        `Make Directory \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        endpoint_id_s = utils.safe_stringify(endpoint_id)\n        path = utils.safe_stringify(path)\n        log.info(\n            \"TransferClient.operation_mkdir({}, {}, {})\".format(\n                endpoint_id_s, path, query_params\n            )\n        )\n        
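# Editor's note: the Transfer service expects a JSON \"mkdir\" document as\n        # the request body; the dict built just below maps directly onto it,\n        # e.g. {\"DATA_TYPE\": \"mkdir\", \"path\": \"/~/newdir/\"} (illustrative path).\n        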
resource_path = self.qjoin_path(\"operation/endpoint\", endpoint_id_s, \"mkdir\")\n json_body = {\"DATA_TYPE\": \"mkdir\", \"path\": path}\n return self.post(resource_path, data=json_body, query_params=query_params)\n\n def operation_rename(\n self,\n endpoint_id: ID_PARAM_TYPE,\n oldpath,\n newpath,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /operation/endpoint//rename``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> tc.operation_rename(ep_id, oldpath=\"/~/file1.txt\",\n >>> newpath=\"/~/project1data.txt\")\n\n **External Documentation**\n\n See\n `Rename \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n oldpath = utils.safe_stringify(oldpath)\n newpath = utils.safe_stringify(newpath)\n log.info(\n \"TransferClient.operation_rename({}, {}, {}, {})\".format(\n endpoint_id_s, oldpath, newpath, query_params\n )\n )\n resource_path = self.qjoin_path(\"operation/endpoint\", endpoint_id_s, \"rename\")\n json_body = {\"DATA_TYPE\": \"rename\", \"old_path\": oldpath, \"new_path\": newpath}\n return self.post(resource_path, data=json_body, query_params=query_params)\n\n def operation_symlink(\n self,\n endpoint_id: ID_PARAM_TYPE,\n symlink_target,\n path,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /operation/endpoint//symlink``\n\n :rtype: :class:`TransferResponse\n `\n\n The ``path`` is the name of the symlink, and the ``symlink_target`` is\n the path referenced by the symlink.\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> tc.operation_symlink(ep_id, symlink_target=\"/~/file1.txt\",\n >>> path=\"/~/link-to-file1.txt\")\n\n **External Documentation**\n\n See\n `Symlink \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n symlink_target = utils.safe_stringify(symlink_target)\n path = utils.safe_stringify(path)\n log.info(\n \"TransferClient.operation_symlink({}, {}, {}, {})\".format(\n endpoint_id_s, symlink_target, path, query_params\n )\n )\n resource_path = self.qjoin_path(\"operation/endpoint\", endpoint_id_s, \"symlink\")\n data = {\n \"DATA_TYPE\": \"symlink\",\n \"symlink_target\": symlink_target,\n \"path\": path,\n }\n return self.post(resource_path, data=data, query_params=query_params)\n\n #\n # Task Submission\n #\n\n def get_submission_id(\n self, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /submission_id``\n\n :rtype: :class:`TransferResponse\n `\n\n Submission IDs are required to submit tasks to the Transfer service\n via the :meth:`submit_transfer <.submit_transfer>` and\n :meth:`submit_delete <.submit_delete>` methods.\n\n Most users will not need to call this method directly, as the\n convenience classes :class:`TransferData `\n and :class:`DeleteData ` will call it\n automatically if they are not passed a ``submission_id`` explicitly.\n\n **External Documentation**\n\n See\n `Get a submission id \\\n `_\n in the REST documentation for more details.\n \"\"\"\n log.info(f\"TransferClient.get_submission_id({query_params})\")\n return self.get(\"submission_id\", query_params=query_params)\n\n def submit_transfer(self, data) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /transfer``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> tdata = 
globus_sdk.TransferData(tc, source_endpoint_id,\n >>> destination_endpoint_id,\n >>> label=\"SDK example\",\n >>> sync_level=\"checksum\")\n >>> tdata.add_item(\"/source/path/dir/\", \"/dest/path/dir/\",\n >>> recursive=True)\n >>> tdata.add_item(\"/source/path/file.txt\",\n >>> \"/dest/path/file.txt\")\n >>> transfer_result = tc.submit_transfer(tdata)\n >>> print(\"task_id =\", transfer_result[\"task_id\"])\n\n The `data` parameter can be a normal Python dictionary, or\n a :class:`TransferData ` object.\n\n **External Documentation**\n\n See\n `Submit a transfer task \\\n `_\n in the REST documentation for more details.\n \"\"\"\n log.info(\"TransferClient.submit_transfer(...)\")\n return self.post(\"/transfer\", data=data)\n\n def submit_delete(self, data) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /delete``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> ddata = globus_sdk.DeleteData(tc, endpoint_id, recursive=True)\n >>> ddata.add_item(\"/dir/to/delete/\")\n >>> ddata.add_item(\"/file/to/delete/file.txt\")\n >>> delete_result = tc.submit_delete(ddata)\n >>> print(\"task_id =\", delete_result[\"task_id\"])\n\n The `data` parameter can be a normal Python dictionary, or\n a :class:`DeleteData ` object.\n\n **External Documentation**\n\n See\n `Submit a delete task \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(\"TransferClient.submit_delete(...)\")\n return self.post(\"/delete\", data=data)\n\n #\n # Task inspection and management\n #\n\n @paging.has_paginator(\n paging.LimitOffsetTotalPaginator,\n items_key=\"DATA\",\n get_page_size=_get_page_size,\n max_total_results=1000,\n page_size=1000,\n )\n def task_list(\n self,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> IterableTransferResponse:\n \"\"\"\n Get an iterable of task documents owned by the current user.\n\n ``GET /task_list``\n\n :param limit: limit the number of results\n :type limit: int, optional\n :param offset: offset used in paging\n :type offset: int, optional\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n :rtype: :class:`IterableTransferResponse\n `\n\n **Examples**\n\n Fetch 10 tasks and print some basic info:\n\n >>> tc = TransferClient(...)\n >>> for task in tc.task_list(limit=10):\n >>> print(\"Task({}): {} -> {}\".format(\n >>> task[\"task_id\"], task[\"source_endpoint\"],\n >>> task[\"destination_endpoint\"]))\n\n **External Documentation**\n\n See\n `Task list \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(\"TransferClient.task_list(...)\")\n if query_params is None:\n query_params = {}\n if limit is not None:\n query_params[\"limit\"] = limit\n if offset is not None:\n query_params[\"offset\"] = offset\n return IterableTransferResponse(\n self.get(\"task_list\", query_params=query_params)\n )\n\n @paging.has_paginator(\n paging.LimitOffsetTotalPaginator,\n items_key=\"DATA\",\n get_page_size=_get_page_size,\n max_total_results=1000,\n page_size=1000,\n )\n def task_event_list(\n self,\n task_id: ID_PARAM_TYPE,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> IterableTransferResponse:\n r\"\"\"\n List events (for example, faults and errors) for a given Task.\n\n ``GET /task//event_list``\n\n :param task_id: The ID of the task to inspect.\n :type task_id: str\n :param limit: limit the 
number of results\n        :type limit: int, optional\n        :param offset: offset used in paging\n        :type offset: int, optional\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Examples**\n\n        Fetch 10 events and print some basic info:\n\n        >>> tc = TransferClient(...)\n        >>> task_id = ...\n        >>> for event in tc.task_event_list(task_id, limit=10):\n        >>>     print(\"Event on Task({}) at {}:\\n{}\".format(\n        >>>         task_id, event[\"time\"], event[\"description\"]))\n\n        **External Documentation**\n\n        See\n        `Get event list \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.task_event_list({task_id_s}, ...)\")\n        path = self.qjoin_path(\"task\", task_id_s, \"event_list\")\n        if query_params is None:\n            query_params = {}\n        if limit is not None:\n            query_params[\"limit\"] = limit\n        if offset is not None:\n            query_params[\"offset\"] = offset\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def get_task(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``GET /task/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task by id \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.get_task({task_id_s}, ...)\")\n        resource_path = self.qjoin_path(\"task\", task_id_s)\n        return self.get(resource_path, query_params=query_params)\n\n    def update_task(\n        self,\n        task_id: ID_PARAM_TYPE,\n        data: Dict,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``PUT /task/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Update task by id \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.update_task({task_id_s}, ...)\")\n        resource_path = self.qjoin_path(\"task\", task_id_s)\n        return self.put(resource_path, data=data, query_params=query_params)\n\n    def cancel_task(self, task_id: ID_PARAM_TYPE) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``POST /task//cancel``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Cancel task by id \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.cancel_task({task_id_s})\")\n        resource_path = self.qjoin_path(\"task\", task_id_s, \"cancel\")\n        return self.post(resource_path)\n\n    def task_wait(\n        self, task_id: ID_PARAM_TYPE, timeout=10, polling_interval=10\n    ) -> bool:\n        r\"\"\"\n        Wait until a Task is complete or fails, with a time limit. If the task\n        is \"ACTIVE\" after time runs out, returns ``False``. Otherwise returns\n        ``True``.\n\n        :param task_id: ID of the Task to wait on for completion\n        :type task_id: str\n        :param timeout: Number of seconds to wait in total. Minimum 1. [Default: ``10``]\n        :type timeout: int, optional\n        :param polling_interval: Number of seconds between queries to Globus about the\n            Task status. Minimum 1. 
[Default: ``10``]\n        :type polling_interval: int, optional\n\n        **Examples**\n\n        If you want to wait for a task to terminate, but want to warn every\n        minute that it doesn't terminate, you could:\n\n        >>> tc = TransferClient(...)\n        >>> while not tc.task_wait(task_id, timeout=60):\n        >>>     print(\"Another minute went by without {0} terminating\"\n        >>>           .format(task_id))\n\n        Or perhaps you want to check on a task every minute for 10 minutes, and\n        give up if it doesn't complete in that time:\n\n        >>> tc = TransferClient(...)\n        >>> done = tc.task_wait(task_id, timeout=600, polling_interval=60)\n        >>> if not done:\n        >>>     print(\"{0} didn't successfully terminate!\"\n        >>>           .format(task_id))\n        >>> else:\n        >>>     print(\"{0} completed\".format(task_id))\n\n        You could print dots while you wait for a task by only waiting one\n        second at a time:\n\n        >>> tc = TransferClient(...)\n        >>> while not tc.task_wait(task_id, timeout=1, polling_interval=1):\n        >>>     print(\".\", end=\"\")\n        >>> print(\"\\n{0} completed!\".format(task_id))\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(\n            \"TransferClient.task_wait({}, {}, {})\".format(\n                task_id_s, timeout, polling_interval\n            )\n        )\n\n        # check valid args\n        if timeout < 1:\n            log.error(f\"task_wait() timeout={timeout} is less than minimum of 1s\")\n            raise exc.GlobusSDKUsageError(\n                \"TransferClient.task_wait timeout has a minimum of 1\"\n            )\n        if polling_interval < 1:\n            log.error(\n                \"task_wait() polling_interval={} is less than minimum of 1s\".format(\n                    polling_interval\n                )\n            )\n            raise exc.GlobusSDKUsageError(\n                \"TransferClient.task_wait polling_interval has a minimum of 1\"\n            )\n\n        # ensure that we always wait at least one interval, even if the timeout\n        # is shorter than the polling interval, by reducing the interval to the\n        # timeout if it is larger\n        polling_interval = min(timeout, polling_interval)\n\n        # helper for readability\n        def timed_out(waited_time):\n            return waited_time > timeout\n\n        waited_time = 0\n        # doing this as a while-True loop actually makes it simpler than doing\n        # while not timed_out(waited_time) because of the end condition\n        while True:\n            # get task, check if status != ACTIVE\n            task = self.get_task(task_id_s)\n            status = task[\"status\"]\n            if status != \"ACTIVE\":\n                log.debug(\n                    \"task_wait(task_id={}) terminated with status={}\".format(\n                        task_id_s, status\n                    )\n                )\n                return True\n\n            # make sure to check if we timed out before sleeping again, so we\n            # don't sleep an extra polling_interval\n            waited_time += polling_interval\n            if timed_out(waited_time):\n                log.debug(f\"task_wait(task_id={task_id_s}) timed out\")\n                return False\n\n            log.debug(f\"task_wait(task_id={task_id_s}) waiting {polling_interval}s\")\n            time.sleep(polling_interval)\n        # unreachable -- end of task_wait\n\n    def task_pause_info(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``GET /task//pause_info``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task pause info \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.task_pause_info({task_id_s}, ...)\")\n        resource_path = self.qjoin_path(\"task\", task_id_s, \"pause_info\")\n        return self.get(resource_path, query_params=query_params)\n\n    @paging.has_paginator(paging.MarkerPaginator, items_key=\"DATA\")\n    def task_successful_transfers(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> 
IterableTransferResponse:\n        \"\"\"\n        Get the successful file transfers for a completed Task.\n\n        .. note::\n\n            Only files that were actually transferred are included. This does\n            not include directories, files that were checked but skipped as\n            part of a sync transfer, or files which were skipped due to\n            skip_source_errors being set on the task.\n\n        ``GET /task//successful_transfers``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Examples**\n\n        Fetch all transferred files for a task and print some basic info:\n\n        >>> tc = TransferClient(...)\n        >>> task_id = ...\n        >>> for info in tc.task_successful_transfers(task_id):\n        >>>     print(\"{} -> {}\".format(\n        >>>         info[\"source_path\"], info[\"destination_path\"]))\n\n        **External Documentation**\n\n        See\n        `Get Task Successful Transfers\\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.task_successful_transfers({task_id_s}, ...)\")\n        path = self.qjoin_path(\"task\", task_id_s, \"successful_transfers\")\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    @paging.has_paginator(paging.MarkerPaginator, items_key=\"DATA\")\n    def task_skipped_errors(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get path and error information for all paths that were skipped due\n        to skip_source_errors being set on a completed transfer Task.\n\n        ``GET /task//skipped_errors``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Examples**\n\n        Fetch all skipped errors for a task and print some basic info:\n\n        >>> tc = TransferClient(...)\n        >>> task_id = ...\n        >>> for info in tc.task_skipped_errors(task_id):\n        >>>     print(\"{} -> {}\".format(\n        >>>         info[\"error_code\"], info[\"source_path\"]))\n\n        **External Documentation**\n\n        See\n        `Get Task Skipped Errors\\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(\"TransferClient.task_skipped_errors(%s, ...)\", task_id_s)\n        resource_path = self.qjoin_path(\"task\", task_id_s, \"skipped_errors\")\n        return IterableTransferResponse(\n            self.get(resource_path, query_params=query_params)\n        )\n\n    #\n    # advanced endpoint management (requires endpoint manager role)\n    #\n\n    def endpoint_manager_monitored_endpoints(\n        self, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get endpoints the current user is a monitor or manager on.\n\n        ``GET endpoint_manager/monitored_endpoints``\n\n        :rtype: iterable of :class:`GlobusResponse\n                `\n\n        See\n        `Get monitored endpoints \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        log.info(f\"TransferClient.endpoint_manager_monitored_endpoints({query_params})\")\n        path = self.qjoin_path(\"endpoint_manager\", \"monitored_endpoints\")\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_hosted_endpoint_list(\n        self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get shared endpoints 
hosted on the given endpoint.\n\n        ``GET /endpoint_manager/endpoint//hosted_endpoint_list``\n\n        :rtype: iterable of :class:`GlobusResponse\n                `\n\n        See\n        `Get hosted endpoint list \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        endpoint_id_s = utils.safe_stringify(endpoint_id)\n        log.info(\n            f\"TransferClient.endpoint_manager_hosted_endpoint_list({endpoint_id_s})\"\n        )\n        path = self.qjoin_path(\n            \"endpoint_manager\", \"endpoint\", endpoint_id_s, \"hosted_endpoint_list\"\n        )\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_get_endpoint(\n        self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Get endpoint details as an admin.\n\n        ``GET /endpoint_manager/endpoint/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get endpoint as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        endpoint_id_s = utils.safe_stringify(endpoint_id)\n        log.info(f\"TransferClient.endpoint_manager_get_endpoint({endpoint_id_s})\")\n        path = self.qjoin_path(\"endpoint_manager\", \"endpoint\", endpoint_id_s)\n        return self.get(path, query_params=query_params)\n\n    def endpoint_manager_acl_list(\n        self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get a list of access control rules on the specified endpoint as an admin.\n\n        ``GET endpoint_manager/endpoint//access_list``\n\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get endpoint access list as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        endpoint_id_s = utils.safe_stringify(endpoint_id)\n        log.info(\n            f\"TransferClient.endpoint_manager_acl_list({endpoint_id_s}, ...)\"\n        )\n        path = self.qjoin_path(\n            \"endpoint_manager\", \"endpoint\", endpoint_id_s, \"access_list\"\n        )\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    #\n    # endpoint manager task methods\n    #\n\n    @paging.has_paginator(paging.LastKeyPaginator, items_key=\"DATA\")\n    def endpoint_manager_task_list(\n        self, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        r\"\"\"\n        Get a list of tasks visible via ``activity_monitor`` role, as opposed\n        to tasks owned by the current user.\n\n        ``GET endpoint_manager/task_list``\n\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Filters**\n\n        The following filters are supported (passed as keyword arguments in\n        ``query_params``). For any query that doesn't specify a filter_status\n        that is a subset of (\"ACTIVE\", \"INACTIVE\"), at least one of\n        filter_task_id or filter_endpoint is required.\n\n        ====================== ================ ========================\n        Query Parameter        Filter Type      Description\n        ====================== ================ ========================\n        filter_status          equality list    |filter_status|\n        filter_task_id         equality list    |filter_task_id|\n        filter_owner_id        equality         |filter_owner_id|\n        filter_endpoint        equality         |filter_endpoint|\n        filter_is_paused       boolean equality |filter_is_paused|\n        filter_completion_time datetime range   |filter_completion_time|\n        filter_min_faults      int              |filter_min_faults|\n        filter_local_user      equality         |filter_local_user|\n        ====================== ================ ========================\n\n        .. 
|filter_status| replace::\n            Comma separated list of task statuses. Return only tasks with any of the\n            specified statuses. Note that in-progress tasks will have status \"ACTIVE\" or\n            \"INACTIVE\", and completed tasks will have status \"SUCCEEDED\" or \"FAILED\".\n\n        .. |filter_task_id| replace::\n            Comma separated list of task_ids, limit 50. Return only tasks with any of\n            the specified ids. If any of the specified tasks do not involve an endpoint\n            the user has an appropriate role for, a ``PermissionDenied`` error will be\n            returned. This filter can't be combined with any other filter. If another\n            filter is passed, a ``BadRequest`` will be returned.\n\n        .. |filter_owner_id| replace::\n            A Globus Auth identity id. Limit results to tasks submitted by the specified\n            identity, or linked to the specified identity, at submit time. Returns\n            ``UserNotFound`` if the identity does not exist or has never used the Globus\n            Transfer service. If no tasks were submitted by this user to an endpoint the\n            current user has an appropriate role on, an empty result set will be\n            returned. Unless filtering for running tasks (i.e. ``filter_status`` is a\n            subset of (\"ACTIVE\", \"INACTIVE\")), ``filter_endpoint`` is required when using\n            ``filter_owner_id``.\n\n        .. |filter_endpoint| replace::\n            Single endpoint id or canonical name. Using canonical name is deprecated.\n            Return only tasks with a matching source or destination endpoint or matching\n            source or destination host endpoint.\n\n        .. |filter_is_paused| replace::\n            Return only tasks with the specified ``is_paused`` value. Requires that\n            ``filter_status`` is also passed and contains a subset of \"ACTIVE\" and\n            \"INACTIVE\". Completed tasks always have ``is_paused`` equal to \"false\" and\n            filtering on their paused state is not useful and not supported. Note that\n            pausing is an async operation, and after a pause rule is inserted it will\n            take time before the is_paused flag is set on all affected tasks. Tasks\n            paused by id will have the ``is_paused`` flag set immediately.\n\n        .. |filter_completion_time| replace::\n            Start and end date-times separated by a comma. Each datetime should be\n            specified as a string in ISO 8601 format: ``YYYY-MM-DDTHH:MM:SS``, where the\n            \"T\" separating date and time is literal, with optional \\+/-HH:MM for\n            timezone. If no timezone is specified, UTC is assumed, or a trailing \"Z\" can\n            be specified to make UTC explicit. A space can be used between the date and\n            time instead of the \"T\". A blank string may be used for either the start or\n            end (but not both) to indicate no limit on that side. Returns only complete\n            tasks with ``completion_time`` in the specified range. If the end date is\n            blank, it will also include all active tasks, since they will complete some\n            time in the future.\n\n        .. |filter_min_faults| replace::\n            Minimum number of cumulative faults, inclusive. Return only tasks with\n            ``faults >= N``, where N is the filter value. Use ``filter_min_faults=1`` to\n            find all tasks with at least one fault. Note that many errors are not fatal\n            and the task may still be successful even if ``faults >= 1``.\n\n        .. |filter_local_user| replace::\n            A valid username for the target system running the endpoint, as a utf8\n            encoded string. Requires that ``filter_endpoint`` is also set. 
Return only\n            tasks that have successfully fetched the local user from the endpoint, and\n            match the values of ``filter_endpoint`` and ``filter_local_user`` on the\n            source or on the destination.\n\n        **Examples**\n\n        Fetch some tasks and print some basic info:\n\n        >>> tc = TransferClient(...)\n        >>> for task in tc.endpoint_manager_task_list(filter_status=\"ACTIVE\"):\n        >>>     print(\"Task({}): {} -> {}\\n was submitted by\\n {}\".format(\n        >>>         task[\"task_id\"], task[\"source_endpoint\"],\n        >>>         task[\"destination_endpoint\"], task[\"owner_string\"]))\n\n        Do that same operation on *all* tasks visible via ``activity_monitor``\n        role:\n\n        >>> tc = TransferClient(...)\n        >>> for page in tc.paginated.endpoint_manager_task_list(\n        >>>     filter_status=\"ACTIVE\"\n        >>> ):\n        >>>     for task in page:\n        >>>         print(\"Task({}): {} -> {}\\n was submitted by\\n {}\".format(\n        >>>             task[\"task_id\"], task[\"source_endpoint\"],\n        >>>             task[\"destination_endpoint\"], task[\"owner_string\"]))\n\n        **External Documentation**\n\n        See\n        `Advanced Endpoint Management: Get tasks \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        log.info(\"TransferClient.endpoint_manager_task_list(...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"task_list\")\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_get_task(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ):\n        \"\"\"\n        Get task info as an admin. Requires activity monitor effective role on\n        the destination endpoint of the task.\n\n        ``GET /endpoint_manager/task/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.endpoint_manager_get_task({task_id_s}, ...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"task\", task_id_s)\n        return self.get(path, query_params=query_params)\n\n    @paging.has_paginator(\n        paging.LimitOffsetTotalPaginator,\n        items_key=\"DATA\",\n        get_page_size=_get_page_size,\n        max_total_results=1000,\n        page_size=1000,\n    )\n    def endpoint_manager_task_event_list(\n        self,\n        task_id: ID_PARAM_TYPE,\n        limit: Optional[int] = None,\n        offset: Optional[int] = None,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> IterableTransferResponse:\n        \"\"\"\n        List events (for example, faults and errors) for a given task as an\n        admin. 
Requires activity monitor effective role on the destination\n        endpoint of the task.\n\n        ``GET /endpoint_manager/task//event_list``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param limit: limit the number of results\n        :type limit: int, optional\n        :param offset: offset used in paging\n        :type offset: int, optional\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task events as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.endpoint_manager_task_event_list({task_id_s}, ...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"task\", task_id_s, \"event_list\")\n        if query_params is None:\n            query_params = {}\n        if limit is not None:\n            query_params[\"limit\"] = limit\n        if offset is not None:\n            query_params[\"offset\"] = offset\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_task_pause_info(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Get details about why a task is paused as an admin. Requires activity\n        monitor effective role on the destination endpoint of the task.\n\n        ``GET /endpoint_manager/task//pause_info``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task pause info as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.endpoint_manager_task_pause_info({task_id_s}, ...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"task\", task_id_s, \"pause_info\")\n        return self.get(path, query_params=query_params)\n\n    @paging.has_paginator(paging.MarkerPaginator, items_key=\"DATA\")\n    def endpoint_manager_task_successful_transfers(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get the successful file transfers for a completed Task as an admin.\n\n        ``GET /endpoint_manager/task//successful_transfers``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task successful transfers as admin\\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(\n            \"TransferClient.endpoint_manager_task_successful_transfers(%s, ...)\",\n            task_id_s,\n        )\n        path = self.qjoin_path(\n            \"endpoint_manager\", \"task\", task_id_s, \"successful_transfers\"\n        )\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_task_skipped_errors(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get skipped errors for a completed Task as an admin.\n\n        ``GET /endpoint_manager/task//skipped_errors``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task skipped errors as admin\\\n        `_\n        in the REST documentation for details.\n        
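**Examples**\n\n        A short sketch of typical usage (editor's addition; ``tc`` and\n        ``task_id`` are hypothetical, and ``error_code`` / ``source_path`` are\n        the same fields shown for ``task_skipped_errors`` above):\n\n        >>> tc = TransferClient(...)\n        >>> task_id = ...\n        >>> for info in tc.endpoint_manager_task_skipped_errors(task_id):\n        >>>     print(\"{} -> {}\".format(info[\"error_code\"], info[\"source_path\"]))\n        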
\"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(\n            f\"TransferClient.endpoint_manager_task_skipped_errors({task_id_s}, ...)\"\n        )\n        path = self.qjoin_path(\"endpoint_manager\", \"task\", task_id_s, \"skipped_errors\")\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_cancel_tasks(\n        self,\n        task_ids: Iterable[ID_PARAM_TYPE],\n        message,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Cancel a list of tasks as an admin. Requires activity manager effective\n        role on the source or destination endpoint(s) of the task(s).\n\n        ``POST /endpoint_manager/admin_cancel``\n\n        :param task_ids: List of task ids to cancel.\n        :type task_ids: iterable of str\n        :param message: Message given to all users whose tasks have been canceled.\n        :type message: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Cancel tasks as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        str_task_ids = [utils.safe_stringify(i) for i in task_ids]\n        message = utils.safe_stringify(message)\n        log.info(\n            f\"TransferClient.endpoint_manager_cancel_tasks({str_task_ids}, {message})\"\n        )\n        # message was already passed through safe_stringify above\n        data = {\"message\": message, \"task_id_list\": str_task_ids}\n        path = self.qjoin_path(\"endpoint_manager\", \"admin_cancel\")\n        return self.post(path, data=data, query_params=query_params)\n\n    def endpoint_manager_cancel_status(\n        self, admin_cancel_id, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Get the status of an admin cancel (result of\n        endpoint_manager_cancel_tasks).\n\n        ``GET /endpoint_manager/admin_cancel/